64 files changed, 395 insertions(+), 313 deletions(-)
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index c55df2911136..cd9c9f6a7cd9 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -94,14 +94,11 @@ has a requirements for a minimum number of vectors the driver can pass a
 min_vecs argument set to this limit, and the PCI core will return -ENOSPC
 if it can't meet the minimum number of vectors.
 
-The flags argument should normally be set to 0, but can be used to pass the
-PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flag in case a device claims to support
-MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
-case the device does not support legacy interrupt lines.
-
-By default this function will spread the interrupts around the available
-CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
-flag.
+The flags argument is used to specify which type of interrupt can be used
+by the device and the driver (PCI_IRQ_LEGACY, PCI_IRQ_MSI, PCI_IRQ_MSIX).
+A convenient short-hand (PCI_IRQ_ALL_TYPES) is also available to ask for
+any possible kind of interrupt.  If the PCI_IRQ_AFFINITY flag is set,
+pci_alloc_irq_vectors() will spread the interrupts around the available CPUs.
 
 To get the Linux IRQ numbers passed to request_irq() and free_irq() and the
 vectors, use the following function:
@@ -131,7 +128,7 @@ larger than the number supported by the device it will automatically be
 capped to the supported limit, so there is no need to query the number of
 vectors supported beforehand:
 
-	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
+	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_ALL_TYPES)
 	if (nvec < 0)
 		goto out_err;
 
@@ -140,7 +137,7 @@ interrupts it can request a particular number of interrupts by passing that
 number to pci_alloc_irq_vectors() function as both 'min_vecs' and
 'max_vecs' parameters:
 
-	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
+	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_ALL_TYPES);
 	if (ret < 0)
 		goto out_err;
 
@@ -148,15 +145,14 @@ The most notorious example of the request type described above is enabling
 the single MSI mode for a device.  It could be done by passing two 1s as
 'min_vecs' and 'max_vecs':
 
-	ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
 	if (ret < 0)
 		goto out_err;
 
 Some devices might not support using legacy line interrupts, in which case
-the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
-can't provide MSI or MSI-X interrupts:
+the driver can specify that only MSI or MSI-X is acceptable:
 
-	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
+	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI | PCI_IRQ_MSIX);
 	if (nvec < 0)
 		goto out_err;
 
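Editor's note: the snippets in the updated HOWTO all funnel through the same call. A minimal sketch of the full allocate/map/request pattern follows; my_irq_handler, my_dev, and the vector count of 8 are hypothetical, not taken from this patch:

	static irqreturn_t my_irq_handler(int irq, void *dev);	/* hypothetical */

	static int my_setup_irqs(struct pci_dev *pdev, void *my_dev)
	{
		int i, err;
		int nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);

		if (nvec < 0)
			return nvec;

		for (i = 0; i < nvec; i++) {
			/* pci_irq_vector() maps a vector index to a Linux IRQ number */
			err = request_irq(pci_irq_vector(pdev, i), my_irq_handler,
					  0, "my_dev", my_dev);
			if (err) {
				while (--i >= 0)
					free_irq(pci_irq_vector(pdev, i), my_dev);
				pci_free_irq_vectors(pdev);
				return err;
			}
		}
		return 0;
	}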
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 41b817f7b670..88b6ea1ad290 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -62,7 +62,7 @@ For more examples of cooling devices, refer to the example sections below.
 Required properties:
 - #cooling-cells:	Used to provide cooling device specific information
   Type: unsigned	while referring to it. Must be at least 2, in order
-  Size: one cell	to specify minimum and maximum cooling state used
+  Size: one cell	to specify minimum and maximum cooling state used
 			in the reference. The first cell is the minimum
 			cooling state requested and the second cell is
 			the maximum cooling state requested in the reference.
@@ -119,7 +119,7 @@ Required properties:
 Optional property:
 - contribution:		The cooling contribution to the thermal zone of the
   Type: unsigned	referred cooling device at the referred trip point.
-  Size: one cell	The contribution is a ratio of the sum
+  Size: one cell	The contribution is a ratio of the sum
 			of all cooling contributions within a thermal zone.
 
 Note: Using the THERMAL_NO_LIMIT (-1UL) constant in the cooling-device phandle
@@ -145,7 +145,7 @@ Required properties:
   Size: one cell
 
 - thermal-sensors:	A list of thermal sensor phandles and sensor specifier
-  Type: list of		used while monitoring the thermal zone.
+  Type: list of		used while monitoring the thermal zone.
   phandles + sensor
   specifier
 
@@ -473,7 +473,7 @@ thermal-zones {
 				<&adc>;		/* pcb north */
 
 		/* hotspot = 100 * bandgap - 120 * adc + 484 */
-		coefficients =		<100	-120	484>;
+		coefficients =		<100	-120	484>;
 
 		trips {
 			...
@@ -502,7 +502,7 @@ from the ADC sensor. The binding would be then:
 	thermal-sensors =  <&adc>;
 
 	/* hotspot = 1 * adc + 6000 */
-	coefficients =		<1	6000>;
+	coefficients =		<1	6000>;
 
 (d) - Board thermal
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 46c030a49186..a4f4d693e2c1 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3032,6 +3032,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 				PAGE_SIZE is used as alignment.
 				PCI-PCI bridge can be specified, if resource
 				windows need to be expanded.
+				To specify the alignment for several
+				instances of a device, the PCI vendor,
+				device, subvendor, and subdevice may be
+				specified, e.g., 4096@pci:8086:9c22:103c:198f
 		ecrc=		Enable/disable PCIe ECRC (transaction layer
 				end-to-end CRC checking).
 				bios: Use BIOS/firmware settings. This is the
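Editor's note: the new syntax is consumed by the existing pci=resource_alignment= option; a boot command line using the documented example IDs (hypothetical device, values taken from the text above) would look like:

	pci=resource_alignment=4096@pci:8086:9c22:103c:198f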
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index ad7860c5ce15..51597f344a62 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -142,7 +142,7 @@
 
 #ifdef CONFIG_ARC_CURR_IN_REG
 	; Retrieve orig r25 and save it with rest of callee_regs
-	ld.as   r12, [r12, PT_user_r25]
+	ld	r12, [r12, PT_user_r25]
 	PUSH	r12
 #else
 	PUSH	r25
@@ -198,7 +198,7 @@
 
 	; SP is back to start of pt_regs
 #ifdef CONFIG_ARC_CURR_IN_REG
-	st.as   r12, [sp, PT_user_r25]
+	st	r12, [sp, PT_user_r25]
 #endif
 .endm
 
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index c1d36458bfb7..4c6eed80cd8b 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
 .endm
 
 .macro IRQ_ENABLE  scratch
+	TRACE_ASM_IRQ_ENABLE
 	lr	\scratch, [status32]
 	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
 	flag	\scratch
-	TRACE_ASM_IRQ_ENABLE
 .endm
 
 #endif	/* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 0f92d97432a2..89eeb3720051 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 
 #define pte_page(pte)		pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-#define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
index 0f99ac8fcbb2..0037a587320d 100644
--- a/arch/arc/include/uapi/asm/elf.h
+++ b/arch/arc/include/uapi/asm/elf.h
@@ -13,8 +13,15 @@
 
 /* Machine specific ELF Hdr flags */
 #define EF_ARC_OSABI_MSK	0x00000f00
-#define EF_ARC_OSABI_ORIG	0x00000000   /* MUST be zero for back-compat */
-#define EF_ARC_OSABI_CURRENT	0x00000300   /* v3 (no legacy syscalls) */
+
+#define EF_ARC_OSABI_V3		0x00000300   /* v3 (no legacy syscalls) */
+#define EF_ARC_OSABI_V4		0x00000400   /* v4 (64bit data any reg align) */
+
+#if __GNUC__ < 6
+#define EF_ARC_OSABI_CURRENT	EF_ARC_OSABI_V3
+#else
+#define EF_ARC_OSABI_CURRENT	EF_ARC_OSABI_V4
+#endif
 
 typedef unsigned long elf_greg_t;
 typedef unsigned long elf_fpregset_t;
diff --git a/arch/arc/kernel/arcksyms.c b/arch/arc/kernel/arcksyms.c
index 4d9e77724bed..000dd041ab42 100644
--- a/arch/arc/kernel/arcksyms.c
+++ b/arch/arc/kernel/arcksyms.c
@@ -28,6 +28,7 @@ extern void __muldf3(void);
 extern void __divdf3(void);
 extern void __floatunsidf(void);
 extern void __floatunsisf(void);
+extern void __udivdi3(void);
 
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);
@@ -45,6 +46,7 @@ EXPORT_SYMBOL(__muldf3);
 EXPORT_SYMBOL(__divdf3);
 EXPORT_SYMBOL(__floatunsidf);
 EXPORT_SYMBOL(__floatunsisf);
+EXPORT_SYMBOL(__udivdi3);
 
 /* ARC optimised assembler routines */
 EXPORT_SYMBOL(memset);
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index b5db9e7fd649..be1972bd2729 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -199,7 +199,7 @@ int elf_check_arch(const struct elf32_hdr *x)
 	}
 
 	eflags = x->e_flags;
-	if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+	if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
 		pr_err("ABI mismatch - you need newer toolchain\n");
 		force_sigsegv(SIGSEGV, current);
 		return 0;
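Editor's note: the comparison becomes an exact match because a kernel built for one OSABI cannot run binaries of the other. A standalone sketch of the same decoding, using only the EF_ARC_OSABI_* values introduced above (illustrative, not kernel code):

	#include <stdio.h>

	#define EF_ARC_OSABI_MSK	0x00000f00

	/* The OSABI version is simply the masked e_flags field shifted down. */
	static unsigned int arc_osabi_version(unsigned int e_flags)
	{
		return (e_flags & EF_ARC_OSABI_MSK) >> 8;
	}

	int main(void)
	{
		printf("v%u\n", arc_osabi_version(0x00000300));	/* prints v3 */
		printf("v%u\n", arc_osabi_version(0x00000400));	/* prints v4 */
		return 0;
	}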
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index a946400a86d0..f52a0d0dc462 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -291,8 +291,10 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 			       cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
 			       cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
 
-	n += scnprintf(buf + n, len - n,
-		       "OS ABI [v3]\t: no-legacy-syscalls\n");
+	n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
+			EF_ARC_OSABI_CURRENT >> 8,
+			EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
+			"no-legacy-syscalls" : "64-bit data any register aligned");
 
 	return buf;
 }
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 5a294b2c3cb3..0b10efe3a6a7 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -921,6 +921,15 @@ void arc_cache_init(void)
 
 	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
 
+	/*
+	 * Only master CPU needs to execute rest of function:
+	 *  - Assume SMP so all cores will have same cache config so
+	 *    any geomtry checks will be same for all
+	 *  - IOC setup / dma callbacks only need to be setup once
+	 */
+	if (cpu)
+		return;
+
 	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
 		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
 
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 04f83322c9fd..77ff64a874a1 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -61,6 +61,7 @@ void *kmap(struct page *page)
 
 	return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void *kmap_atomic(struct page *page)
 {
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index b0b82f5ea338..3d2cef6488ea 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -50,7 +50,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 static struct vcpu_info __percpu *xen_vcpu_info;
 
 /* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
 /* These are unused until we support booting "pre-ballooned" */
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index 1dd5bd8a8c59..133055311dce 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -81,7 +81,7 @@
   .altinstr_replacement : { *(.altinstr_replacement) }
   /* .exit.text is discard at runtime, not link time, to deal with references
      from .altinstructions and .eh_frame */
-  .exit.text : { *(.exit.text) }
+  .exit.text : { EXIT_TEXT }
   .exit.data : { *(.exit.data) }
 
   .preinit_array : {
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 89fa85e8b10c..6f97fb33ae21 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -485,10 +485,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
 			req = cast_mcryptd_ctx_to_req(req_ctx);
 			if (irqs_disabled())
-				rctx->complete(&req->base, ret);
+				req_ctx->complete(&req->base, ret);
 			else {
 				local_bh_disable();
-				rctx->complete(&req->base, ret);
+				req_ctx->complete(&req->base, ret);
 				local_bh_enable();
 			}
 		}
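Editor's note: this hunk (and its sha512 twin below) fixes a shadowing-style slip: inside the completion walk, the callback must come from req_ctx, the entry being completed, not from the outer rctx argument. A generic standalone sketch of the intended pattern, with a hypothetical struct job rather than the crypto types:

	struct job {
		struct job *next;
		void (*complete)(struct job *j, int err);
	};

	/* Complete every queued job through its own callback. */
	static void complete_all(struct job *head, int err)
	{
		struct job *j;

		for (j = head; j; j = j->next)
			j->complete(j, err);	/* j, not head */
	}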
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index b691da981cd9..a78a0694ddef 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -265,13 +265,14 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
 	vpinsrd	$1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
 	vpinsrd	$2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
 	vpinsrd	$3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
-	movl	_args_digest+4*32(state, idx, 4), tmp2_w
+	vmovd	_args_digest(state , idx, 4) , %xmm0
 	vpinsrd	$1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
 	vpinsrd	$2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
 	vpinsrd	$3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
 
 	vmovdqu	%xmm0, _result_digest(job_rax)
-	movl	tmp2_w, _result_digest+1*16(job_rax)
+	offset = (_result_digest + 1*16)
+	vmovdqu	%xmm1, offset(job_rax)
 
 	pop	%rbx
 
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index f4cf5b78fd36..d210174a52b0 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -497,10 +497,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
 			req = cast_mcryptd_ctx_to_req(req_ctx);
 			if (irqs_disabled())
-				rctx->complete(&req->base, ret);
+				req_ctx->complete(&req->base, ret);
 			else {
 				local_bh_disable();
-				rctx->complete(&req->base, ret);
+				req_ctx->complete(&req->base, ret);
 				local_bh_enable();
 			}
 		}
diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c
index b814ca675131..7948be342ee9 100644
--- a/arch/x86/pci/vmd.c
+++ b/arch/x86/pci/vmd.c
@@ -41,6 +41,7 @@ static DEFINE_RAW_SPINLOCK(list_lock);
  * @node:	list item for parent traversal.
  * @rcu:	RCU callback item for freeing.
  * @irq:	back pointer to parent.
+ * @enabled:	true if driver enabled IRQ
  * @virq:	the virtual IRQ value provided to the requesting driver.
  *
  * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
@@ -50,6 +51,7 @@ struct vmd_irq {
 	struct list_head	node;
 	struct rcu_head		rcu;
 	struct vmd_irq_list	*irq;
+	bool			enabled;
 	unsigned int		virq;
 };
 
@@ -122,7 +124,9 @@ static void vmd_irq_enable(struct irq_data *data)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&list_lock, flags);
+	WARN_ON(vmdirq->enabled);
 	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+	vmdirq->enabled = true;
 	raw_spin_unlock_irqrestore(&list_lock, flags);
 
 	data->chip->irq_unmask(data);
@@ -136,8 +140,10 @@ static void vmd_irq_disable(struct irq_data *data)
 	data->chip->irq_mask(data);
 
 	raw_spin_lock_irqsave(&list_lock, flags);
-	list_del_rcu(&vmdirq->node);
-	INIT_LIST_HEAD_RCU(&vmdirq->node);
+	if (vmdirq->enabled) {
+		list_del_rcu(&vmdirq->node);
+		vmdirq->enabled = false;
+	}
 	raw_spin_unlock_irqrestore(&list_lock, flags);
 }
 
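Editor's note: taken together, the vmd.c hunks make enable/disable balanced under the list lock: enable warns if the entry is already live, disable only unlinks once. A condensed sketch of this pattern with hypothetical obj/active/lock names (illustrative only):

	struct obj {
		struct list_head node;
		bool enabled;
	};

	static LIST_HEAD(active);
	static DEFINE_RAW_SPINLOCK(lock);

	static void obj_enable(struct obj *o)
	{
		raw_spin_lock(&lock);
		WARN_ON(o->enabled);		/* double enable is a bug */
		list_add_tail_rcu(&o->node, &active);
		o->enabled = true;
		raw_spin_unlock(&lock);
	}

	static void obj_disable(struct obj *o)
	{
		raw_spin_lock(&lock);
		if (o->enabled) {		/* double disable is tolerated */
			list_del_rcu(&o->node);
			o->enabled = false;
		}
		raw_spin_unlock(&lock);
	}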
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 8ffb089b19a5..b86ebb1a9a7f 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 
 /* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
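Editor's note: both the ARM and x86 hunks drop the -1 initializer along with the signed type. Once the per-CPU id is a uint32_t there is no usable -1 sentinel anyway, since assigning -1 just wraps, as this standalone snippet shows (illustrative only):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int signed_id = -1;
		uint32_t id = signed_id;	/* implicit conversion wraps */

		printf("%u\n", id);		/* prints 4294967295 */
		return 0;
	}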
diff --git a/block/bio.c b/block/bio.c
index f39477538fef..aa7354088008 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -667,18 +667,19 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
 
-	if (bio_op(bio) == REQ_OP_DISCARD)
-		goto integrity_clone;
-
-	if (bio_op(bio) == REQ_OP_WRITE_SAME) {
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
+		break;
+	case REQ_OP_WRITE_SAME:
 		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
-		goto integrity_clone;
+		break;
+	default:
+		bio_for_each_segment(bv, bio_src, iter)
+			bio->bi_io_vec[bio->bi_vcnt++] = bv;
+		break;
 	}
 
-	bio_for_each_segment(bv, bio_src, iter)
-		bio->bi_io_vec[bio->bi_vcnt++] = bv;
-
-integrity_clone:
 	if (bio_integrity(bio_src)) {
 		int ret;
 
@@ -1788,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 	 * Discards need a mutable bio_vec to accommodate the payload
 	 * required by the DSM TRIM and UNMAP commands.
 	 */
-	if (bio_op(bio) == REQ_OP_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
 		split = bio_clone_bioset(bio, gfp, bs);
 	else
 		split = bio_clone_fast(bio, gfp, bs);
diff --git a/block/blk-core.c b/block/blk-core.c
index 999442ec4601..36c7ac328d8c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
 void blk_set_queue_dying(struct request_queue *q)
 {
-	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+	spin_lock_irq(q->queue_lock);
+	queue_flag_set(QUEUE_FLAG_DYING, q);
+	spin_unlock_irq(q->queue_lock);
 
 	if (q->mq_ops)
 		blk_mq_wake_waiters(q);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3eec75a9e91d..2642e5fc8b69 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -94,9 +94,31 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
+	unsigned bvecs = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
 		/*
+		 * With arbitrary bio size, the incoming bio may be very
+		 * big. We have to split the bio into small bios so that
+		 * each holds at most BIO_MAX_PAGES bvecs because
+		 * bio_clone() can fail to allocate big bvecs.
+		 *
+		 * It should have been better to apply the limit per
+		 * request queue in which bio_clone() is involved,
+		 * instead of globally. The biggest blocker is the
+		 * bio_clone() in bio bounce.
+		 *
+		 * If bio is splitted by this reason, we should have
+		 * allowed to continue bios merging, but don't do
+		 * that now for making the change simple.
+		 *
+		 * TODO: deal with bio bounce's bio_clone() gracefully
+		 * and convert the global limit into per-queue limit.
+		 */
+		if (bvecs++ >= BIO_MAX_PAGES)
+			goto split;
+
+		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
@@ -172,12 +194,18 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
 	struct bio *split, *res;
 	unsigned nsegs;
 
-	if (bio_op(*bio) == REQ_OP_DISCARD)
+	switch (bio_op(*bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
 		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
-	else if (bio_op(*bio) == REQ_OP_WRITE_SAME)
+		break;
+	case REQ_OP_WRITE_SAME:
 		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
-	else
+		break;
+	default:
 		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+		break;
+	}
 
 	/* physical segments can be figured out during splitting */
 	res = split ? split : *bio;
@@ -213,7 +241,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	 * This should probably be returning 0, but blk_add_request_payload()
 	 * (Christoph!!!!)
 	 */
-	if (bio_op(bio) == REQ_OP_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
 		return 1;
 
 	if (bio_op(bio) == REQ_OP_WRITE_SAME)
@@ -385,7 +413,9 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 	nsegs = 0;
 	cluster = blk_queue_cluster(q);
 
-	if (bio_op(bio) == REQ_OP_DISCARD) {
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
 		/*
 		 * This is a hack - drivers should be neither modifying the
 		 * biovec, nor relying on bi_vcnt - but because of
@@ -393,19 +423,16 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 		 * a payload we need to set up here (thank you Christoph) and
 		 * bi_vcnt is really the only way of telling if we need to.
 		 */
-
-		if (bio->bi_vcnt)
-			goto single_segment;
-
-		return 0;
-	}
-
-	if (bio_op(bio) == REQ_OP_WRITE_SAME) {
-single_segment:
+		if (!bio->bi_vcnt)
+			return 0;
+		/* Fall through */
+	case REQ_OP_WRITE_SAME:
 		*sg = sglist;
 		bvec = bio_iovec(bio);
 		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
 		return 1;
+	default:
+		break;
 	}
 
 	for_each_bio(bio)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e931a0e8e73d..13f5a6c1de76 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -793,11 +793,12 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	struct list_head *dptr;
 	int queued;
 
-	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
-
 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 		return;
 
+	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+		cpu_online(hctx->next_cpu));
+
 	hctx->run++;
 
 	/*
@@ -1036,10 +1037,11 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
-					    struct blk_mq_ctx *ctx,
 					    struct request *rq,
 					    bool at_head)
 {
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+
 	trace_block_rq_insert(hctx->queue, rq);
 
 	if (at_head)
@@ -1053,20 +1055,16 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
-	__blk_mq_insert_req_list(hctx, ctx, rq, at_head);
+	__blk_mq_insert_req_list(hctx, rq, at_head);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 		bool async)
 {
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-	if (!cpu_online(ctx->cpu))
-		rq->mq_ctx = ctx = current_ctx;
 
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -1076,8 +1074,6 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
-
-	blk_mq_put_ctx(current_ctx);
 }
 
 static void blk_mq_insert_requests(struct request_queue *q,
@@ -1088,14 +1084,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
 
 {
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *current_ctx;
 
 	trace_block_unplug(q, depth, !from_schedule);
 
-	current_ctx = blk_mq_get_ctx(q);
-
-	if (!cpu_online(ctx->cpu))
-		ctx = current_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
 	/*
@@ -1107,15 +1098,14 @@ static void blk_mq_insert_requests(struct request_queue *q,
 		struct request *rq;
 
 		rq = list_first_entry(list, struct request, queuelist);
+		BUG_ON(rq->mq_ctx != ctx);
 		list_del_init(&rq->queuelist);
-		rq->mq_ctx = ctx;
-		__blk_mq_insert_req_list(hctx, ctx, rq, false);
+		__blk_mq_insert_req_list(hctx, rq, false);
 	}
 	blk_mq_hctx_mark_pending(hctx, ctx);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_run_hw_queue(hctx, from_schedule);
-	blk_mq_put_ctx(current_ctx);
 }
 
 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
@@ -1630,16 +1620,17 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
 	return 0;
 }
 
+/*
+ * 'cpu' is going away. splice any existing rq_list entries from this
+ * software queue to the hw queue dispatch list, and ensure that it
+ * gets run.
+ */
 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 {
-	struct request_queue *q = hctx->queue;
 	struct blk_mq_ctx *ctx;
 	LIST_HEAD(tmp);
 
-	/*
-	 * Move ctx entries to new CPU, if this one is going away.
-	 */
-	ctx = __blk_mq_get_ctx(q, cpu);
+	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
 
 	spin_lock(&ctx->lock);
 	if (!list_empty(&ctx->rq_list)) {
@@ -1651,24 +1642,11 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 	if (list_empty(&tmp))
 		return NOTIFY_OK;
 
-	ctx = blk_mq_get_ctx(q);
-	spin_lock(&ctx->lock);
-
-	while (!list_empty(&tmp)) {
-		struct request *rq;
-
-		rq = list_first_entry(&tmp, struct request, queuelist);
-		rq->mq_ctx = ctx;
-		list_move_tail(&rq->queuelist, &ctx->rq_list);
-	}
-
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-	blk_mq_hctx_mark_pending(hctx, ctx);
-
-	spin_unlock(&ctx->lock);
+	spin_lock(&hctx->lock);
+	list_splice_tail_init(&tmp, &hctx->dispatch);
+	spin_unlock(&hctx->lock);
 
 	blk_mq_run_hw_queue(hctx, true);
-	blk_mq_put_ctx(ctx);
 	return NOTIFY_OK;
 }
 
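Editor's note: the rewritten offline handler leans on the splice helpers to move the whole software queue in O(1) instead of re-homing requests one by one. A minimal sketch of that drain idiom, assuming hypothetical src/dst structures that each carry their own lock and list:

	LIST_HEAD(tmp);

	spin_lock(&src->lock);
	list_splice_init(&src->list, &tmp);		/* src->list is now empty */
	spin_unlock(&src->lock);

	spin_lock(&dst->lock);
	list_splice_tail_init(&tmp, &dst->list);	/* append; tmp reinitialized */
	spin_unlock(&dst->lock);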
diff --git a/block/elevator.c b/block/elevator.c
index 7096c22041e7..f7d973a56fd7 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -366,7 +366,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
-		if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD))
+		if (req_op(rq) != req_op(pos))
 			break;
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b71a9c767009..e3d8e4ced4a2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3706,22 +3706,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	if (UFDCS->rawcmd == 1)
 		UFDCS->rawcmd = 2;
 
-	if (mode & (FMODE_READ|FMODE_WRITE)) {
-		UDRS->last_checked = 0;
-		clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
-		check_disk_change(bdev);
-		if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
-			goto out;
-		if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+	if (!(mode & FMODE_NDELAY)) {
+		if (mode & (FMODE_READ|FMODE_WRITE)) {
+			UDRS->last_checked = 0;
+			clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+			check_disk_change(bdev);
+			if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+				goto out;
+			if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+				goto out;
+		}
+		res = -EROFS;
+		if ((mode & FMODE_WRITE) &&
+		    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
 			goto out;
 	}
-
-	res = -EROFS;
-
-	if ((mode & FMODE_WRITE) &&
-	    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
-		goto out;
-
 	mutex_unlock(&open_lock);
 	mutex_unlock(&floppy_mutex);
 	return 0;
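Editor's note: the FMODE_NDELAY guard restores the long-standing floppy contract that an O_NDELAY open skips the media-change and write-protect checks, so userspace can probe a drive with no disk inserted. A hypothetical probe (assumes /dev/fd0 exists; illustrative only):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* O_NDELAY: open for ioctl-style probing, no media required */
		int fd = open("/dev/fd0", O_RDONLY | O_NDELAY);

		if (fd < 0) {
			perror("open /dev/fd0");
			return 1;
		}
		close(fd);
		return 0;
	}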
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index be4fea6a5dd3..88ef6d4729b4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -189,6 +189,8 @@ struct blkfront_info
 	struct mutex mutex;
 	struct xenbus_device *xbdev;
 	struct gendisk *gd;
+	u16 sector_size;
+	unsigned int physical_sector_size;
 	int vdevice;
 	blkif_vdev_t handle;
 	enum blkif_state connected;
@@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
 	.map_queue = blk_mq_map_queue,
 };
 
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+	struct request_queue *rq = info->rq;
+	struct gendisk *gd = info->gd;
+	unsigned int segments = info->max_indirect_segments ? :
+				BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+	if (info->feature_discard) {
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_max_discard_sectors(rq, get_capacity(gd));
+		rq->limits.discard_granularity = info->discard_granularity;
+		rq->limits.discard_alignment = info->discard_alignment;
+		if (info->feature_secdiscard)
+			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+	}
+
+	/* Hard sector size and max sectors impersonate the equiv. hardware. */
+	blk_queue_logical_block_size(rq, info->sector_size);
+	blk_queue_physical_block_size(rq, info->physical_sector_size);
+	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+	/* Each segment in a request is up to an aligned page in size. */
+	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+	blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+	/* Ensure a merged request will fit in a single I/O ring slot. */
+	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+	/* Make sure buffer addresses are sector-aligned. */
+	blk_queue_dma_alignment(rq, 511);
+
+	/* Make sure we don't use bounce buffers. */
+	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
-				unsigned int physical_sector_size,
-				unsigned int segments)
+				unsigned int physical_sector_size)
 {
 	struct request_queue *rq;
 	struct blkfront_info *info = gd->private_data;
@@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 	}
 
 	rq->queuedata = info;
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
-	if (info->feature_discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
-		blk_queue_max_discard_sectors(rq, get_capacity(gd));
-		rq->limits.discard_granularity = info->discard_granularity;
-		rq->limits.discard_alignment = info->discard_alignment;
-		if (info->feature_secdiscard)
-			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
-	}
-
-	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-	blk_queue_logical_block_size(rq, sector_size);
-	blk_queue_physical_block_size(rq, physical_sector_size);
-	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
-	/* Each segment in a request is up to an aligned page in size. */
-	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-	blk_queue_max_segment_size(rq, PAGE_SIZE);
-
-	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
-	/* Make sure buffer addresses are sector-aligned. */
-	blk_queue_dma_alignment(rq, 511);
-
-	/* Make sure we don't use bounce buffers. */
-	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
-	gd->queue = rq;
+	info->rq = gd->queue = rq;
+	info->gd = gd;
+	info->sector_size = sector_size;
+	info->physical_sector_size = physical_sector_size;
+	blkif_set_queue_limits(info);
 
 	return 0;
 }
@@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	gd->private_data = info;
 	set_capacity(gd, capacity);
 
-	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
-				 info->max_indirect_segments ? :
-				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
 		del_gendisk(gd);
 		goto release;
 	}
 
-	info->rq = gd->queue;
-	info->gd = gd;
-
 	xlvbd_flush(info);
 
 	if (vdisk_info & VDISK_READONLY)
@@ -1315,7 +1323,7 @@ free_shadow:
 			rinfo->ring_ref[i] = GRANT_INVALID_REF;
 		}
 	}
-	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
+	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
 	rinfo->ring.sring = NULL;
 
 	if (rinfo->irq)
@@ -2007,8 +2015,10 @@ static int blkif_recover(struct blkfront_info *info)
 	struct split_bio *split_bio;
 
 	blkfront_gather_backend_features(info);
+	/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
+	blkif_set_queue_limits(info);
 	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
-	blk_queue_max_segments(info->rq, segs);
+	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
 
 	for (r_index = 0; r_index < info->nr_rings; r_index++) {
 		struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
@@ -2432,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	if (err) {
 		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
 				 info->xbdev->otherend);
-		return;
+		goto fail;
 	}
 
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
@@ -2445,6 +2455,11 @@ static void blkfront_connect(struct blkfront_info *info)
 	device_add_disk(&info->xbdev->dev, info->gd);
 
 	info->is_ready = 1;
+	return;
+
+fail:
+	blkif_free(info, 0);
+	return;
 }
 
 /**
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 98dd47a30fc7..66a94103798b 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -50,6 +50,7 @@ config GPIO_DEVRES
 config OF_GPIO
 	def_bool y
 	depends on OF
+	depends on HAS_IOMEM
 
 config GPIO_ACPI
 	def_bool y
@@ -188,7 +189,7 @@ config GPIO_EP93XX
 config GPIO_ETRAXFS
 	bool "Axis ETRAX FS General I/O"
 	depends on CRIS || COMPILE_TEST
-	depends on OF
+	depends on OF_GPIO
 	select GPIO_GENERIC
 	select GPIOLIB_IRQCHIP
 	help
@@ -214,7 +215,7 @@ config GPIO_GENERIC_PLATFORM
 
 config GPIO_GRGPIO
 	tristate "Aeroflex Gaisler GRGPIO support"
-	depends on OF
+	depends on OF_GPIO
 	select GPIO_GENERIC
 	select IRQ_DOMAIN
 	help
@@ -312,7 +313,7 @@ config GPIO_MPC8XXX
 config GPIO_MVEBU
 	def_bool y
 	depends on PLAT_ORION
-	depends on OF
+	depends on OF_GPIO
 	select GENERIC_IRQ_CHIP
 
 config GPIO_MXC
@@ -405,7 +406,7 @@ config GPIO_TEGRA
 	bool "NVIDIA Tegra GPIO support"
 	default ARCH_TEGRA
 	depends on ARCH_TEGRA || COMPILE_TEST
-	depends on OF
+	depends on OF_GPIO
 	help
 	  Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
 
@@ -1099,7 +1100,7 @@ menu "SPI GPIO expanders"
 
 config GPIO_74X164
 	tristate "74x164 serial-in/parallel-out 8-bits shift register"
-	depends on OF
+	depends on OF_GPIO
 	help
 	  Driver for 74x164 compatible serial-in/parallel-out 8-outputs
 	  shift registers.  This driver can be used to provide access
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 08807368f007..946d09195598 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -192,6 +192,10 @@ int __max730x_probe(struct max7301 *ts)
 	ts->chip.parent = dev;
 	ts->chip.owner = THIS_MODULE;
 
+	ret = gpiochip_add_data(&ts->chip, ts);
+	if (ret)
+		goto exit_destroy;
+
 	/*
 	 * initialize pullups according to platform data and cache the
 	 * register values for later use.
@@ -213,10 +217,6 @@ int __max730x_probe(struct max7301 *ts)
 		}
 	}
 
-	ret = gpiochip_add_data(&ts->chip, ts);
-	if (ret)
-		goto exit_destroy;
-
 	return ret;
 
 exit_destroy:
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 7d61439be5f2..0c07e1023a46 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
 	/* Reset the KBC controller to clear all previous status.*/
 	reset_control_assert(kbc->rst);
 	udelay(100);
-	reset_control_assert(kbc->rst);
+	reset_control_deassert(kbc->rst);
 	udelay(100);
 
 	tegra_kbc_config_pins(kbc);
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index faa295ec4f31..c83bce89028b 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -553,7 +553,6 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
 		goto free_struct_buff;
 
 	reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
-	map_offset = 0;
 	for (i = 0; i < rdesc->num_registers; i++) {
 		struct rmi_register_desc_item *item = &rdesc->registers[i];
 		int reg_size = struct_buf[offset];
@@ -576,6 +575,8 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
 		item->reg = reg;
 		item->reg_size = reg_size;
 
+		map_offset = 0;
+
 		do {
 			for (b = 0; b < 7; b++) {
 				if (struct_buf[offset] & (0x1 << b))
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index b4d34086e73f..405252a884dd 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
@@ -1305,6 +1305,7 @@ static int __init i8042_create_aux_port(int idx) | |||
1305 | serio->write = i8042_aux_write; | 1305 | serio->write = i8042_aux_write; |
1306 | serio->start = i8042_start; | 1306 | serio->start = i8042_start; |
1307 | serio->stop = i8042_stop; | 1307 | serio->stop = i8042_stop; |
1308 | serio->ps2_cmd_mutex = &i8042_mutex; | ||
1308 | serio->port_data = port; | 1309 | serio->port_data = port; |
1309 | serio->dev.parent = &i8042_platform_device->dev; | 1310 | serio->dev.parent = &i8042_platform_device->dev; |
1310 | if (idx < 0) { | 1311 | if (idx < 0) { |
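
The KBD and AUX ports of an i8042 sit behind one controller, so PS/2 command/response sequences on the two serio ports must not interleave. The new serio->ps2_cmd_mutex pointer lets both ports share the controller-wide i8042_mutex; the consuming side in libps2 would then prefer the shared lock over the per-device one, roughly like this (the exact libps2 plumbing is assumed, it is not shown in this hunk):

	struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
	mutex_lock(m);	/* serialize the whole command sequence */
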
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index a61b2153ab8c..1ce3ecbe37f8 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -1473,7 +1473,6 @@ static int ads7846_remove(struct spi_device *spi) | |||
1473 | 1473 | ||
1474 | ads784x_hwmon_unregister(spi, ts); | 1474 | ads784x_hwmon_unregister(spi, ts); |
1475 | 1475 | ||
1476 | regulator_disable(ts->reg); | ||
1477 | regulator_put(ts->reg); | 1476 | regulator_put(ts->reg); |
1478 | 1477 | ||
1479 | if (!ts->get_pendown_state) { | 1478 | if (!ts->get_pendown_state) { |
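
regulator_enable()/regulator_disable() are refcounted and must balance one-to-one; this regulator is already disabled on the device's disable path by the time remove() runs, so the second regulator_disable() underflowed the use count. The invariant being restored:

	ret = regulator_enable(ts->reg);	/* use count +1 */
	...
	regulator_disable(ts->reg);		/* -1, exactly once per enable */
	regulator_put(ts->reg);			/* only after the count is balanced */
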
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index 7379fe153cf9..b2744a64e933 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c | |||
@@ -464,7 +464,7 @@ static int silead_ts_probe(struct i2c_client *client, | |||
464 | return -ENODEV; | 464 | return -ENODEV; |
465 | 465 | ||
466 | /* Power GPIO pin */ | 466 | /* Power GPIO pin */ |
467 | data->gpio_power = gpiod_get_optional(dev, "power", GPIOD_OUT_LOW); | 467 | data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW); |
468 | if (IS_ERR(data->gpio_power)) { | 468 | if (IS_ERR(data->gpio_power)) { |
469 | if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER) | 469 | if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER) |
470 | dev_err(dev, "Shutdown GPIO request failed\n"); | 470 | dev_err(dev, "Shutdown GPIO request failed\n"); |
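
Switching to the devm_ variant ties the GPIO descriptor's lifetime to the device, so it is released automatically on probe failure and on unbind; the plain gpiod_get_optional() descriptor was leaked because no path called gpiod_put(). The pattern:

	data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
	if (IS_ERR(data->gpio_power))
		return PTR_ERR(data->gpio_power);	/* no gpiod_put() needed anywhere */
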
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index ce801170d5f2..641e88761319 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
@@ -879,7 +879,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) | |||
879 | * We may have concurrent producers, so we need to be careful | 879 | * We may have concurrent producers, so we need to be careful |
880 | * not to touch any of the shadow cmdq state. | 880 | * not to touch any of the shadow cmdq state. |
881 | */ | 881 | */ |
882 | queue_read(cmd, Q_ENT(q, idx), q->ent_dwords); | 882 | queue_read(cmd, Q_ENT(q, cons), q->ent_dwords); |
883 | dev_err(smmu->dev, "skipping command in error state:\n"); | 883 | dev_err(smmu->dev, "skipping command in error state:\n"); |
884 | for (i = 0; i < ARRAY_SIZE(cmd); ++i) | 884 | for (i = 0; i < ARRAY_SIZE(cmd); ++i) |
885 | dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); | 885 | dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); |
@@ -890,7 +890,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) | |||
890 | return; | 890 | return; |
891 | } | 891 | } |
892 | 892 | ||
893 | queue_write(cmd, Q_ENT(q, idx), q->ent_dwords); | 893 | queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); |
894 | } | 894 | } |
895 | 895 | ||
896 | static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, | 896 | static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, |
@@ -1034,6 +1034,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, | |||
1034 | case STRTAB_STE_0_CFG_S2_TRANS: | 1034 | case STRTAB_STE_0_CFG_S2_TRANS: |
1035 | ste_live = true; | 1035 | ste_live = true; |
1036 | break; | 1036 | break; |
1037 | case STRTAB_STE_0_CFG_ABORT: | ||
1038 | if (disable_bypass) | ||
1039 | break; | ||
1037 | default: | 1040 | default: |
1038 | BUG(); /* STE corruption */ | 1041 | BUG(); /* STE corruption */ |
1039 | } | 1042 | } |
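
Two SMMUv3 fixes in one hunk: the error handler must read and rewrite the command-queue entry at the consumer index (the entry the hardware actually stopped on), and an STE found in the ABORT state is expected rather than corrupt when disable_bypass is set, since that is how unconfigured streams are parked. The queue half, condensed (cons is derived from the hardware consumer pointer earlier in the function):

	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);	/* the failed command */
	...
	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);	/* overwrite it in place */
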
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 4f49fe29f202..2db74ebc3240 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -686,8 +686,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = { | |||
686 | 686 | ||
687 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | 687 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) |
688 | { | 688 | { |
689 | int flags, ret; | 689 | u32 fsr, fsynr; |
690 | u32 fsr, fsynr, resume; | ||
691 | unsigned long iova; | 690 | unsigned long iova; |
692 | struct iommu_domain *domain = dev; | 691 | struct iommu_domain *domain = dev; |
693 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 692 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
@@ -701,34 +700,15 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | |||
701 | if (!(fsr & FSR_FAULT)) | 700 | if (!(fsr & FSR_FAULT)) |
702 | return IRQ_NONE; | 701 | return IRQ_NONE; |
703 | 702 | ||
704 | if (fsr & FSR_IGN) | ||
705 | dev_err_ratelimited(smmu->dev, | ||
706 | "Unexpected context fault (fsr 0x%x)\n", | ||
707 | fsr); | ||
708 | |||
709 | fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); | 703 | fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); |
710 | flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; | ||
711 | |||
712 | iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR); | 704 | iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR); |
713 | if (!report_iommu_fault(domain, smmu->dev, iova, flags)) { | ||
714 | ret = IRQ_HANDLED; | ||
715 | resume = RESUME_RETRY; | ||
716 | } else { | ||
717 | dev_err_ratelimited(smmu->dev, | ||
718 | "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n", | ||
719 | iova, fsynr, cfg->cbndx); | ||
720 | ret = IRQ_NONE; | ||
721 | resume = RESUME_TERMINATE; | ||
722 | } | ||
723 | |||
724 | /* Clear the faulting FSR */ | ||
725 | writel(fsr, cb_base + ARM_SMMU_CB_FSR); | ||
726 | 705 | ||
727 | /* Retry or terminate any stalled transactions */ | 706 | dev_err_ratelimited(smmu->dev, |
728 | if (fsr & FSR_SS) | 707 | "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n", |
729 | writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME); | 708 | fsr, iova, fsynr, cfg->cbndx); |
730 | 709 | ||
731 | return ret; | 710 | writel(fsr, cb_base + ARM_SMMU_CB_FSR); |
711 | return IRQ_HANDLED; | ||
732 | } | 712 | } |
733 | 713 | ||
734 | static irqreturn_t arm_smmu_global_fault(int irq, void *dev) | 714 | static irqreturn_t arm_smmu_global_fault(int irq, void *dev) |
@@ -837,7 +817,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
837 | } | 817 | } |
838 | 818 | ||
839 | /* SCTLR */ | 819 | /* SCTLR */ |
840 | reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; | 820 | reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; |
841 | if (stage1) | 821 | if (stage1) |
842 | reg |= SCTLR_S1_ASIDPNE; | 822 | reg |= SCTLR_S1_ASIDPNE; |
843 | #ifdef __BIG_ENDIAN | 823 | #ifdef __BIG_ENDIAN |
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 8c6139986d7d..def8ca1c982d 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c | |||
@@ -286,12 +286,14 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl) | |||
286 | int prot = IOMMU_READ; | 286 | int prot = IOMMU_READ; |
287 | arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl); | 287 | arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl); |
288 | 288 | ||
289 | if (attr & ARM_V7S_PTE_AP_RDONLY) | 289 | if (!(attr & ARM_V7S_PTE_AP_RDONLY)) |
290 | prot |= IOMMU_WRITE; | 290 | prot |= IOMMU_WRITE; |
291 | if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0) | 291 | if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0) |
292 | prot |= IOMMU_MMIO; | 292 | prot |= IOMMU_MMIO; |
293 | else if (pte & ARM_V7S_ATTR_C) | 293 | else if (pte & ARM_V7S_ATTR_C) |
294 | prot |= IOMMU_CACHE; | 294 | prot |= IOMMU_CACHE; |
295 | if (pte & ARM_V7S_ATTR_XN(lvl)) | ||
296 | prot |= IOMMU_NOEXEC; | ||
295 | 297 | ||
296 | return prot; | 298 | return prot; |
297 | } | 299 | } |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 95a4ca6ce6ff..849ad441cd76 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -760,7 +760,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, | |||
760 | if (!d->nr_stripes || | 760 | if (!d->nr_stripes || |
761 | d->nr_stripes > INT_MAX || | 761 | d->nr_stripes > INT_MAX || |
762 | d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { | 762 | d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { |
763 | pr_err("nr_stripes too large"); | 763 | pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", |
764 | (unsigned)d->nr_stripes); | ||
764 | return -ENOMEM; | 765 | return -ENOMEM; |
765 | } | 766 | } |
766 | 767 | ||
@@ -1820,7 +1821,7 @@ static int cache_alloc(struct cache *ca) | |||
1820 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; | 1821 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; |
1821 | 1822 | ||
1822 | if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || | 1823 | if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || |
1823 | !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || | 1824 | !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || |
1824 | !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || | 1825 | !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || |
1825 | !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || | 1826 | !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || |
1826 | !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || | 1827 | !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || |
@@ -1844,7 +1845,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1844 | struct block_device *bdev, struct cache *ca) | 1845 | struct block_device *bdev, struct cache *ca) |
1845 | { | 1846 | { |
1846 | char name[BDEVNAME_SIZE]; | 1847 | char name[BDEVNAME_SIZE]; |
1847 | const char *err = NULL; | 1848 | const char *err = NULL; /* must be set for any error case */ |
1848 | int ret = 0; | 1849 | int ret = 0; |
1849 | 1850 | ||
1850 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); | 1851 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); |
@@ -1861,8 +1862,13 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1861 | ca->discard = CACHE_DISCARD(&ca->sb); | 1862 | ca->discard = CACHE_DISCARD(&ca->sb); |
1862 | 1863 | ||
1863 | ret = cache_alloc(ca); | 1864 | ret = cache_alloc(ca); |
1864 | if (ret != 0) | 1865 | if (ret != 0) { |
1866 | if (ret == -ENOMEM) | ||
1867 | err = "cache_alloc(): -ENOMEM"; | ||
1868 | else | ||
1869 | err = "cache_alloc(): unknown error"; | ||
1865 | goto err; | 1870 | goto err; |
1871 | } | ||
1866 | 1872 | ||
1867 | if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { | 1873 | if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { |
1868 | err = "error calling kobject_add"; | 1874 | err = "error calling kobject_add"; |
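
register_cache()'s err label logs the string in err, so every goto err has to set it first; the cache_alloc() failure previously jumped there with err still NULL (hence the new "must be set for any error case" comment). The shape of the convention:

	ret = cache_alloc(ca);
	if (ret != 0) {
		err = (ret == -ENOMEM) ? "cache_alloc(): -ENOMEM"
				       : "cache_alloc(): unknown error";
		goto err;		/* the err label dereferences this string */
	}
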
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 97e446d54a15..6a2e8dd44a1b 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c | |||
@@ -289,15 +289,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) | |||
289 | pb->bio_submitted = true; | 289 | pb->bio_submitted = true; |
290 | 290 | ||
291 | /* | 291 | /* |
292 | * Map reads as normal only if corrupt_bio_byte set. | 292 | * Error reads if neither corrupt_bio_byte or drop_writes are set. |
293 | * Otherwise, flakey_end_io() will decide if the reads should be modified. | ||
293 | */ | 294 | */ |
294 | if (bio_data_dir(bio) == READ) { | 295 | if (bio_data_dir(bio) == READ) { |
295 | /* If flags were specified, only corrupt those that match. */ | 296 | if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags)) |
296 | if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && | ||
297 | all_corrupt_bio_flags_match(bio, fc)) | ||
298 | goto map_bio; | ||
299 | else | ||
300 | return -EIO; | 297 | return -EIO; |
298 | goto map_bio; | ||
301 | } | 299 | } |
302 | 300 | ||
303 | /* | 301 | /* |
@@ -334,14 +332,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
334 | struct flakey_c *fc = ti->private; | 332 | struct flakey_c *fc = ti->private; |
335 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); | 333 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); |
336 | 334 | ||
337 | /* | ||
338 | * Corrupt successful READs while in down state. | ||
339 | */ | ||
340 | if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { | 335 | if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { |
341 | if (fc->corrupt_bio_byte) | 336 | if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && |
337 | all_corrupt_bio_flags_match(bio, fc)) { | ||
338 | /* | ||
339 | * Corrupt successful matching READs while in down state. | ||
340 | */ | ||
342 | corrupt_bio_data(bio, fc); | 341 | corrupt_bio_data(bio, fc); |
343 | else | 342 | |
343 | } else if (!test_bit(DROP_WRITES, &fc->flags)) { | ||
344 | /* | ||
345 | * Error read during the down_interval if drop_writes | ||
346 | * wasn't configured. | ||
347 | */ | ||
344 | return -EIO; | 348 | return -EIO; |
349 | } | ||
345 | } | 350 | } |
346 | 351 | ||
347 | return error; | 352 | return error; |
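
The flakey target's read handling is now split cleanly between the two hooks: flakey_map() fails a down-interval READ outright only when neither corrupt_bio_byte nor drop_writes is configured (nothing later could act on it), and otherwise lets the read through so flakey_end_io() can corrupt successful READs whose direction and flags match, or error them when drop_writes is unset. Condensed, the completion side decides like this:

	if (!error && pb->bio_submitted && bio_data_dir(bio) == READ) {
		if (fc->corrupt_bio_byte && fc->corrupt_bio_rw == READ &&
		    all_corrupt_bio_flags_match(bio, fc))
			corrupt_bio_data(bio, fc);	/* matching read: corrupt */
		else if (!test_bit(DROP_WRITES, &fc->flags))
			return -EIO;			/* otherwise fail it */
	}
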
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 4ca2d1df5b44..07fc1ad42ec5 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c | |||
@@ -291,9 +291,10 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis | |||
291 | core->nr_regions = le64_to_cpu(disk->nr_regions); | 291 | core->nr_regions = le64_to_cpu(disk->nr_regions); |
292 | } | 292 | } |
293 | 293 | ||
294 | static int rw_header(struct log_c *lc, int rw) | 294 | static int rw_header(struct log_c *lc, int op) |
295 | { | 295 | { |
296 | lc->io_req.bi_op = rw; | 296 | lc->io_req.bi_op = op; |
297 | lc->io_req.bi_op_flags = 0; | ||
297 | 298 | ||
298 | return dm_io(&lc->io_req, 1, &lc->header_location, NULL); | 299 | return dm_io(&lc->io_req, 1, &lc->header_location, NULL); |
299 | } | 300 | } |
@@ -316,7 +317,7 @@ static int read_header(struct log_c *log) | |||
316 | { | 317 | { |
317 | int r; | 318 | int r; |
318 | 319 | ||
319 | r = rw_header(log, READ); | 320 | r = rw_header(log, REQ_OP_READ); |
320 | if (r) | 321 | if (r) |
321 | return r; | 322 | return r; |
322 | 323 | ||
@@ -630,7 +631,7 @@ static int disk_resume(struct dm_dirty_log *log) | |||
630 | header_to_disk(&lc->header, lc->disk_header); | 631 | header_to_disk(&lc->header, lc->disk_header); |
631 | 632 | ||
632 | /* write the new header */ | 633 | /* write the new header */ |
633 | r = rw_header(lc, WRITE); | 634 | r = rw_header(lc, REQ_OP_WRITE); |
634 | if (!r) { | 635 | if (!r) { |
635 | r = flush_header(lc); | 636 | r = flush_header(lc); |
636 | if (r) | 637 | if (r) |
@@ -698,7 +699,7 @@ static int disk_flush(struct dm_dirty_log *log) | |||
698 | log_clear_bit(lc, lc->clean_bits, i); | 699 | log_clear_bit(lc, lc->clean_bits, i); |
699 | } | 700 | } |
700 | 701 | ||
701 | r = rw_header(lc, WRITE); | 702 | r = rw_header(lc, REQ_OP_WRITE); |
702 | if (r) | 703 | if (r) |
703 | fail_log_device(lc); | 704 | fail_log_device(lc); |
704 | else { | 705 | else { |
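
After the block layer split operations from flags, the dm-io request fields carry a REQ_OP_* value plus a separate flags word; funnelling the legacy READ/WRITE constants into bi_op no longer means the right thing. The call sites become:

	lc->io_req.bi_op = REQ_OP_WRITE;	/* an operation, not the old rw flag */
	lc->io_req.bi_op_flags = 0;		/* modifiers (sync, flush, ...) go here */
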
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 48a5dd740f3b..2206d4477dbb 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -1726,6 +1726,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) | |||
1726 | break; | 1726 | break; |
1727 | 1727 | ||
1728 | if (req_op(next) == REQ_OP_DISCARD || | 1728 | if (req_op(next) == REQ_OP_DISCARD || |
1729 | req_op(next) == REQ_OP_SECURE_ERASE || | ||
1729 | req_op(next) == REQ_OP_FLUSH) | 1730 | req_op(next) == REQ_OP_FLUSH) |
1730 | break; | 1731 | break; |
1731 | 1732 | ||
@@ -2150,6 +2151,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
2150 | struct mmc_card *card = md->queue.card; | 2151 | struct mmc_card *card = md->queue.card; |
2151 | struct mmc_host *host = card->host; | 2152 | struct mmc_host *host = card->host; |
2152 | unsigned long flags; | 2153 | unsigned long flags; |
2154 | bool req_is_special = mmc_req_is_special(req); | ||
2153 | 2155 | ||
2154 | if (req && !mq->mqrq_prev->req) | 2156 | if (req && !mq->mqrq_prev->req) |
2155 | /* claim host only for the first request */ | 2157 | /* claim host only for the first request */ |
@@ -2190,8 +2192,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
2190 | } | 2192 | } |
2191 | 2193 | ||
2192 | out: | 2194 | out: |
2193 | if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || | 2195 | if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special) |
2194 | mmc_req_is_special(req)) | ||
2195 | /* | 2196 | /* |
2196 | * Release host when there are no more requests | 2197 | * Release host when there are no more requests |
2197 | * and after special request(discard, flush) is done. | 2198 | * and after special request(discard, flush) is done. |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index bf14642a576a..708057261b38 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -33,7 +33,8 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) | |||
33 | /* | 33 | /* |
34 | * We only like normal block requests and discards. | 34 | * We only like normal block requests and discards. |
35 | */ | 35 | */ |
36 | if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) { | 36 | if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD && |
37 | req_op(req) != REQ_OP_SECURE_ERASE) { | ||
37 | blk_dump_rq_flags(req, "MMC bad request"); | 38 | blk_dump_rq_flags(req, "MMC bad request"); |
38 | return BLKPREP_KILL; | 39 | return BLKPREP_KILL; |
39 | } | 40 | } |
@@ -64,6 +65,8 @@ static int mmc_queue_thread(void *d) | |||
64 | spin_unlock_irq(q->queue_lock); | 65 | spin_unlock_irq(q->queue_lock); |
65 | 66 | ||
66 | if (req || mq->mqrq_prev->req) { | 67 | if (req || mq->mqrq_prev->req) { |
68 | bool req_is_special = mmc_req_is_special(req); | ||
69 | |||
67 | set_current_state(TASK_RUNNING); | 70 | set_current_state(TASK_RUNNING); |
68 | mq->issue_fn(mq, req); | 71 | mq->issue_fn(mq, req); |
69 | cond_resched(); | 72 | cond_resched(); |
@@ -79,7 +82,7 @@ static int mmc_queue_thread(void *d) | |||
79 | * has been finished. Do not assign it to previous | 82 | * has been finished. Do not assign it to previous |
80 | * request. | 83 | * request. |
81 | */ | 84 | */ |
82 | if (mmc_req_is_special(req)) | 85 | if (req_is_special) |
83 | mq->mqrq_cur->req = NULL; | 86 | mq->mqrq_cur->req = NULL; |
84 | 87 | ||
85 | mq->mqrq_prev->brq.mrq.data = NULL; | 88 | mq->mqrq_prev->brq.mrq.data = NULL; |
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index d62531124d54..fee5e1271465 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h | |||
@@ -4,7 +4,9 @@ | |||
4 | static inline bool mmc_req_is_special(struct request *req) | 4 | static inline bool mmc_req_is_special(struct request *req) |
5 | { | 5 | { |
6 | return req && | 6 | return req && |
7 | (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD); | 7 | (req_op(req) == REQ_OP_FLUSH || |
8 | req_op(req) == REQ_OP_DISCARD || | ||
9 | req_op(req) == REQ_OP_SECURE_ERASE); | ||
8 | } | 10 | } |
9 | 11 | ||
10 | struct request; | 12 | struct request; |
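
Two related mmc fixes here. REQ_OP_SECURE_ERASE has to be classified exactly like REQ_OP_DISCARD wherever the queue inspects requests, or secure erases are prepped and packed as if they carried data. And mmc_req_is_special() must be evaluated before the request is issued, because issue_fn may complete and recycle it:

	bool req_is_special = mmc_req_is_special(req);	/* sample while req is live */

	mq->issue_fn(mq, req);		/* req may be completed and freed in here */
	...
	if (req_is_special)		/* no dereference of a dead request */
		mq->mqrq_cur->req = NULL;
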
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 7ff2e820bbf4..2feacc70bf61 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -81,10 +81,12 @@ EXPORT_SYMBOL_GPL(nvme_cancel_request); | |||
81 | bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, | 81 | bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, |
82 | enum nvme_ctrl_state new_state) | 82 | enum nvme_ctrl_state new_state) |
83 | { | 83 | { |
84 | enum nvme_ctrl_state old_state = ctrl->state; | 84 | enum nvme_ctrl_state old_state; |
85 | bool changed = false; | 85 | bool changed = false; |
86 | 86 | ||
87 | spin_lock_irq(&ctrl->lock); | 87 | spin_lock_irq(&ctrl->lock); |
88 | |||
89 | old_state = ctrl->state; | ||
88 | switch (new_state) { | 90 | switch (new_state) { |
89 | case NVME_CTRL_LIVE: | 91 | case NVME_CTRL_LIVE: |
90 | switch (old_state) { | 92 | switch (old_state) { |
@@ -140,11 +142,12 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, | |||
140 | default: | 142 | default: |
141 | break; | 143 | break; |
142 | } | 144 | } |
143 | spin_unlock_irq(&ctrl->lock); | ||
144 | 145 | ||
145 | if (changed) | 146 | if (changed) |
146 | ctrl->state = new_state; | 147 | ctrl->state = new_state; |
147 | 148 | ||
149 | spin_unlock_irq(&ctrl->lock); | ||
150 | |||
148 | return changed; | 151 | return changed; |
149 | } | 152 | } |
150 | EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); | 153 | EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); |
@@ -608,7 +611,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid, | |||
608 | 611 | ||
609 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, | 612 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, |
610 | NVME_QID_ANY, 0, 0); | 613 | NVME_QID_ANY, 0, 0); |
611 | if (ret >= 0) | 614 | if (ret >= 0 && result) |
612 | *result = le32_to_cpu(cqe.result); | 615 | *result = le32_to_cpu(cqe.result); |
613 | return ret; | 616 | return ret; |
614 | } | 617 | } |
@@ -628,7 +631,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, | |||
628 | 631 | ||
629 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, | 632 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, |
630 | NVME_QID_ANY, 0, 0); | 633 | NVME_QID_ANY, 0, 0); |
631 | if (ret >= 0) | 634 | if (ret >= 0 && result) |
632 | *result = le32_to_cpu(cqe.result); | 635 | *result = le32_to_cpu(cqe.result); |
633 | return ret; | 636 | return ret; |
634 | } | 637 | } |
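
The state-machine fix makes the read-validate-update sequence atomic: old_state is sampled after taking ctrl->lock and the new state is stored before dropping it, so two racing transitions can no longer both validate against the same stale state. The features fixes simply tolerate callers that pass result == NULL because they ignore the completion dword. The locking shape:

	spin_lock_irq(&ctrl->lock);
	old_state = ctrl->state;		/* must be read under the lock */
	/* ... validate the old_state -> new_state transition ... */
	if (changed)
		ctrl->state = new_state;	/* and written before unlocking */
	spin_unlock_irq(&ctrl->lock);
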
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index eafa6138a6b8..98f12223c734 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -1069,7 +1069,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, | |||
1069 | nvec = maxvec; | 1069 | nvec = maxvec; |
1070 | 1070 | ||
1071 | for (;;) { | 1071 | for (;;) { |
1072 | if (!(flags & PCI_IRQ_NOAFFINITY)) { | 1072 | if (flags & PCI_IRQ_AFFINITY) { |
1073 | dev->irq_affinity = irq_create_affinity_mask(&nvec); | 1073 | dev->irq_affinity = irq_create_affinity_mask(&nvec); |
1074 | if (nvec < minvec) | 1074 | if (nvec < minvec) |
1075 | return -ENOSPC; | 1075 | return -ENOSPC; |
@@ -1105,7 +1105,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, | |||
1105 | **/ | 1105 | **/ |
1106 | int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) | 1106 | int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) |
1107 | { | 1107 | { |
1108 | return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY); | 1108 | return __pci_enable_msi_range(dev, minvec, maxvec, 0); |
1109 | } | 1109 | } |
1110 | EXPORT_SYMBOL(pci_enable_msi_range); | 1110 | EXPORT_SYMBOL(pci_enable_msi_range); |
1111 | 1111 | ||
@@ -1120,7 +1120,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, | |||
1120 | return -ERANGE; | 1120 | return -ERANGE; |
1121 | 1121 | ||
1122 | for (;;) { | 1122 | for (;;) { |
1123 | if (!(flags & PCI_IRQ_NOAFFINITY)) { | 1123 | if (flags & PCI_IRQ_AFFINITY) { |
1124 | dev->irq_affinity = irq_create_affinity_mask(&nvec); | 1124 | dev->irq_affinity = irq_create_affinity_mask(&nvec); |
1125 | if (nvec < minvec) | 1125 | if (nvec < minvec) |
1126 | return -ENOSPC; | 1126 | return -ENOSPC; |
@@ -1160,8 +1160,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, | |||
1160 | int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, | 1160 | int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, |
1161 | int minvec, int maxvec) | 1161 | int minvec, int maxvec) |
1162 | { | 1162 | { |
1163 | return __pci_enable_msix_range(dev, entries, minvec, maxvec, | 1163 | return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0); |
1164 | PCI_IRQ_NOAFFINITY); | ||
1165 | } | 1164 | } |
1166 | EXPORT_SYMBOL(pci_enable_msix_range); | 1165 | EXPORT_SYMBOL(pci_enable_msix_range); |
1167 | 1166 | ||
@@ -1187,22 +1186,25 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, | |||
1187 | { | 1186 | { |
1188 | int vecs = -ENOSPC; | 1187 | int vecs = -ENOSPC; |
1189 | 1188 | ||
1190 | if (!(flags & PCI_IRQ_NOMSIX)) { | 1189 | if (flags & PCI_IRQ_MSIX) { |
1191 | vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, | 1190 | vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, |
1192 | flags); | 1191 | flags); |
1193 | if (vecs > 0) | 1192 | if (vecs > 0) |
1194 | return vecs; | 1193 | return vecs; |
1195 | } | 1194 | } |
1196 | 1195 | ||
1197 | if (!(flags & PCI_IRQ_NOMSI)) { | 1196 | if (flags & PCI_IRQ_MSI) { |
1198 | vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags); | 1197 | vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags); |
1199 | if (vecs > 0) | 1198 | if (vecs > 0) |
1200 | return vecs; | 1199 | return vecs; |
1201 | } | 1200 | } |
1202 | 1201 | ||
1203 | /* use legacy irq if allowed */ | 1202 | /* use legacy irq if allowed */ |
1204 | if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1) | 1203 | if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) { |
1204 | pci_intx(dev, 1); | ||
1205 | return 1; | 1205 | return 1; |
1206 | } | ||
1207 | |||
1206 | return vecs; | 1208 | return vecs; |
1207 | } | 1209 | } |
1208 | EXPORT_SYMBOL(pci_alloc_irq_vectors); | 1210 | EXPORT_SYMBOL(pci_alloc_irq_vectors); |
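
This is the implementation side of the PCI_IRQ_* polarity flip, and it also makes the legacy fallback actually enable INTx via pci_intx(). With opt-in flags a driver can now restrict itself to one interrupt type; for instance, a hypothetical MSI-X-only caller that wants the affinity spreading:

	nvec = pci_alloc_irq_vectors(pdev, 1, max_queues,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;	/* no MSI or legacy fallback was requested */
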
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 3788ed74c9ab..a32b41783b77 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
@@ -740,12 +740,22 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev, | |||
740 | } | 740 | } |
741 | 741 | ||
742 | /* Bind cpufreq callbacks to thermal cooling device ops */ | 742 | /* Bind cpufreq callbacks to thermal cooling device ops */ |
743 | |||
743 | static struct thermal_cooling_device_ops cpufreq_cooling_ops = { | 744 | static struct thermal_cooling_device_ops cpufreq_cooling_ops = { |
744 | .get_max_state = cpufreq_get_max_state, | 745 | .get_max_state = cpufreq_get_max_state, |
745 | .get_cur_state = cpufreq_get_cur_state, | 746 | .get_cur_state = cpufreq_get_cur_state, |
746 | .set_cur_state = cpufreq_set_cur_state, | 747 | .set_cur_state = cpufreq_set_cur_state, |
747 | }; | 748 | }; |
748 | 749 | ||
750 | static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = { | ||
751 | .get_max_state = cpufreq_get_max_state, | ||
752 | .get_cur_state = cpufreq_get_cur_state, | ||
753 | .set_cur_state = cpufreq_set_cur_state, | ||
754 | .get_requested_power = cpufreq_get_requested_power, | ||
755 | .state2power = cpufreq_state2power, | ||
756 | .power2state = cpufreq_power2state, | ||
757 | }; | ||
758 | |||
749 | /* Notifier for cpufreq policy change */ | 759 | /* Notifier for cpufreq policy change */ |
750 | static struct notifier_block thermal_cpufreq_notifier_block = { | 760 | static struct notifier_block thermal_cpufreq_notifier_block = { |
751 | .notifier_call = cpufreq_thermal_notifier, | 761 | .notifier_call = cpufreq_thermal_notifier, |
@@ -795,6 +805,7 @@ __cpufreq_cooling_register(struct device_node *np, | |||
795 | struct cpumask temp_mask; | 805 | struct cpumask temp_mask; |
796 | unsigned int freq, i, num_cpus; | 806 | unsigned int freq, i, num_cpus; |
797 | int ret; | 807 | int ret; |
808 | struct thermal_cooling_device_ops *cooling_ops; | ||
798 | 809 | ||
799 | cpumask_and(&temp_mask, clip_cpus, cpu_online_mask); | 810 | cpumask_and(&temp_mask, clip_cpus, cpu_online_mask); |
800 | policy = cpufreq_cpu_get(cpumask_first(&temp_mask)); | 811 | policy = cpufreq_cpu_get(cpumask_first(&temp_mask)); |
@@ -850,10 +861,6 @@ __cpufreq_cooling_register(struct device_node *np, | |||
850 | cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); | 861 | cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); |
851 | 862 | ||
852 | if (capacitance) { | 863 | if (capacitance) { |
853 | cpufreq_cooling_ops.get_requested_power = | ||
854 | cpufreq_get_requested_power; | ||
855 | cpufreq_cooling_ops.state2power = cpufreq_state2power; | ||
856 | cpufreq_cooling_ops.power2state = cpufreq_power2state; | ||
857 | cpufreq_dev->plat_get_static_power = plat_static_func; | 864 | cpufreq_dev->plat_get_static_power = plat_static_func; |
858 | 865 | ||
859 | ret = build_dyn_power_table(cpufreq_dev, capacitance); | 866 | ret = build_dyn_power_table(cpufreq_dev, capacitance); |
@@ -861,6 +868,10 @@ __cpufreq_cooling_register(struct device_node *np, | |||
861 | cool_dev = ERR_PTR(ret); | 868 | cool_dev = ERR_PTR(ret); |
862 | goto free_table; | 869 | goto free_table; |
863 | } | 870 | } |
871 | |||
872 | cooling_ops = &cpufreq_power_cooling_ops; | ||
873 | } else { | ||
874 | cooling_ops = &cpufreq_cooling_ops; | ||
864 | } | 875 | } |
865 | 876 | ||
866 | ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); | 877 | ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); |
@@ -885,7 +896,7 @@ __cpufreq_cooling_register(struct device_node *np, | |||
885 | cpufreq_dev->id); | 896 | cpufreq_dev->id); |
886 | 897 | ||
887 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, | 898 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, |
888 | &cpufreq_cooling_ops); | 899 | cooling_ops); |
889 | if (IS_ERR(cool_dev)) | 900 | if (IS_ERR(cool_dev)) |
890 | goto remove_idr; | 901 | goto remove_idr; |
891 | 902 | ||
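
The old code patched the power-model callbacks into the one shared cpufreq_cooling_ops at registration time, mutating a static structure that other, power-unaware cooling devices were already using. Keeping two ops tables and picking one per device removes the mutation entirely:

	cooling_ops = capacitance ? &cpufreq_power_cooling_ops
				  : &cpufreq_cooling_ops;
	cool_dev = thermal_of_cooling_device_register(np, dev_name,
						      cpufreq_dev, cooling_ops);
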
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index c5547bd711db..e473548b5d28 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c | |||
@@ -471,8 +471,6 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match); | |||
471 | 471 | ||
472 | static int imx_thermal_probe(struct platform_device *pdev) | 472 | static int imx_thermal_probe(struct platform_device *pdev) |
473 | { | 473 | { |
474 | const struct of_device_id *of_id = | ||
475 | of_match_device(of_imx_thermal_match, &pdev->dev); | ||
476 | struct imx_thermal_data *data; | 474 | struct imx_thermal_data *data; |
477 | struct regmap *map; | 475 | struct regmap *map; |
478 | int measure_freq; | 476 | int measure_freq; |
@@ -490,7 +488,7 @@ static int imx_thermal_probe(struct platform_device *pdev) | |||
490 | } | 488 | } |
491 | data->tempmon = map; | 489 | data->tempmon = map; |
492 | 490 | ||
493 | data->socdata = of_id->data; | 491 | data->socdata = of_device_get_match_data(&pdev->dev); |
494 | 492 | ||
495 | /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */ | 493 | /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */ |
496 | if (data->socdata->version == TEMPMON_IMX6SX) { | 494 | if (data->socdata->version == TEMPMON_IMX6SX) { |
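
of_device_get_match_data() folds the of_match_device() call and the ->data dereference into one step and returns NULL when nothing matched, where the open-coded version dereferenced a possibly-NULL of_id. Usage (the NULL guard is an assumption, not part of this hunk):

	data->socdata = of_device_get_match_data(&pdev->dev);
	if (!data->socdata)
		return -ENODEV;
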
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c index a578cd257db4..1891f34ab7fc 100644 --- a/drivers/thermal/int340x_thermal/int3406_thermal.c +++ b/drivers/thermal/int340x_thermal/int3406_thermal.c | |||
@@ -225,7 +225,6 @@ static struct platform_driver int3406_thermal_driver = { | |||
225 | .remove = int3406_thermal_remove, | 225 | .remove = int3406_thermal_remove, |
226 | .driver = { | 226 | .driver = { |
227 | .name = "int3406 thermal", | 227 | .name = "int3406 thermal", |
228 | .owner = THIS_MODULE, | ||
229 | .acpi_match_table = int3406_thermal_match, | 228 | .acpi_match_table = int3406_thermal_match, |
230 | }, | 229 | }, |
231 | }; | 230 | }; |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 9d6320e8ff3e..6e29d053843d 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -88,7 +88,7 @@ struct vhost_scsi_cmd { | |||
88 | struct scatterlist *tvc_prot_sgl; | 88 | struct scatterlist *tvc_prot_sgl; |
89 | struct page **tvc_upages; | 89 | struct page **tvc_upages; |
90 | /* Pointer to response header iovec */ | 90 | /* Pointer to response header iovec */ |
91 | struct iovec *tvc_resp_iov; | 91 | struct iovec tvc_resp_iov; |
92 | /* Pointer to vhost_scsi for our device */ | 92 | /* Pointer to vhost_scsi for our device */ |
93 | struct vhost_scsi *tvc_vhost; | 93 | struct vhost_scsi *tvc_vhost; |
94 | /* Pointer to vhost_virtqueue for the cmd */ | 94 | /* Pointer to vhost_virtqueue for the cmd */ |
@@ -547,7 +547,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) | |||
547 | memcpy(v_rsp.sense, cmd->tvc_sense_buf, | 547 | memcpy(v_rsp.sense, cmd->tvc_sense_buf, |
548 | se_cmd->scsi_sense_length); | 548 | se_cmd->scsi_sense_length); |
549 | 549 | ||
550 | iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov, | 550 | iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov, |
551 | cmd->tvc_in_iovs, sizeof(v_rsp)); | 551 | cmd->tvc_in_iovs, sizeof(v_rsp)); |
552 | ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); | 552 | ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); |
553 | if (likely(ret == sizeof(v_rsp))) { | 553 | if (likely(ret == sizeof(v_rsp))) { |
@@ -1044,7 +1044,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
1044 | } | 1044 | } |
1045 | cmd->tvc_vhost = vs; | 1045 | cmd->tvc_vhost = vs; |
1046 | cmd->tvc_vq = vq; | 1046 | cmd->tvc_vq = vq; |
1047 | cmd->tvc_resp_iov = &vq->iov[out]; | 1047 | cmd->tvc_resp_iov = vq->iov[out]; |
1048 | cmd->tvc_in_iovs = in; | 1048 | cmd->tvc_in_iovs = in; |
1049 | 1049 | ||
1050 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", | 1050 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", |
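
tvc_resp_iov used to be a pointer into vq->iov[], but that array is reused for the next request as soon as the handler loop moves on, so a command completing later wrote its response through a stale descriptor. Copying the iovec by value gives it the command's lifetime:

	cmd->tvc_resp_iov = vq->iov[out];	/* copy: the ring slot gets reused */
	...
	iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
		      cmd->tvc_in_iovs, sizeof(v_rsp));
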
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 7487971f9f78..c1010f018bd8 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type, | |||
316 | rc = -ENOMEM; | 316 | rc = -ENOMEM; |
317 | goto out; | 317 | goto out; |
318 | } | 318 | } |
319 | } else { | 319 | } else if (msg_type == XS_TRANSACTION_END) { |
320 | list_for_each_entry(trans, &u->transactions, list) | 320 | list_for_each_entry(trans, &u->transactions, list) |
321 | if (trans->handle.id == u->u.msg.tx_id) | 321 | if (trans->handle.id == u->u.msg.tx_id) |
322 | break; | 322 | break; |
diff --git a/fs/block_dev.c b/fs/block_dev.c index c3cdde87cc8c..08ae99343d92 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -249,7 +249,8 @@ struct super_block *freeze_bdev(struct block_device *bdev) | |||
249 | * thaw_bdev drops it. | 249 | * thaw_bdev drops it. |
250 | */ | 250 | */ |
251 | sb = get_super(bdev); | 251 | sb = get_super(bdev); |
252 | drop_super(sb); | 252 | if (sb) |
253 | drop_super(sb); | ||
253 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | 254 | mutex_unlock(&bdev->bd_fsfreeze_mutex); |
254 | return sb; | 255 | return sb; |
255 | } | 256 | } |
@@ -646,7 +647,7 @@ static struct dentry *bd_mount(struct file_system_type *fs_type, | |||
646 | { | 647 | { |
647 | struct dentry *dent; | 648 | struct dentry *dent; |
648 | dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC); | 649 | dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC); |
649 | if (dent) | 650 | if (!IS_ERR(dent)) |
650 | dent->d_sb->s_iflags |= SB_I_CGROUPWB; | 651 | dent->d_sb->s_iflags |= SB_I_CGROUPWB; |
651 | return dent; | 652 | return dent; |
652 | } | 653 | } |
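
Two return-convention fixes: get_super() legitimately returns NULL when the block device has no mounted superblock, and mount_pseudo() reports failure as an ERR_PTR, never NULL, so the old `if (dent)` check let error pointers through to the dereference. The correct pattern:

	dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
	if (!IS_ERR(dent))	/* failure is ERR_PTR(-ENOMEM), never NULL */
		dent->d_sb->s_iflags |= SB_I_CGROUPWB;
	return dent;
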
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index d64d2a515cb2..ccb401eebc11 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c | |||
@@ -1699,11 +1699,11 @@ static int f2fs_write_end(struct file *file, | |||
1699 | trace_f2fs_write_end(inode, pos, len, copied); | 1699 | trace_f2fs_write_end(inode, pos, len, copied); |
1700 | 1700 | ||
1701 | set_page_dirty(page); | 1701 | set_page_dirty(page); |
1702 | f2fs_put_page(page, 1); | ||
1703 | 1702 | ||
1704 | if (pos + copied > i_size_read(inode)) | 1703 | if (pos + copied > i_size_read(inode)) |
1705 | f2fs_i_size_write(inode, pos + copied); | 1704 | f2fs_i_size_write(inode, pos + copied); |
1706 | 1705 | ||
1706 | f2fs_put_page(page, 1); | ||
1707 | f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); | 1707 | f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); |
1708 | return copied; | 1708 | return copied; |
1709 | } | 1709 | } |
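
f2fs_put_page(page, 1) unlocks and releases the page, so the old order dropped the page lock before updating i_size, presumably opening a window against concurrent size users; the fix keeps the page held across the update:

	set_page_dirty(page);
	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);	/* still holding the page */
	f2fs_put_page(page, 1);				/* unlock + put afterwards */
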
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 675fa79d86f6..14f5fe2b841e 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
@@ -538,7 +538,7 @@ struct f2fs_nm_info { | |||
538 | /* NAT cache management */ | 538 | /* NAT cache management */ |
539 | struct radix_tree_root nat_root;/* root of the nat entry cache */ | 539 | struct radix_tree_root nat_root;/* root of the nat entry cache */ |
540 | struct radix_tree_root nat_set_root;/* root of the nat set cache */ | 540 | struct radix_tree_root nat_set_root;/* root of the nat set cache */ |
541 | struct percpu_rw_semaphore nat_tree_lock; /* protect nat_tree_lock */ | 541 | struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */ |
542 | struct list_head nat_entries; /* cached nat entry list (clean) */ | 542 | struct list_head nat_entries; /* cached nat entry list (clean) */ |
543 | unsigned int nat_cnt; /* the # of cached nat entries */ | 543 | unsigned int nat_cnt; /* the # of cached nat entries */ |
544 | unsigned int dirty_nat_cnt; /* total num of nat entries in set */ | 544 | unsigned int dirty_nat_cnt; /* total num of nat entries in set */ |
@@ -787,7 +787,7 @@ struct f2fs_sb_info { | |||
787 | struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */ | 787 | struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */ |
788 | struct inode *meta_inode; /* cache meta blocks */ | 788 | struct inode *meta_inode; /* cache meta blocks */ |
789 | struct mutex cp_mutex; /* checkpoint procedure lock */ | 789 | struct mutex cp_mutex; /* checkpoint procedure lock */ |
790 | struct percpu_rw_semaphore cp_rwsem; /* blocking FS operations */ | 790 | struct rw_semaphore cp_rwsem; /* blocking FS operations */ |
791 | struct rw_semaphore node_write; /* locking node writes */ | 791 | struct rw_semaphore node_write; /* locking node writes */ |
792 | wait_queue_head_t cp_wait; | 792 | wait_queue_head_t cp_wait; |
793 | unsigned long last_time[MAX_TIME]; /* to store time in jiffies */ | 793 | unsigned long last_time[MAX_TIME]; /* to store time in jiffies */ |
@@ -1074,22 +1074,22 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) | |||
1074 | 1074 | ||
1075 | static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) | 1075 | static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) |
1076 | { | 1076 | { |
1077 | percpu_down_read(&sbi->cp_rwsem); | 1077 | down_read(&sbi->cp_rwsem); |
1078 | } | 1078 | } |
1079 | 1079 | ||
1080 | static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) | 1080 | static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) |
1081 | { | 1081 | { |
1082 | percpu_up_read(&sbi->cp_rwsem); | 1082 | up_read(&sbi->cp_rwsem); |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) | 1085 | static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) |
1086 | { | 1086 | { |
1087 | percpu_down_write(&sbi->cp_rwsem); | 1087 | down_write(&sbi->cp_rwsem); |
1088 | } | 1088 | } |
1089 | 1089 | ||
1090 | static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) | 1090 | static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) |
1091 | { | 1091 | { |
1092 | percpu_up_write(&sbi->cp_rwsem); | 1092 | up_write(&sbi->cp_rwsem); |
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | static inline int __get_cp_reason(struct f2fs_sb_info *sbi) | 1095 | static inline int __get_cp_reason(struct f2fs_sb_info *sbi) |
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 0e493f63ea41..47abb96098e4 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c | |||
@@ -2086,15 +2086,19 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, | |||
2086 | if (unlikely(f2fs_readonly(src->i_sb))) | 2086 | if (unlikely(f2fs_readonly(src->i_sb))) |
2087 | return -EROFS; | 2087 | return -EROFS; |
2088 | 2088 | ||
2089 | if (S_ISDIR(src->i_mode) || S_ISDIR(dst->i_mode)) | 2089 | if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode)) |
2090 | return -EISDIR; | 2090 | return -EINVAL; |
2091 | 2091 | ||
2092 | if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst)) | 2092 | if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst)) |
2093 | return -EOPNOTSUPP; | 2093 | return -EOPNOTSUPP; |
2094 | 2094 | ||
2095 | inode_lock(src); | 2095 | inode_lock(src); |
2096 | if (src != dst) | 2096 | if (src != dst) { |
2097 | inode_lock(dst); | 2097 | if (!inode_trylock(dst)) { |
2098 | ret = -EBUSY; | ||
2099 | goto out; | ||
2100 | } | ||
2101 | } | ||
2098 | 2102 | ||
2099 | ret = -EINVAL; | 2103 | ret = -EINVAL; |
2100 | if (pos_in + len > src->i_size || pos_in + len < pos_in) | 2104 | if (pos_in + len > src->i_size || pos_in + len < pos_in) |
@@ -2152,6 +2156,7 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, | |||
2152 | out_unlock: | 2156 | out_unlock: |
2153 | if (src != dst) | 2157 | if (src != dst) |
2154 | inode_unlock(dst); | 2158 | inode_unlock(dst); |
2159 | out: | ||
2155 | inode_unlock(src); | 2160 | inode_unlock(src); |
2156 | return ret; | 2161 | return ret; |
2157 | } | 2162 | } |
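
Two concurrent moves can name the same pair of inodes in opposite order, so taking both inode locks unconditionally is a classic AB-BA deadlock; trying the second lock and backing off with -EBUSY breaks the cycle:

	inode_lock(src);
	if (src != dst && !inode_trylock(dst)) {
		ret = -EBUSY;	/* a racing call may hold dst and want src */
		goto out;
	}
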
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index b2fa4b615925..f75d197d5beb 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c | |||
@@ -206,14 +206,14 @@ int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid) | |||
206 | struct nat_entry *e; | 206 | struct nat_entry *e; |
207 | bool need = false; | 207 | bool need = false; |
208 | 208 | ||
209 | percpu_down_read(&nm_i->nat_tree_lock); | 209 | down_read(&nm_i->nat_tree_lock); |
210 | e = __lookup_nat_cache(nm_i, nid); | 210 | e = __lookup_nat_cache(nm_i, nid); |
211 | if (e) { | 211 | if (e) { |
212 | if (!get_nat_flag(e, IS_CHECKPOINTED) && | 212 | if (!get_nat_flag(e, IS_CHECKPOINTED) && |
213 | !get_nat_flag(e, HAS_FSYNCED_INODE)) | 213 | !get_nat_flag(e, HAS_FSYNCED_INODE)) |
214 | need = true; | 214 | need = true; |
215 | } | 215 | } |
216 | percpu_up_read(&nm_i->nat_tree_lock); | 216 | up_read(&nm_i->nat_tree_lock); |
217 | return need; | 217 | return need; |
218 | } | 218 | } |
219 | 219 | ||
@@ -223,11 +223,11 @@ bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) | |||
223 | struct nat_entry *e; | 223 | struct nat_entry *e; |
224 | bool is_cp = true; | 224 | bool is_cp = true; |
225 | 225 | ||
226 | percpu_down_read(&nm_i->nat_tree_lock); | 226 | down_read(&nm_i->nat_tree_lock); |
227 | e = __lookup_nat_cache(nm_i, nid); | 227 | e = __lookup_nat_cache(nm_i, nid); |
228 | if (e && !get_nat_flag(e, IS_CHECKPOINTED)) | 228 | if (e && !get_nat_flag(e, IS_CHECKPOINTED)) |
229 | is_cp = false; | 229 | is_cp = false; |
230 | percpu_up_read(&nm_i->nat_tree_lock); | 230 | up_read(&nm_i->nat_tree_lock); |
231 | return is_cp; | 231 | return is_cp; |
232 | } | 232 | } |
233 | 233 | ||
@@ -237,13 +237,13 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) | |||
237 | struct nat_entry *e; | 237 | struct nat_entry *e; |
238 | bool need_update = true; | 238 | bool need_update = true; |
239 | 239 | ||
240 | percpu_down_read(&nm_i->nat_tree_lock); | 240 | down_read(&nm_i->nat_tree_lock); |
241 | e = __lookup_nat_cache(nm_i, ino); | 241 | e = __lookup_nat_cache(nm_i, ino); |
242 | if (e && get_nat_flag(e, HAS_LAST_FSYNC) && | 242 | if (e && get_nat_flag(e, HAS_LAST_FSYNC) && |
243 | (get_nat_flag(e, IS_CHECKPOINTED) || | 243 | (get_nat_flag(e, IS_CHECKPOINTED) || |
244 | get_nat_flag(e, HAS_FSYNCED_INODE))) | 244 | get_nat_flag(e, HAS_FSYNCED_INODE))) |
245 | need_update = false; | 245 | need_update = false; |
246 | percpu_up_read(&nm_i->nat_tree_lock); | 246 | up_read(&nm_i->nat_tree_lock); |
247 | return need_update; | 247 | return need_update; |
248 | } | 248 | } |
249 | 249 | ||
@@ -284,7 +284,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, | |||
284 | struct f2fs_nm_info *nm_i = NM_I(sbi); | 284 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
285 | struct nat_entry *e; | 285 | struct nat_entry *e; |
286 | 286 | ||
287 | percpu_down_write(&nm_i->nat_tree_lock); | 287 | down_write(&nm_i->nat_tree_lock); |
288 | e = __lookup_nat_cache(nm_i, ni->nid); | 288 | e = __lookup_nat_cache(nm_i, ni->nid); |
289 | if (!e) { | 289 | if (!e) { |
290 | e = grab_nat_entry(nm_i, ni->nid); | 290 | e = grab_nat_entry(nm_i, ni->nid); |
@@ -334,7 +334,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, | |||
334 | set_nat_flag(e, HAS_FSYNCED_INODE, true); | 334 | set_nat_flag(e, HAS_FSYNCED_INODE, true); |
335 | set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); | 335 | set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); |
336 | } | 336 | } |
337 | percpu_up_write(&nm_i->nat_tree_lock); | 337 | up_write(&nm_i->nat_tree_lock); |
338 | } | 338 | } |
339 | 339 | ||
340 | int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) | 340 | int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) |
@@ -342,7 +342,8 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) | |||
342 | struct f2fs_nm_info *nm_i = NM_I(sbi); | 342 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
343 | int nr = nr_shrink; | 343 | int nr = nr_shrink; |
344 | 344 | ||
345 | percpu_down_write(&nm_i->nat_tree_lock); | 345 | if (!down_write_trylock(&nm_i->nat_tree_lock)) |
346 | return 0; | ||
346 | 347 | ||
347 | while (nr_shrink && !list_empty(&nm_i->nat_entries)) { | 348 | while (nr_shrink && !list_empty(&nm_i->nat_entries)) { |
348 | struct nat_entry *ne; | 349 | struct nat_entry *ne; |
@@ -351,7 +352,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) | |||
351 | __del_from_nat_cache(nm_i, ne); | 352 | __del_from_nat_cache(nm_i, ne); |
352 | nr_shrink--; | 353 | nr_shrink--; |
353 | } | 354 | } |
354 | percpu_up_write(&nm_i->nat_tree_lock); | 355 | up_write(&nm_i->nat_tree_lock); |
355 | return nr - nr_shrink; | 356 | return nr - nr_shrink; |
356 | } | 357 | } |
357 | 358 | ||
@@ -373,13 +374,13 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) | |||
373 | ni->nid = nid; | 374 | ni->nid = nid; |
374 | 375 | ||
375 | /* Check nat cache */ | 376 | /* Check nat cache */ |
376 | percpu_down_read(&nm_i->nat_tree_lock); | 377 | down_read(&nm_i->nat_tree_lock); |
377 | e = __lookup_nat_cache(nm_i, nid); | 378 | e = __lookup_nat_cache(nm_i, nid); |
378 | if (e) { | 379 | if (e) { |
379 | ni->ino = nat_get_ino(e); | 380 | ni->ino = nat_get_ino(e); |
380 | ni->blk_addr = nat_get_blkaddr(e); | 381 | ni->blk_addr = nat_get_blkaddr(e); |
381 | ni->version = nat_get_version(e); | 382 | ni->version = nat_get_version(e); |
382 | percpu_up_read(&nm_i->nat_tree_lock); | 383 | up_read(&nm_i->nat_tree_lock); |
383 | return; | 384 | return; |
384 | } | 385 | } |
385 | 386 | ||
@@ -403,11 +404,11 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) | |||
403 | node_info_from_raw_nat(ni, &ne); | 404 | node_info_from_raw_nat(ni, &ne); |
404 | f2fs_put_page(page, 1); | 405 | f2fs_put_page(page, 1); |
405 | cache: | 406 | cache: |
406 | percpu_up_read(&nm_i->nat_tree_lock); | 407 | up_read(&nm_i->nat_tree_lock); |
407 | /* cache nat entry */ | 408 | /* cache nat entry */ |
408 | percpu_down_write(&nm_i->nat_tree_lock); | 409 | down_write(&nm_i->nat_tree_lock); |
409 | cache_nat_entry(sbi, nid, &ne); | 410 | cache_nat_entry(sbi, nid, &ne); |
410 | percpu_up_write(&nm_i->nat_tree_lock); | 411 | up_write(&nm_i->nat_tree_lock); |
411 | } | 412 | } |
412 | 413 | ||
413 | /* | 414 | /* |
@@ -1788,7 +1789,7 @@ void build_free_nids(struct f2fs_sb_info *sbi) | |||
1788 | ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, | 1789 | ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, |
1789 | META_NAT, true); | 1790 | META_NAT, true); |
1790 | 1791 | ||
1791 | percpu_down_read(&nm_i->nat_tree_lock); | 1792 | down_read(&nm_i->nat_tree_lock); |
1792 | 1793 | ||
1793 | while (1) { | 1794 | while (1) { |
1794 | struct page *page = get_current_nat_page(sbi, nid); | 1795 | struct page *page = get_current_nat_page(sbi, nid); |
@@ -1820,7 +1821,7 @@ void build_free_nids(struct f2fs_sb_info *sbi) | |||
1820 | remove_free_nid(nm_i, nid); | 1821 | remove_free_nid(nm_i, nid); |
1821 | } | 1822 | } |
1822 | up_read(&curseg->journal_rwsem); | 1823 | up_read(&curseg->journal_rwsem); |
1823 | percpu_up_read(&nm_i->nat_tree_lock); | 1824 | up_read(&nm_i->nat_tree_lock); |
1824 | 1825 | ||
1825 | ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), | 1826 | ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), |
1826 | nm_i->ra_nid_pages, META_NAT, false); | 1827 | nm_i->ra_nid_pages, META_NAT, false); |
@@ -2209,7 +2210,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi) | |||
2209 | if (!nm_i->dirty_nat_cnt) | 2210 | if (!nm_i->dirty_nat_cnt) |
2210 | return; | 2211 | return; |
2211 | 2212 | ||
2212 | percpu_down_write(&nm_i->nat_tree_lock); | 2213 | down_write(&nm_i->nat_tree_lock); |
2213 | 2214 | ||
2214 | /* | 2215 | /* |
2215 | * if there are no enough space in journal to store dirty nat | 2216 | * if there are no enough space in journal to store dirty nat |
@@ -2232,7 +2233,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi) | |||
2232 | list_for_each_entry_safe(set, tmp, &sets, set_list) | 2233 | list_for_each_entry_safe(set, tmp, &sets, set_list) |
2233 | __flush_nat_entry_set(sbi, set); | 2234 | __flush_nat_entry_set(sbi, set); |
2234 | 2235 | ||
2235 | percpu_up_write(&nm_i->nat_tree_lock); | 2236 | up_write(&nm_i->nat_tree_lock); |
2236 | 2237 | ||
2237 | f2fs_bug_on(sbi, nm_i->dirty_nat_cnt); | 2238 | f2fs_bug_on(sbi, nm_i->dirty_nat_cnt); |
2238 | } | 2239 | } |
@@ -2268,8 +2269,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi) | |||
2268 | 2269 | ||
2269 | mutex_init(&nm_i->build_lock); | 2270 | mutex_init(&nm_i->build_lock); |
2270 | spin_lock_init(&nm_i->free_nid_list_lock); | 2271 | spin_lock_init(&nm_i->free_nid_list_lock); |
2271 | if (percpu_init_rwsem(&nm_i->nat_tree_lock)) | 2272 | init_rwsem(&nm_i->nat_tree_lock); |
2272 | return -ENOMEM; | ||
2273 | 2273 | ||
2274 | nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); | 2274 | nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); |
2275 | nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); | 2275 | nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); |
@@ -2326,7 +2326,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) | |||
2326 | spin_unlock(&nm_i->free_nid_list_lock); | 2326 | spin_unlock(&nm_i->free_nid_list_lock); |
2327 | 2327 | ||
2328 | /* destroy nat cache */ | 2328 | /* destroy nat cache */ |
2329 | percpu_down_write(&nm_i->nat_tree_lock); | 2329 | down_write(&nm_i->nat_tree_lock); |
2330 | while ((found = __gang_lookup_nat_cache(nm_i, | 2330 | while ((found = __gang_lookup_nat_cache(nm_i, |
2331 | nid, NATVEC_SIZE, natvec))) { | 2331 | nid, NATVEC_SIZE, natvec))) { |
2332 | unsigned idx; | 2332 | unsigned idx; |
@@ -2351,9 +2351,8 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) | |||
2351 | kmem_cache_free(nat_entry_set_slab, setvec[idx]); | 2351 | kmem_cache_free(nat_entry_set_slab, setvec[idx]); |
2352 | } | 2352 | } |
2353 | } | 2353 | } |
2354 | percpu_up_write(&nm_i->nat_tree_lock); | 2354 | up_write(&nm_i->nat_tree_lock); |
2355 | 2355 | ||
2356 | percpu_free_rwsem(&nm_i->nat_tree_lock); | ||
2357 | kfree(nm_i->nat_bitmap); | 2356 | kfree(nm_i->nat_bitmap); |
2358 | sbi->nm_info = NULL; | 2357 | sbi->nm_info = NULL; |
2359 | kfree(nm_i); | 2358 | kfree(nm_i); |
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 1b86d3f638ef..7f863a645ab1 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c | |||
@@ -706,8 +706,6 @@ static void destroy_percpu_info(struct f2fs_sb_info *sbi) | |||
706 | percpu_counter_destroy(&sbi->nr_pages[i]); | 706 | percpu_counter_destroy(&sbi->nr_pages[i]); |
707 | percpu_counter_destroy(&sbi->alloc_valid_block_count); | 707 | percpu_counter_destroy(&sbi->alloc_valid_block_count); |
708 | percpu_counter_destroy(&sbi->total_valid_inode_count); | 708 | percpu_counter_destroy(&sbi->total_valid_inode_count); |
709 | |||
710 | percpu_free_rwsem(&sbi->cp_rwsem); | ||
711 | } | 709 | } |
712 | 710 | ||
713 | static void f2fs_put_super(struct super_block *sb) | 711 | static void f2fs_put_super(struct super_block *sb) |
@@ -1483,9 +1481,6 @@ static int init_percpu_info(struct f2fs_sb_info *sbi) | |||
1483 | { | 1481 | { |
1484 | int i, err; | 1482 | int i, err; |
1485 | 1483 | ||
1486 | if (percpu_init_rwsem(&sbi->cp_rwsem)) | ||
1487 | return -ENOMEM; | ||
1488 | |||
1489 | for (i = 0; i < NR_COUNT_TYPE; i++) { | 1484 | for (i = 0; i < NR_COUNT_TYPE; i++) { |
1490 | err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL); | 1485 | err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL); |
1491 | if (err) | 1486 | if (err) |
@@ -1686,6 +1681,7 @@ try_onemore: | |||
1686 | sbi->write_io[i].bio = NULL; | 1681 | sbi->write_io[i].bio = NULL; |
1687 | } | 1682 | } |
1688 | 1683 | ||
1684 | init_rwsem(&sbi->cp_rwsem); | ||
1689 | init_waitqueue_head(&sbi->cp_wait); | 1685 | init_waitqueue_head(&sbi->cp_wait); |
1690 | init_sb_info(sbi); | 1686 | init_sb_info(sbi); |
1691 | 1687 | ||
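
This group of f2fs hunks reverts cp_rwsem and nat_tree_lock from percpu_rw_semaphore back to a plain rw_semaphore, presumably because the percpu variant makes the write side very expensive while both locks are write-locked routinely (checkpoint, NAT flush). The plain rwsem also needs no allocation, which deletes two -ENOMEM paths, and offers a trylock so the shrinker can skip instead of stalling:

	init_rwsem(&nm_i->nat_tree_lock);	/* cannot fail, nothing to free */

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;	/* shrinker path: don't block, reclaim nothing */
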
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index b45345d701e7..51157da3f76e 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c | |||
@@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) | |||
370 | 370 | ||
371 | p = c->gap_lebs; | 371 | p = c->gap_lebs; |
372 | do { | 372 | do { |
373 | ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs); | 373 | ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs); |
374 | written = layout_leb_in_gaps(c, p); | 374 | written = layout_leb_in_gaps(c, p); |
375 | if (written < 0) { | 375 | if (written < 0) { |
376 | err = written; | 376 | err = written; |
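
Pointer arithmetic in C already scales by the element size, so bounding the int pointer with `gap_lebs + sizeof(int) * idx_lebs` put the limit sizeof(int) times too far past the array; the element count alone is the right bound:

	int *p = c->gap_lebs;
	/* p + n points n elements, i.e. n * sizeof(int) bytes, past gap_lebs */
	ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
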
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index e237811f09ce..11a004114eba 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c | |||
@@ -575,7 +575,8 @@ static int ubifs_xattr_get(const struct xattr_handler *handler, | |||
575 | dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name, | 575 | dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name, |
576 | inode->i_ino, dentry, size); | 576 | inode->i_ino, dentry, size); |
577 | 577 | ||
578 | return __ubifs_getxattr(inode, name, buffer, size); | 578 | name = xattr_full_name(handler, name); |
579 | return __ubifs_getxattr(inode, name, buffer, size); | ||
579 | } | 580 | } |
580 | 581 | ||
581 | static int ubifs_xattr_set(const struct xattr_handler *handler, | 582 | static int ubifs_xattr_set(const struct xattr_handler *handler, |
@@ -586,6 +587,8 @@ static int ubifs_xattr_set(const struct xattr_handler *handler, | |||
586 | dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", | 587 | dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", |
587 | name, inode->i_ino, dentry, size); | 588 | name, inode->i_ino, dentry, size); |
588 | 589 | ||
590 | name = xattr_full_name(handler, name); | ||
591 | |||
589 | if (value) | 592 | if (value) |
590 | return __ubifs_setxattr(inode, name, value, size, flags); | 593 | return __ubifs_setxattr(inode, name, value, size, flags); |
591 | else | 594 | else |
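
The generic xattr code strips the "user."/"trusted."/"security." prefix before calling a handler, so passing the bare suffix to __ubifs_getxattr()/__ubifs_setxattr() stored and looked up the wrong names. xattr_full_name() reconstitutes the prefixed name (it can simply step the pointer back over the handler's prefix, since the two are contiguous in the caller's buffer):

	name = xattr_full_name(handler, name);	/* "foo" back to e.g. "user.foo" */
	return __ubifs_getxattr(inode, name, buffer, size);
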
diff --git a/include/linux/bio.h b/include/linux/bio.h index 59ffaa68b11b..23ddf4b46a9b 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -71,7 +71,8 @@ static inline bool bio_has_data(struct bio *bio) | |||
71 | { | 71 | { |
72 | if (bio && | 72 | if (bio && |
73 | bio->bi_iter.bi_size && | 73 | bio->bi_iter.bi_size && |
74 | bio_op(bio) != REQ_OP_DISCARD) | 74 | bio_op(bio) != REQ_OP_DISCARD && |
75 | bio_op(bio) != REQ_OP_SECURE_ERASE) | ||
75 | return true; | 76 | return true; |
76 | 77 | ||
77 | return false; | 78 | return false; |
@@ -79,7 +80,9 @@ static inline bool bio_has_data(struct bio *bio) | |||
79 | 80 | ||
80 | static inline bool bio_no_advance_iter(struct bio *bio) | 81 | static inline bool bio_no_advance_iter(struct bio *bio) |
81 | { | 82 | { |
82 | return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME; | 83 | return bio_op(bio) == REQ_OP_DISCARD || |
84 | bio_op(bio) == REQ_OP_SECURE_ERASE || | ||
85 | bio_op(bio) == REQ_OP_WRITE_SAME; | ||
83 | } | 86 | } |
84 | 87 | ||
85 | static inline bool bio_is_rw(struct bio *bio) | 88 | static inline bool bio_is_rw(struct bio *bio) |
@@ -199,6 +202,9 @@ static inline unsigned bio_segments(struct bio *bio) | |||
199 | if (bio_op(bio) == REQ_OP_DISCARD) | 202 | if (bio_op(bio) == REQ_OP_DISCARD) |
200 | return 1; | 203 | return 1; |
201 | 204 | ||
205 | if (bio_op(bio) == REQ_OP_SECURE_ERASE) | ||
206 | return 1; | ||
207 | |||
202 | if (bio_op(bio) == REQ_OP_WRITE_SAME) | 208 | if (bio_op(bio) == REQ_OP_WRITE_SAME) |
203 | return 1; | 209 | return 1; |
204 | 210 | ||
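
The common thread in these three helpers is that REQ_OP_SECURE_ERASE now takes the same path as REQ_OP_DISCARD: the bio describes a byte range but carries no data pages, so it neither counts as having data, nor advances the iterator by payload, nor splits into multiple segments. A toy model of that predicate (types and names are illustrative, not kernel API):

    #include <assert.h>

    /* Discard-like ops have a size but no payload to iterate over. */
    enum toy_op { TOY_READ, TOY_WRITE, TOY_DISCARD, TOY_SECURE_ERASE };

    static int op_is_discard_like(enum toy_op op)
    {
        return op == TOY_DISCARD || op == TOY_SECURE_ERASE;
    }

    static int toy_has_data(enum toy_op op, unsigned int bytes)
    {
        return bytes && !op_is_discard_like(op);
    }

    int main(void)
    {
        assert(!toy_has_data(TOY_SECURE_ERASE, 4096));
        assert(toy_has_data(TOY_WRITE, 4096));
        return 0;
    }
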
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2c210b6a7bcf..e79055c8b577 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -882,7 +882,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) | |||
882 | static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, | 882 | static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, |
883 | int op) | 883 | int op) |
884 | { | 884 | { |
885 | if (unlikely(op == REQ_OP_DISCARD)) | 885 | if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) |
886 | return min(q->limits.max_discard_sectors, UINT_MAX >> 9); | 886 | return min(q->limits.max_discard_sectors, UINT_MAX >> 9); |
887 | 887 | ||
888 | if (unlikely(op == REQ_OP_WRITE_SAME)) | 888 | if (unlikely(op == REQ_OP_WRITE_SAME)) |
@@ -913,7 +913,9 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq, | |||
913 | if (unlikely(rq->cmd_type != REQ_TYPE_FS)) | 913 | if (unlikely(rq->cmd_type != REQ_TYPE_FS)) |
914 | return q->limits.max_hw_sectors; | 914 | return q->limits.max_hw_sectors; |
915 | 915 | ||
916 | if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD)) | 916 | if (!q->limits.chunk_sectors || |
917 | req_op(rq) == REQ_OP_DISCARD || | ||
918 | req_op(rq) == REQ_OP_SECURE_ERASE) | ||
917 | return blk_queue_get_max_sectors(q, req_op(rq)); | 919 | return blk_queue_get_max_sectors(q, req_op(rq)); |
918 | 920 | ||
919 | return min(blk_max_size_offset(q, offset), | 921 | return min(blk_max_size_offset(q, offset), |
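
Secure erase requests are now capped by the discard limit as well. As for the UINT_MAX >> 9 clamp: it keeps the sector count small enough that the equivalent byte count, shifted back left by 9, still fits in a 32-bit value. A quick standalone check of that arithmetic:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int max_sectors = UINT_MAX >> 9;

        /* Largest sector count whose byte size still fits in 32 bits. */
        printf("%u sectors = %u bytes\n", max_sectors, max_sectors << 9);
        return 0;
    }
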
diff --git a/include/linux/pci.h b/include/linux/pci.h index 2599a980340f..fbc1fa625c3e 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -1251,10 +1251,12 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); | |||
1251 | int pci_set_vga_state(struct pci_dev *pdev, bool decode, | 1251 | int pci_set_vga_state(struct pci_dev *pdev, bool decode, |
1252 | unsigned int command_bits, u32 flags); | 1252 | unsigned int command_bits, u32 flags); |
1253 | 1253 | ||
1254 | #define PCI_IRQ_NOLEGACY (1 << 0) /* don't use legacy interrupts */ | 1254 | #define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */ |
1255 | #define PCI_IRQ_NOMSI (1 << 1) /* don't use MSI interrupts */ | 1255 | #define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */ |
1256 | #define PCI_IRQ_NOMSIX (1 << 2) /* don't use MSI-X interrupts */ | 1256 | #define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */ |
1257 | #define PCI_IRQ_NOAFFINITY (1 << 3) /* don't auto-assign affinity */ | 1257 | #define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */ |
1258 | #define PCI_IRQ_ALL_TYPES \ | ||
1259 | (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) | ||
1258 | 1260 | ||
1259 | /* kmem_cache style wrapper around pci_alloc_consistent() */ | 1261 | /* kmem_cache style wrapper around pci_alloc_consistent() */ |
1260 | 1262 | ||
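
With the polarity inverted from opt-out to opt-in, a driver now states which interrupt types it can live with rather than which ones to avoid. A hypothetical snippet for hardware whose legacy line is unusable (the vector counts are made up):

    nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSI | PCI_IRQ_MSIX);
    if (nvec < 0)
        return nvec;
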
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 9a37c541822f..b5486e648607 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h | |||
@@ -9,8 +9,8 @@ | |||
9 | 9 | ||
10 | DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); | 10 | DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); |
11 | 11 | ||
12 | DECLARE_PER_CPU(int, xen_vcpu_id); | 12 | DECLARE_PER_CPU(uint32_t, xen_vcpu_id); |
13 | static inline int xen_vcpu_nr(int cpu) | 13 | static inline uint32_t xen_vcpu_nr(int cpu) |
14 | { | 14 | { |
15 | return per_cpu(xen_vcpu_id, cpu); | 15 | return per_cpu(xen_vcpu_id, cpu); |
16 | } | 16 | } |
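
Widening xen_vcpu_id from int to uint32_t matches the unsigned 32-bit vcpu ids the hypervisor interface deals in, and avoids sign-extension surprises if an all-ones sentinel value is ever widened into a 64-bit argument. A standalone illustration of that pitfall in plain C (no Xen code, purely an assumption about why the type matters):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int as_int = -1;                   /* all-ones in a signed int */
        uint64_t arg1 = as_int;            /* sign-extends: ffffffffffffffff */
        uint64_t arg2 = (uint32_t)as_int;  /* zero-extends: 00000000ffffffff */

        printf("%llx vs %llx\n",
               (unsigned long long)arg1, (unsigned long long)arg2);
        return 0;
    }
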
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7598e6ca817a..dbafc5df03f3 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -223,7 +223,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, | |||
223 | what |= MASK_TC_BIT(op_flags, META); | 223 | what |= MASK_TC_BIT(op_flags, META); |
224 | what |= MASK_TC_BIT(op_flags, PREFLUSH); | 224 | what |= MASK_TC_BIT(op_flags, PREFLUSH); |
225 | what |= MASK_TC_BIT(op_flags, FUA); | 225 | what |= MASK_TC_BIT(op_flags, FUA); |
226 | if (op == REQ_OP_DISCARD) | 226 | if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE) |
227 | what |= BLK_TC_ACT(BLK_TC_DISCARD); | 227 | what |= BLK_TC_ACT(BLK_TC_DISCARD); |
228 | if (op == REQ_OP_FLUSH) | 228 | if (op == REQ_OP_FLUSH) |
229 | what |= BLK_TC_ACT(BLK_TC_FLUSH); | 229 | what |= BLK_TC_ACT(BLK_TC_FLUSH); |
diff --git a/mm/usercopy.c b/mm/usercopy.c index 8ebae91a6b55..a3cc3052f830 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c | |||
@@ -83,7 +83,7 @@ static bool overlaps(const void *ptr, unsigned long n, unsigned long low, | |||
83 | unsigned long check_high = check_low + n; | 83 | unsigned long check_high = check_low + n; |
84 | 84 | ||
85 | /* Does not overlap if entirely above or entirely below. */ | 85 | /* Does not overlap if entirely above or entirely below. */ |
86 | if (check_low >= high || check_high < low) | 86 | if (check_low >= high || check_high <= low) |
87 | return false; | 87 | return false; |
88 | 88 | ||
89 | return true; | 89 | return true; |
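
The check treats [check_low, check_high) as a half-open interval, so a range whose exclusive end merely touches low must not be flagged; the old '<' comparison reported exactly such adjacent ranges as overlapping. A standalone test of the corrected logic:

    #include <assert.h>

    static int overlaps(unsigned long lo1, unsigned long n,
                        unsigned long lo2, unsigned long hi2)
    {
        unsigned long hi1 = lo1 + n;    /* exclusive end */

        return !(lo1 >= hi2 || hi1 <= lo2);
    }

    int main(void)
    {
        assert(!overlaps(0, 8, 8, 16));   /* adjacent: no overlap */
        assert(overlaps(0, 9, 8, 16));    /* one byte shared */
        assert(!overlaps(16, 4, 8, 16));  /* starts at exclusive end */
        return 0;
    }
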
@@ -124,7 +124,7 @@ static inline const char *check_kernel_text_object(const void *ptr, | |||
124 | static inline const char *check_bogus_address(const void *ptr, unsigned long n) | 124 | static inline const char *check_bogus_address(const void *ptr, unsigned long n) |
125 | { | 125 | { |
126 | /* Reject if object wraps past end of memory. */ | 126 | /* Reject if object wraps past end of memory. */ |
127 | if (ptr + n < ptr) | 127 | if ((unsigned long)ptr + n < (unsigned long)ptr) |
128 | return "<wrapped address>"; | 128 | return "<wrapped address>"; |
129 | 129 | ||
130 | /* Reject if NULL or ZERO-allocation. */ | 130 | /* Reject if NULL or ZERO-allocation. */ |
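
Computing ptr + n on a pointer and relying on it wrapping past the end of the address space is undefined behaviour in C, whereas overflow on unsigned integers is well defined modular arithmetic, hence the casts. A sketch of the same test in plain C, using uintptr_t in place of the kernel's unsigned long:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t ptr = UINTPTR_MAX - 4;
        size_t n = 16;

        if (ptr + n < ptr)    /* wraps: well-defined on unsigned types */
            printf("wrapped address\n");
        return 0;
    }
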
diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c index 448ed96b3b4f..1c14c2595158 100644 --- a/tools/gpio/gpio-event-mon.c +++ b/tools/gpio/gpio-event-mon.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * gpio-hammer - example swiss army knife to shake GPIO lines on a system | 2 | * gpio-event-mon - monitor GPIO line events from userspace |
3 | * | 3 | * |
4 | * Copyright (C) 2016 Linus Walleij | 4 | * Copyright (C) 2016 Linus Walleij |
5 | * | 5 | * |