134 files changed, 1379 insertions, 1624 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 66ab548ee469..c2016557b294 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10206,6 +10206,13 @@ S:	Maintained
 F:	Documentation/usb/ohci.txt
 F:	drivers/usb/host/ohci*
 
+USB OTG FSM (Finite State Machine)
+M:	Peter Chen <Peter.Chen@freescale.com>
+T:	git git://github.com/hzpeterchen/linux-usb.git
+L:	linux-usb@vger.kernel.org
+S:	Maintained
+F:	drivers/usb/common/usb-otg-fsm.c
+
 USB OVER IP DRIVER
 M:	Valentina Manea <valentina.manea.m@gmail.com>
 M:	Shuah Khan <shuah.kh@samsung.com>
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e55408e96559..1d60bebea4b8 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -246,12 +246,9 @@ static int __get_cpu_architecture(void)
 		if (cpu_arch)
 			cpu_arch += CPU_ARCH_ARMv3;
 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
-		unsigned int mmfr0;
-
 		/* Revised CPUID format. Read the Memory Model Feature
 		 * Register 0 and check for VMSAv7 or PMSAv7 */
-		asm("mrc p15, 0, %0, c0, c1, 4"
-		    : "=r" (mmfr0));
+		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
 		    (mmfr0 & 0x000000f0) >= 0x00000030)
 			cpu_arch = CPU_ARCH_ARMv7;
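Aside: the test above reads MMFR0, whose low nibble describes VMSA support and whose second nibble describes PMSA support; a value of 3 or more in either field is taken as ARMv7. A standalone restatement of that decision in plain C (illustrative only; the masks mirror the hunk, nothing here is a kernel API):

#include <stdio.h>

/* Illustrative re-statement of the MMFR0 check in __get_cpu_architecture(). */
static int is_armv7_mmfr0(unsigned int mmfr0)
{
	/* VMSA support in bits [3:0], PMSA support in bits [7:4]. */
	return (mmfr0 & 0x0000000f) >= 0x00000003 ||
	       (mmfr0 & 0x000000f0) >= 0x00000030;
}

int main(void)
{
	printf("%d\n", is_armv7_mmfr0(0x00000005)); /* VMSAv7 -> 1 */
	printf("%d\n", is_armv7_mmfr0(0x00000002)); /* older  -> 0 */
	return 0;
}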
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c6c7696b8db9..8f15f70622a6 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1131,23 +1131,22 @@ static void __init l2c310_of_parse(const struct device_node *np,
 	}
 
 	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
-	if (ret)
-		return;
-
-	switch (assoc) {
-	case 16:
-		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-		*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
-		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-		break;
-	case 8:
-		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-		break;
-	default:
-		pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
-		       assoc);
-		break;
+	if (!ret) {
+		switch (assoc) {
+		case 16:
+			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+			*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
+			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+			break;
+		case 8:
+			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+			break;
+		default:
+			pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
+			       assoc);
+			break;
+		}
 	}
 
 	prefetch = l2x0_saved_regs.prefetch_ctrl;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 170a116d1b29..c27447653903 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -171,7 +171,7 @@ static int __dma_supported(struct device *dev, u64 mask, bool warn)
 	 */
 	if (sizeof(mask) != sizeof(dma_addr_t) &&
 	    mask > (dma_addr_t)~0 &&
-	    dma_to_pfn(dev, ~0) < max_pfn) {
+	    dma_to_pfn(dev, ~0) < max_pfn - 1) {
 		if (warn) {
 			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
 				 mask);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index a982dc3190df..6333d9c17875 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -552,6 +552,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
 		inf->name, fsr, addr);
+	show_pte(current->mm, addr);
 
 	info.si_signo = inf->sig;
 	info.si_errno = 0;
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index 004e35cdcfff..cf30daff8932 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -49,7 +49,10 @@ static int change_memory_common(unsigned long addr, int numpages,
 		WARN_ON_ONCE(1);
 	}
 
-	if (!is_module_address(start) || !is_module_address(end - 1))
+	if (start < MODULES_VADDR || start >= MODULES_END)
+		return -EINVAL;
+
+	if (end < MODULES_VADDR || start >= MODULES_END)
 		return -EINVAL;
 
 	data.set_mask = set_mask;
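Aside: the replacement check above rejects any request whose addresses do not fall inside the module mapping area. A minimal user-space sketch of that kind of bounds validation (it is not a copy of the hunk's exact conditions, and the MODULES_* values below are made-up placeholders, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Placeholder bounds standing in for MODULES_VADDR / MODULES_END. */
#define MODULES_VADDR 0x7f000000UL
#define MODULES_END   0x7fe00000UL

/* Returns true only if [start, end) lies entirely inside the module area. */
static bool range_in_modules(unsigned long start, unsigned long end)
{
	if (start < MODULES_VADDR || start >= MODULES_END)
		return false;
	if (end <= MODULES_VADDR || end > MODULES_END)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", range_in_modules(0x7f100000UL, 0x7f101000UL)); /* inside  -> 1 */
	printf("%d\n", range_in_modules(0x10000000UL, 0x10001000UL)); /* outside -> 0 */
	return 0;
}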
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 9a8fd84f8fb2..941c375616e2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
 #include <asm/memory.h>
 
-#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+#define cpu_switch_mm(pgd,mm) \
+do { \
+	BUG_ON(pgd == swapper_pg_dir); \
+	cpu_do_switch_mm(virt_to_phys(pgd),mm); \
+} while (0)
 
 #define cpu_get_pgd()	\
 ({	\
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2b8d70164428..ab21e0d58278 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	cpu_switch_mm(mm->pgd, mm);
+	if (mm == &init_mm)
+		cpu_set_reserved_ttbr0();
+	else
+		cpu_switch_mm(mm->pgd, mm);
+
 	flush_tlb_all();
 	if (icache_is_aivivt())
 		__flush_icache_all();
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 58e0c2bdde04..ef7d112f5ce0 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 {
 	unsigned long val;
 	void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 
 		*ret_page = phys_to_page(phys);
 		ptr = (void *)val;
+		if (flags & __GFP_ZERO)
+			memset(ptr, 0, size);
 	}
 
 	return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		flags |= GFP_DMA;
 	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
 		struct page *page;
+		void *addr;
 
 		size = PAGE_ALIGN(size);
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 			return NULL;
 
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		return page_address(page);
+		addr = page_address(page);
+		if (flags & __GFP_ZERO)
+			memset(addr, 0, size);
+		return addr;
 	} else {
 		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
 	}
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 
 	if (!coherent && !(flags & __GFP_WAIT)) {
 		struct page *page = NULL;
-		void *addr = __alloc_from_pool(size, &page);
+		void *addr = __alloc_from_pool(size, &page, flags);
 
 		if (addr)
 			*dma_handle = phys_to_dma(dev, page_to_phys(page));
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 4f6725ff4c33..f5b6537306f0 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
 				   unsigned long reg_val);
 #endif
 
+
+#define HV_FAST_M7_GET_PERFREG	0x43
+#define HV_FAST_M7_SET_PERFREG	0x44
+
+#ifndef __ASSEMBLY__
+unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
+				   unsigned long *reg_val);
+unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
+				   unsigned long reg_val);
+#endif
+
 /* Function numbers for HV_CORE_TRAP.  */
 #define HV_CORE_SET_VER			0x00
 #define HV_CORE_PUTCHAR			0x01
@@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
 #define HV_GRP_SDIO			0x0108
 #define HV_GRP_SDIO_ERR			0x0109
 #define HV_GRP_REBOOT_DATA		0x0110
+#define HV_GRP_M7_PERF			0x0114
 #define HV_GRP_NIAG_PERF		0x0200
 #define HV_GRP_FIRE_PERF		0x0201
 #define HV_GRP_N2_CPU			0x0202
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index 5c55145bfbf0..662500fa555f 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -48,6 +48,7 @@ static struct api_info api_table[] = {
 	{ .group = HV_GRP_VT_CPU, },
 	{ .group = HV_GRP_T5_CPU, },
 	{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
+	{ .group = HV_GRP_M7_PERF, },
 };
 
 static DEFINE_SPINLOCK(hvapi_lock);
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index caedf8320416..afbaba52d2f1 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg)
 	retl
 	 nop
 ENDPROC(sun4v_t5_set_perfreg)
+
+ENTRY(sun4v_m7_get_perfreg)
+	mov	%o1, %o4
+	mov	HV_FAST_M7_GET_PERFREG, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o4]
+	retl
+	 nop
+ENDPROC(sun4v_m7_get_perfreg)
+
+ENTRY(sun4v_m7_set_perfreg)
+	mov	HV_FAST_M7_SET_PERFREG, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(sun4v_m7_set_perfreg)
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 7e967c8018c8..eb978c77c76a 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = {
 	.pcr_nmi_disable = PCR_N4_PICNPT,
 };
 
+static u64 m7_pcr_read(unsigned long reg_num)
+{
+	unsigned long val;
+
+	(void) sun4v_m7_get_perfreg(reg_num, &val);
+
+	return val;
+}
+
+static void m7_pcr_write(unsigned long reg_num, u64 val)
+{
+	(void) sun4v_m7_set_perfreg(reg_num, val);
+}
+
+static const struct pcr_ops m7_pcr_ops = {
+	.read_pcr = m7_pcr_read,
+	.write_pcr = m7_pcr_write,
+	.read_pic = n4_pic_read,
+	.write_pic = n4_pic_write,
+	.nmi_picl_value = n4_picl_value,
+	.pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
+			   PCR_N4_UTRACE | PCR_N4_TOE |
+			   (26 << PCR_N4_SL_SHIFT)),
+	.pcr_nmi_disable = PCR_N4_PICNPT,
+};
 
 static unsigned long perf_hsvc_group;
 static unsigned long perf_hsvc_major;
@@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void)
 		perf_hsvc_group = HV_GRP_T5_CPU;
 		break;
 
+	case SUN4V_CHIP_SPARC_M7:
+		perf_hsvc_group = HV_GRP_M7_PERF;
+		break;
+
 	default:
 		return -ENODEV;
 	}
@@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void)
 		pcr_ops = &n5_pcr_ops;
 		break;
 
+	case SUN4V_CHIP_SPARC_M7:
+		pcr_ops = &m7_pcr_ops;
+		break;
+
 	default:
 		ret = -ENODEV;
 		break;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 46a5e4508752..86eebfa3b158 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
 	.num_pic_regs = 4,
 };
 
+static void sparc_m7_write_pmc(int idx, u64 val)
+{
+	u64 pcr;
+
+	pcr = pcr_ops->read_pcr(idx);
+	/* ensure ov and ntc are reset */
+	pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
+
+	pcr_ops->write_pic(idx, val & 0xffffffff);
+
+	pcr_ops->write_pcr(idx, pcr);
+}
+
+static const struct sparc_pmu sparc_m7_pmu = {
+	.event_map = niagara4_event_map,
+	.cache_map = &niagara4_cache_map,
+	.max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
+	.read_pmc = sparc_vt_read_pmc,
+	.write_pmc = sparc_m7_write_pmc,
+	.upper_shift = 5,
+	.lower_shift = 5,
+	.event_mask = 0x7ff,
+	.user_bit = PCR_N4_UTRACE,
+	.priv_bit = PCR_N4_STRACE,
+
+	/* We explicitly don't support hypervisor tracing. */
+	.hv_bit = 0,
+
+	.irq_bit = PCR_N4_TOE,
+	.upper_nop = 0,
+	.lower_nop = 0,
+	.flags = 0,
+	.max_hw_events = 4,
+	.num_pcrs = 4,
+	.num_pic_regs = 4,
+};
 static const struct sparc_pmu *sparc_pmu __read_mostly;
 
 static u64 event_encoding(u64 event_id, int idx)
@@ -960,6 +996,8 @@ out:
 		cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
 }
 
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
 /* On this PMU each PIC has it's own PCR control register. */
 static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 {
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 		struct perf_event *cp = cpuc->event[i];
 		struct hw_perf_event *hwc = &cp->hw;
 		int idx = hwc->idx;
-		u64 enc;
 
 		if (cpuc->current_idx[i] != PIC_NO_INDEX)
 			continue;
 
-		sparc_perf_event_set_period(cp, hwc, idx);
 		cpuc->current_idx[i] = idx;
 
-		enc = perf_event_get_enc(cpuc->events[i]);
-		cpuc->pcr[idx] &= ~mask_for_index(idx);
-		if (hwc->state & PERF_HES_STOPPED)
-			cpuc->pcr[idx] |= nop_for_index(idx);
-		else
-			cpuc->pcr[idx] |= event_encoding(enc, idx);
+		sparc_pmu_start(cp, PERF_EF_RELOAD);
 	}
 out:
 	for (i = 0; i < cpuc->n_events; i++) {
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
 	int i;
 
 	local_irq_save(flags);
-	perf_pmu_disable(event->pmu);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event[i]) {
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
 		}
 	}
 
-	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
 	if (n0 >= sparc_pmu->max_hw_events)
@@ -1394,7 +1422,6 @@ nocheck:
 
 	ret = 0;
 out:
-	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
 		sparc_pmu = &niagara4_pmu;
 		return true;
 	}
+	if (!strcmp(sparc_pmu_type, "sparc-m7")) {
+		sparc_pmu = &sparc_m7_pmu;
+		return true;
+	}
 	return false;
 }
 
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 0be7bf978cb1..46a59643bb1c 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
 			printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
 			       gp->tpc, gp->o7, gp->i7, gp->rpc);
 		}
+
+		touch_nmi_watchdog();
 	}
 
 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
 		       (cpu == this_cpu ? '*' : ' '), cpu,
 		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
 		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
+
+		touch_nmi_watchdog();
 	}
 
 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index b7f6334e159f..857ad4f8905f 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -8,9 +8,11 @@
 
 	.text
 ENTRY(memmove) /* o0=dst o1=src o2=len */
-	mov	%o0, %g1
+	brz,pn	%o2, 99f
+	 mov	%o0, %g1
+
 	cmp	%o0, %o1
-	bleu,pt	%xcc, memcpy
+	bleu,pt	%xcc, 2f
 	 add	%o1, %o2, %g7
 	cmp	%g7, %o0
 	bleu,pt	%xcc, memcpy
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
 	stb	%g7, [%o0]
 	bne,pt	%icc, 1b
 	 sub	%o0, 1, %o0
-
+99:
 	retl
 	 mov	%g1, %o0
+
+	/* We can't just call memcpy for these memmove cases.  On some
+	 * chips the memcpy uses cache initializing stores and when dst
+	 * and src are close enough, those can clobber the source data
+	 * before we've loaded it in.
+	 */
+2:	or	%o0, %o1, %g7
+	or	%o2, %g7, %g7
+	andcc	%g7, 0x7, %g0
+	bne,pn	%xcc, 4f
+	 nop
+
+3:	ldx	[%o1], %g7
+	add	%o1, 8, %o1
+	subcc	%o2, 8, %o2
+	add	%o0, 8, %o0
+	bne,pt	%icc, 3b
+	 stx	%g7, [%o0 - 0x8]
+	ba,a,pt	%xcc, 99b
+
+4:	ldub	[%o1], %g7
+	add	%o1, 1, %o1
+	subcc	%o2, 1, %o2
+	add	%o0, 1, %o0
+	bne,pt	%icc, 4b
+	 stb	%g7, [%o0 - 0x1]
+	ba,a,pt	%xcc, 99b
 ENDPROC(memmove)
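Aside: the comment block added above explains why this memmove can no longer simply tail-call memcpy — a memcpy built on cache-initializing block stores can clobber source bytes that have not been loaded yet when the regions overlap. In C the same requirement is normally met by picking the copy direction from the pointer order; a plain illustrative version (not the SPARC code above, and not the kernel's memmove):

#include <stddef.h>

/* Overlap-safe copy: copy backwards when dst overlaps the tail of src. */
static void *my_memmove(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (d <= s || d >= s + len) {
		while (len--)		/* no harmful overlap: copy forward */
			*d++ = *s++;
	} else {
		d += len;		/* dst inside [src, src+len): copy backward */
		s += len;
		while (len--)
			*--d = *--s;
	}
	return dst;
}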
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index fa1195dae425..164e3f8d3c3d 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
 extern int (*pcibios_enable_irq)(struct pci_dev *dev);
 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
 
+extern bool mp_should_keep_irq(struct device *dev);
+
 struct pci_raw_ops {
 	int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
 		    int reg, int len, u32 *val);
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 3d2612b68694..2fb384724ebb 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -513,31 +513,6 @@ void __init pcibios_set_cache_line_size(void)
 	}
 }
 
-/*
- * Some device drivers assume dev->irq won't change after calling
- * pci_disable_device(). So delay releasing of IRQ resource to driver
- * unbinding time. Otherwise it will break PM subsystem and drivers
- * like xen-pciback etc.
- */
-static int pci_irq_notifier(struct notifier_block *nb, unsigned long action,
-			    void *data)
-{
-	struct pci_dev *dev = to_pci_dev(data);
-
-	if (action != BUS_NOTIFY_UNBOUND_DRIVER)
-		return NOTIFY_DONE;
-
-	if (pcibios_disable_irq)
-		pcibios_disable_irq(dev);
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block pci_irq_nb = {
-	.notifier_call = pci_irq_notifier,
-	.priority = INT_MIN,
-};
-
 int __init pcibios_init(void)
 {
 	if (!raw_pci_ops) {
@@ -550,9 +525,6 @@ int __init pcibios_init(void)
 
 	if (pci_bf_sort >= pci_force_bf)
 		pci_sort_breadthfirst();
-
-	bus_register_notifier(&pci_bus_type, &pci_irq_nb);
-
 	return 0;
 }
 
@@ -711,6 +683,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	return 0;
 }
 
+void pcibios_disable_device (struct pci_dev *dev)
+{
+	if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
+		pcibios_disable_irq(dev);
+}
+
 int pci_ext_cfg_avail(void)
 {
 	if (raw_pci_ext_ops)
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index efb849323c74..852aa4c92da0 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 
 static void intel_mid_pci_irq_disable(struct pci_dev *dev)
 {
-	if (dev->irq_managed && dev->irq > 0) {
+	if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
+	    dev->irq > 0) {
 		mp_unmap_irq(dev->irq);
 		dev->irq_managed = 0;
-		dev->irq = 0;
 	}
 }
 
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index e71b3dbd87b8..5dc6ca5e1741 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1256,9 +1256,22 @@ static int pirq_enable_irq(struct pci_dev *dev)
 	return 0;
 }
 
+bool mp_should_keep_irq(struct device *dev)
+{
+	if (dev->power.is_prepared)
+		return true;
+#ifdef CONFIG_PM
+	if (dev->power.runtime_status == RPM_SUSPENDING)
+		return true;
+#endif
+
+	return false;
+}
+
 static void pirq_disable_irq(struct pci_dev *dev)
 {
-	if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) {
+	if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
+	    dev->irq_managed && dev->irq) {
 		mp_unmap_irq(dev->irq);
 		dev->irq = 0;
 		dev->irq_managed = 0;
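Aside: mp_should_keep_irq() above keys off the device's power-management bookkeeping so the IOAPIC pin mapping survives suspend paths instead of being torn down. A condensed sketch of the predicate's shape in plain C (the struct and field names below are illustrative placeholders, not the kernel's struct device):

#include <stdbool.h>

enum rpm_status { RPM_ACTIVE, RPM_SUSPENDING, RPM_SUSPENDED };

struct pm_state {
	bool is_prepared;		/* system suspend has started */
	enum rpm_status runtime_status;	/* runtime PM state */
};

/* Keep the IRQ mapping if the device is being suspended in any way. */
static bool should_keep_irq(const struct pm_state *pm)
{
	return pm->is_prepared || pm->runtime_status == RPM_SUSPENDING;
}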
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index e7f718d6918a..b1def411c0b8 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -485,6 +485,14 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
 	if (!pin || !dev->irq_managed || dev->irq <= 0)
 		return;
 
+	/* Keep IOAPIC pin configuration when suspending */
+	if (dev->dev.power.is_prepared)
+		return;
+#ifdef CONFIG_PM
+	if (dev->dev.power.runtime_status == RPM_SUSPENDING)
+		return;
+#endif
+
 	entry = acpi_pci_irq_lookup(dev, pin);
 	if (!entry)
 		return;
@@ -505,6 +513,5 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
 	if (gsi >= 0) {
 		acpi_unregister_gsi(gsi);
 		dev->irq_managed = 0;
-		dev->irq = 0;
 	}
 }
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index 38e68618513a..980151f34707 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -37,11 +37,11 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
 		deepidle = true;
 
 	ret = mvebu_v7_cpu_suspend(deepidle);
+	cpu_pm_exit();
+
 	if (ret)
 		return ret;
 
-	cpu_pm_exit();
-
 	return index;
 }
 
@@ -50,17 +50,17 @@ static struct cpuidle_driver armadaxp_idle_driver = {
 	.states[0] = ARM_CPUIDLE_WFI_STATE,
 	.states[1] = {
 		.enter = mvebu_v7_enter_idle,
-		.exit_latency = 10,
+		.exit_latency = 100,
 		.power_usage = 50,
-		.target_residency = 100,
+		.target_residency = 1000,
 		.name = "MV CPU IDLE",
 		.desc = "CPU power down",
 	},
 	.states[2] = {
 		.enter = mvebu_v7_enter_idle,
-		.exit_latency = 100,
+		.exit_latency = 1000,
 		.power_usage = 5,
-		.target_residency = 1000,
+		.target_residency = 10000,
 		.flags = MVEBU_V7_FLAG_DEEP_IDLE,
 		.name = "MV CPU DEEP IDLE",
 		.desc = "CPU and L2 Fabric power down",
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 4a5fd245014e..83aa55d6fa5d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -97,6 +97,12 @@
 
 #define DRIVER_NAME	"pl08xdmac"
 
+#define PL80X_DMA_BUSWIDTHS \
+	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
 static struct amba_driver pl08x_amba_driver;
 struct pl08x_driver_data;
 
@@ -2070,6 +2076,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->memcpy.device_pause = pl08x_pause;
 	pl08x->memcpy.device_resume = pl08x_resume;
 	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
+	pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
+	pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
 	/* Initialize slave engine */
 	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2086,6 +2096,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->slave.device_pause = pl08x_pause;
 	pl08x->slave.device_resume = pl08x_resume;
 	pl08x->slave.device_terminate_all = pl08x_terminate_all;
+	pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
 	/* Get the platform data */
 	pl08x->pd = dev_get_platdata(&adev->dev);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 1e1a4c567542..0b4fc6fb48ce 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -238,93 +238,126 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 }
 
 /*
- * atc_get_current_descriptors -
- * locate the descriptor which equal to physical address in DSCR
- * @atchan: the channel we want to start
- * @dscr_addr: physical descriptor address in DSCR
+ * atc_get_desc_by_cookie - get the descriptor of a cookie
+ * @atchan: the DMA channel
+ * @cookie: the cookie to get the descriptor for
 */
-static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan,
-		u32 dscr_addr)
+static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
+		dma_cookie_t cookie)
 {
-	struct at_desc *desc, *_desc, *child, *desc_cur = NULL;
+	struct at_desc *desc, *_desc;
 
-	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
-		if (desc->lli.dscr == dscr_addr) {
-			desc_cur = desc;
-			break;
-		}
+	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
+		if (desc->txd.cookie == cookie)
+			return desc;
+	}
 
-		list_for_each_entry(child, &desc->tx_list, desc_node) {
-			if (child->lli.dscr == dscr_addr) {
-				desc_cur = child;
-				break;
-			}
-		}
+	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
+		if (desc->txd.cookie == cookie)
+			return desc;
 	}
 
-	return desc_cur;
+	return NULL;
 }
 
-/*
- * atc_get_bytes_left -
- * Get the number of bytes residue in dma buffer,
- * @chan: the channel we want to start
+/**
+ * atc_calc_bytes_left - calculates the number of bytes left according to the
+ * value read from CTRLA.
+ *
+ * @current_len: the number of bytes left before reading CTRLA
+ * @ctrla: the value of CTRLA
+ * @desc: the descriptor containing the transfer width
+ */
+static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
+					struct at_desc *desc)
+{
+	return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
+}
+
+/**
+ * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
+ * to the current value of CTRLA.
+ *
+ * @current_len: the number of bytes left before reading CTRLA
+ * @atchan: the channel to read CTRLA for
+ * @desc: the descriptor containing the transfer width
+ */
+static inline int atc_calc_bytes_left_from_reg(int current_len,
+		struct at_dma_chan *atchan, struct at_desc *desc)
+{
+	u32 ctrla = channel_readl(atchan, CTRLA);
+
+	return atc_calc_bytes_left(current_len, ctrla, desc);
+}
+
+/**
+ * atc_get_bytes_left - get the number of bytes residue for a cookie
+ * @chan: DMA channel
+ * @cookie: transaction identifier to check status of
 */
-static int atc_get_bytes_left(struct dma_chan *chan)
+static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
-	struct at_dma *atdma = to_at_dma(chan->device);
-	int chan_id = atchan->chan_common.chan_id;
 	struct at_desc *desc_first = atc_first_active(atchan);
-	struct at_desc *desc_cur;
-	int ret = 0, count = 0;
+	struct at_desc *desc;
+	int ret;
+	u32 ctrla, dscr;
 
 	/*
-	 * Initialize necessary values in the first time.
-	 * remain_desc record remain desc length.
+	 * If the cookie doesn't match to the currently running transfer then
+	 * we can return the total length of the associated DMA transfer,
+	 * because it is still queued.
	 */
-	if (atchan->remain_desc == 0)
-		/* First descriptor embedds the transaction length */
-		atchan->remain_desc = desc_first->len;
+	desc = atc_get_desc_by_cookie(atchan, cookie);
+	if (desc == NULL)
+		return -EINVAL;
+	else if (desc != desc_first)
+		return desc->total_len;
 
-	/*
-	 * This happens when current descriptor transfer complete.
-	 * The residual buffer size should reduce current descriptor length.
-	 */
-	if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
-		clear_bit(ATC_IS_BTC, &atchan->status);
-		desc_cur = atc_get_current_descriptors(atchan,
-						channel_readl(atchan, DSCR));
-		if (!desc_cur) {
-			ret = -EINVAL;
-			goto out;
-		}
+	/* cookie matches to the currently running transfer */
+	ret = desc_first->total_len;
 
-		count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
-			<< desc_first->tx_width;
-		if (atchan->remain_desc < count) {
-			ret = -EINVAL;
-			goto out;
+	if (desc_first->lli.dscr) {
+		/* hardware linked list transfer */
+
+		/*
+		 * Calculate the residue by removing the length of the child
+		 * descriptors already transferred from the total length.
+		 * To get the current child descriptor we can use the value of
+		 * the channel's DSCR register and compare it against the value
+		 * of the hardware linked list structure of each child
+		 * descriptor.
+		 */
+
+		ctrla = channel_readl(atchan, CTRLA);
+		rmb(); /* ensure CTRLA is read before DSCR */
+		dscr = channel_readl(atchan, DSCR);
+
+		/* for the first descriptor we can be more accurate */
+		if (desc_first->lli.dscr == dscr)
+			return atc_calc_bytes_left(ret, ctrla, desc_first);
+
+		ret -= desc_first->len;
+		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
+			if (desc->lli.dscr == dscr)
+				break;
+
+			ret -= desc->len;
 		}
 
-		atchan->remain_desc -= count;
-		ret = atchan->remain_desc;
-	} else {
 		/*
-		 * Get residual bytes when current
-		 * descriptor transfer in progress.
+		 * For the last descriptor in the chain we can calculate
+		 * the remaining bytes using the channel's register.
+		 * Note that the transfer width of the first and last
+		 * descriptor may differ.
		 */
-		count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
-				<< (desc_first->tx_width);
-		ret = atchan->remain_desc - count;
+		if (!desc->lli.dscr)
+			ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
+	} else {
+		/* single transfer */
+		ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
 	}
-	/*
-	 * Check fifo empty.
-	 */
-	if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
-		atc_issue_pending(chan);
 
-out:
 	return ret;
 }
 
@@ -539,8 +572,6 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
 				/* Give information to tasklet */
 				set_bit(ATC_IS_ERROR, &atchan->status);
 			}
-			if (pending & AT_DMA_BTC(i))
-				set_bit(ATC_IS_BTC, &atchan->status);
 			tasklet_schedule(&atchan->tasklet);
 			ret = IRQ_HANDLED;
 		}
@@ -653,14 +684,18 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		desc->lli.ctrlb = ctrlb;
 
 		desc->txd.cookie = 0;
+		desc->len = xfer_count << src_width;
 
 		atc_desc_chain(&first, &prev, desc);
 	}
 
 	/* First descriptor of the chain embedds additional information */
 	first->txd.cookie = -EBUSY;
-	first->len = len;
+	first->total_len = len;
+
+	/* set transfer width for the calculation of the residue */
 	first->tx_width = src_width;
+	prev->tx_width = src_width;
 
 	/* set end-of-link to the last link descriptor of list*/
 	set_desc_eol(desc);
@@ -752,6 +787,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 					| ATC_SRC_WIDTH(mem_width)
 					| len >> mem_width;
 			desc->lli.ctrlb = ctrlb;
+			desc->len = len;
 
 			atc_desc_chain(&first, &prev, desc);
 			total_len += len;
@@ -792,6 +828,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 					| ATC_DST_WIDTH(mem_width)
 					| len >> reg_width;
 			desc->lli.ctrlb = ctrlb;
+			desc->len = len;
 
 			atc_desc_chain(&first, &prev, desc);
 			total_len += len;
@@ -806,8 +843,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 	/* First descriptor of the chain embedds additional information */
 	first->txd.cookie = -EBUSY;
-	first->len = total_len;
+	first->total_len = total_len;
+
+	/* set transfer width for the calculation of the residue */
 	first->tx_width = reg_width;
+	prev->tx_width = reg_width;
 
 	/* first link descriptor of list is responsible of flags */
 	first->txd.flags = flags; /* client is in control of this ack */
@@ -872,6 +912,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
 				| ATC_FC_MEM2PER
 				| ATC_SIF(atchan->mem_if)
 				| ATC_DIF(atchan->per_if);
+		desc->len = period_len;
 		break;
 
 	case DMA_DEV_TO_MEM:
@@ -883,6 +924,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
 				| ATC_FC_PER2MEM
 				| ATC_SIF(atchan->per_if)
 				| ATC_DIF(atchan->mem_if);
+		desc->len = period_len;
 		break;
 
 	default:
@@ -964,7 +1006,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 
 	/* First descriptor of the chain embedds additional information */
 	first->txd.cookie = -EBUSY;
-	first->len = buf_len;
+	first->total_len = buf_len;
 	first->tx_width = reg_width;
 
 	return &first->txd;
@@ -1118,7 +1160,7 @@ atc_tx_status(struct dma_chan *chan,
 	spin_lock_irqsave(&atchan->lock, flags);
 
 	/* Get number of bytes left in the active transactions */
-	bytes = atc_get_bytes_left(chan);
+	bytes = atc_get_bytes_left(chan, cookie);
 
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
@@ -1214,7 +1256,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 
 	spin_lock_irqsave(&atchan->lock, flags);
 	atchan->descs_allocated = i;
-	atchan->remain_desc = 0;
 	list_splice(&tmp_list, &atchan->free_list);
 	dma_cookie_init(chan);
 	spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1257,7 +1298,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
 	list_splice_init(&atchan->free_list, &list);
 	atchan->descs_allocated = 0;
 	atchan->status = 0;
-	atchan->remain_desc = 0;
 
 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
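Aside: the reworked atc_get_bytes_left() above computes the residue by subtracting the lengths of the child descriptors that have already completed from the total transfer length, identifying the descriptor still in flight by comparing each descriptor's hardware-list address against the channel's DSCR register. A simplified model of that walk (illustrative C only, with a made-up descriptor struct rather than struct at_desc, and without the CTRLA refinement the driver adds for the running descriptor):

#include <stddef.h>

/* Simplified stand-in for a hardware linked-list descriptor. */
struct desc {
	unsigned long hw_addr;	/* address the controller's DSCR would hold */
	size_t len;		/* bytes covered by this descriptor */
	struct desc *next;
};

/*
 * Residue = total - bytes of descriptors already completed, where the
 * currently running descriptor is the one whose hw_addr matches DSCR.
 */
static size_t residue(const struct desc *first, size_t total,
		      unsigned long current_dscr)
{
	const struct desc *d;
	size_t left = total;

	for (d = first; d; d = d->next) {
		if (d->hw_addr == current_dscr)
			break;		/* this one is still running */
		left -= d->len;		/* fully transferred */
	}
	return left;
}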
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index d6bba6c636c2..2727ca560572 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -181,8 +181,9 @@ struct at_lli {
 * @at_lli: hardware lli structure
 * @txd: support for the async_tx api
 * @desc_node: node on the channed descriptors list
- * @len: total transaction bytecount
+ * @len: descriptor byte count
 * @tx_width: transfer width
+ * @total_len: total transaction byte count
 */
struct at_desc {
	/* FIRST values the hardware uses */
@@ -194,6 +195,7 @@ struct at_desc {
	struct list_head	desc_node;
	size_t			len;
	u32			tx_width;
+	size_t			total_len;
};

static inline struct at_desc *
@@ -213,7 +215,6 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
enum atc_status {
	ATC_IS_ERROR = 0,
	ATC_IS_PAUSED = 1,
-	ATC_IS_BTC = 2,
	ATC_IS_CYCLIC = 24,
};

@@ -231,7 +232,6 @@ enum atc_status {
 * @save_cfg: configuration register that is saved on suspend/resume cycle
 * @save_dscr: for cyclic operations, preserve next descriptor address in
 *	the cyclic list on suspend/resume cycle
- * @remain_desc: to save remain desc length
 * @dma_sconfig: configuration for slave transfers, passed via
 *	.device_config
 * @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -251,7 +251,6 @@ struct at_dma_chan {
	struct tasklet_struct	tasklet;
	u32			save_cfg;
	u32			save_dscr;
-	u32			remain_desc;
	struct dma_slave_config	dma_sconfig;

	spinlock_t		lock;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6565a361e7e5..b2c3ae071429 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -26,6 +26,8 @@
 
 #include "internal.h"
 
+#define DRV_NAME	"dw_dmac"
+
 static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
 					struct of_dma *ofdma)
 {
@@ -284,7 +286,7 @@ static struct platform_driver dw_driver = {
 	.remove		= dw_remove,
 	.shutdown	= dw_shutdown,
 	.driver = {
-		.name	= "dw_dmac",
+		.name	= DRV_NAME,
 		.pm	= &dw_dev_pm_ops,
 		.of_match_table = of_match_ptr(dw_dma_of_id_table),
 		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
@@ -305,3 +307,4 @@ module_exit(dw_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 18c0a131e4e4..66a0efb9651d 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -531,6 +531,10 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
 		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
 	}
 
+	/* Set bits of CONFIG register with dynamic context switching */
+	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
+		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+
 	return ret ? 0 : -ETIMEDOUT;
 }
 
@@ -1394,9 +1398,6 @@ static int sdma_init(struct sdma_engine *sdma)
 
 	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
 
-	/* Set bits of CONFIG register with given context switching mode */
-	writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
-
 	/* Initializes channel's priorities */
 	sdma_set_channel_priority(&sdma->channel[0], 7);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 910ff8ab9c9c..d8135adb2238 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -645,6 +645,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 	pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
 	pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
 
+	init_sdma_vm(dqm, q, qpd);
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
 	if (retval != 0) {
@@ -652,7 +653,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 		return retval;
 	}
 
-	init_sdma_vm(dqm, q, qpd);
+	retval = mqd->load_mqd(mqd, q->mqd, 0,
+				0, NULL);
+	if (retval != 0) {
+		deallocate_sdma_queue(dqm, q->sdma_id);
+		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+		return retval;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index e415a2a9207e..c7d298e62c96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	BUG_ON(!kq || !dev);
 	BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
 
-	pr_debug("kfd: In func %s initializing queue type %d size %d\n",
+	pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
 			__func__, KFD_QUEUE_TYPE_HIQ, queue_size);
 
 	nop.opcode = IT_NOP;
@@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 
 	prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
 
-	if (prop.doorbell_ptr == NULL)
+	if (prop.doorbell_ptr == NULL) {
+		pr_err("amdkfd: error init doorbell");
 		goto err_get_kernel_doorbell;
+	}
 
 	retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
-	if (retval != 0)
+	if (retval != 0) {
+		pr_err("amdkfd: error init pq queues size (%d)\n", queue_size);
 		goto err_pq_allocate_vidmem;
+	}
 
 	kq->pq_kernel_addr = kq->pq->cpu_ptr;
 	kq->pq_gpu_addr = kq->pq->gpu_addr;
@@ -165,10 +169,8 @@ err_rptr_allocate_vidmem:
 err_eop_allocate_vidmem:
 	kfd_gtt_sa_free(dev, kq->pq);
 err_pq_allocate_vidmem:
-	pr_err("kfd: error init pq\n");
 	kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
 err_get_kernel_doorbell:
-	pr_err("kfd: error init doorbell");
 	return false;
 
 }
@@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq)
 	else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
 		kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
 
+	kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
+
 	kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
 	kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
 	kq->ops_asic_specific.uninitialize(kq);
@@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
 	queue_address = (unsigned int *)kq->pq_kernel_addr;
 	queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
 
-	pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
+	pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
 			__func__, rptr, wptr, queue_address);
 
 	available_size = (rptr - 1 - wptr + queue_size_dwords) %
@@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	}
 
 	if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
299 | pr_err("kfd: failed to init kernel queue\n"); | 303 | pr_err("amdkfd: failed to init kernel queue\n"); |
300 | kfree(kq); | 304 | kfree(kq); |
301 | return NULL; | 305 | return NULL; |
302 | } | 306 | } |
@@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) | |||
319 | 323 | ||
320 | BUG_ON(!dev); | 324 | BUG_ON(!dev); |
321 | 325 | ||
322 | pr_err("kfd: starting kernel queue test\n"); | 326 | pr_err("amdkfd: starting kernel queue test\n"); |
323 | 327 | ||
324 | kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); | 328 | kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); |
325 | BUG_ON(!kq); | 329 | BUG_ON(!kq); |
@@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) | |||
330 | buffer[i] = kq->nop_packet; | 334 | buffer[i] = kq->nop_packet; |
331 | kq->ops.submit_packet(kq); | 335 | kq->ops.submit_packet(kq); |
332 | 336 | ||
333 | pr_err("kfd: ending kernel queue test\n"); | 337 | pr_err("amdkfd: ending kernel queue test\n"); |
334 | } | 338 | } |
335 | 339 | ||
336 | 340 | ||
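Besides the amdkfd: prefix changes and the error messages moved to their failure sites, this file contains the ring-buffer math used by acquire_packet_buffer(): free space is (rptr - 1 - wptr + size) % size, which deliberately keeps one slot unused so a full queue can be told apart from an empty one. A self-contained sketch of that formula, with hypothetical queue sizes:

    #include <stdio.h>

    /* Free space in a circular queue, in dwords.  One slot is left unused so
     * that full and empty states stay distinguishable; this is the same
     * expression acquire_packet_buffer() uses above. */
    static unsigned int available_dwords(unsigned int rptr, unsigned int wptr,
                                         unsigned int queue_size_dwords)
    {
            return (rptr - 1 - wptr + queue_size_dwords) % queue_size_dwords;
    }

    int main(void)
    {
            /* Hypothetical 256-dword queue. */
            printf("empty:   %u\n", available_dwords(0, 0, 256));    /* 255 */
            printf("full:    %u\n", available_dwords(10, 9, 256));   /* 0 */
            printf("wrapped: %u\n", available_dwords(4, 200, 256));  /* 59 */
            return 0;
    }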
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index a5e74612100e..0a6780367d28 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig | |||
@@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI | |||
50 | 50 | ||
51 | config DRM_EXYNOS_DP | 51 | config DRM_EXYNOS_DP |
52 | bool "EXYNOS DRM DP driver support" | 52 | bool "EXYNOS DRM DP driver support" |
53 | depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) | 53 | depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) |
54 | default DRM_EXYNOS | 54 | default DRM_EXYNOS |
55 | select DRM_PANEL | 55 | select DRM_PANEL |
56 | help | 56 | help |
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 63f02e2380ae..970046199608 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c | |||
@@ -888,8 +888,8 @@ static int decon_probe(struct platform_device *pdev) | |||
888 | of_node_put(i80_if_timings); | 888 | of_node_put(i80_if_timings); |
889 | 889 | ||
890 | ctx->regs = of_iomap(dev->of_node, 0); | 890 | ctx->regs = of_iomap(dev->of_node, 0); |
891 | if (IS_ERR(ctx->regs)) { | 891 | if (!ctx->regs) { |
892 | ret = PTR_ERR(ctx->regs); | 892 | ret = -ENOMEM; |
893 | goto err_del_component; | 893 | goto err_del_component; |
894 | } | 894 | } |
895 | 895 | ||
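of_iomap() reports failure by returning NULL rather than an ERR_PTR()-encoded value, which is why the IS_ERR()/PTR_ERR() pair above is replaced with a plain NULL test and -ENOMEM. A small sketch of checking such an API, using a stand-in mapping function rather than the real of_iomap():

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for of_iomap(): failure is reported as NULL, never as an
     * ERR_PTR()-style encoded error, so IS_ERR()/PTR_ERR() do not apply. */
    static void *fake_iomap(int should_fail)
    {
            return should_fail ? NULL : malloc(16);
    }

    int main(void)
    {
            void *regs = fake_iomap(1);

            if (!regs) {            /* the correct check for a NULL-on-error API */
                    fprintf(stderr, "mapping failed, treating as -ENOMEM\n");
                    return 1;
            }

            free(regs);
            return 0;
    }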
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c deleted file mode 100644 index ba9b3d5ed672..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ /dev/null | |||
@@ -1,245 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
3 | * Authors: | ||
4 | * Inki Dae <inki.dae@samsung.com> | ||
5 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
6 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <drm/drmP.h> | ||
15 | #include <drm/drm_crtc_helper.h> | ||
16 | |||
17 | #include <drm/exynos_drm.h> | ||
18 | #include "exynos_drm_drv.h" | ||
19 | #include "exynos_drm_encoder.h" | ||
20 | #include "exynos_drm_connector.h" | ||
21 | |||
22 | #define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ | ||
23 | drm_connector) | ||
24 | |||
25 | struct exynos_drm_connector { | ||
26 | struct drm_connector drm_connector; | ||
27 | uint32_t encoder_id; | ||
28 | struct exynos_drm_display *display; | ||
29 | }; | ||
30 | |||
31 | static int exynos_drm_connector_get_modes(struct drm_connector *connector) | ||
32 | { | ||
33 | struct exynos_drm_connector *exynos_connector = | ||
34 | to_exynos_connector(connector); | ||
35 | struct exynos_drm_display *display = exynos_connector->display; | ||
36 | struct edid *edid = NULL; | ||
37 | unsigned int count = 0; | ||
38 | int ret; | ||
39 | |||
40 | /* | ||
41 | * if get_edid() exists then get_edid() callback of hdmi side | ||
42 | * is called to get edid data through i2c interface else | ||
43 | * get timing from the FIMD driver(display controller). | ||
44 | * | ||
45 | * P.S. in case of lcd panel, count is always 1 if success | ||
46 | * because lcd panel has only one mode. | ||
47 | */ | ||
48 | if (display->ops->get_edid) { | ||
49 | edid = display->ops->get_edid(display, connector); | ||
50 | if (IS_ERR_OR_NULL(edid)) { | ||
51 | ret = PTR_ERR(edid); | ||
52 | edid = NULL; | ||
53 | DRM_ERROR("Panel operation get_edid failed %d\n", ret); | ||
54 | goto out; | ||
55 | } | ||
56 | |||
57 | count = drm_add_edid_modes(connector, edid); | ||
58 | if (!count) { | ||
59 | DRM_ERROR("Add edid modes failed %d\n", count); | ||
60 | goto out; | ||
61 | } | ||
62 | |||
63 | drm_mode_connector_update_edid_property(connector, edid); | ||
64 | } else { | ||
65 | struct exynos_drm_panel_info *panel; | ||
66 | struct drm_display_mode *mode = drm_mode_create(connector->dev); | ||
67 | if (!mode) { | ||
68 | DRM_ERROR("failed to create a new display mode.\n"); | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | if (display->ops->get_panel) | ||
73 | panel = display->ops->get_panel(display); | ||
74 | else { | ||
75 | drm_mode_destroy(connector->dev, mode); | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | drm_display_mode_from_videomode(&panel->vm, mode); | ||
80 | mode->width_mm = panel->width_mm; | ||
81 | mode->height_mm = panel->height_mm; | ||
82 | connector->display_info.width_mm = mode->width_mm; | ||
83 | connector->display_info.height_mm = mode->height_mm; | ||
84 | |||
85 | mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; | ||
86 | drm_mode_set_name(mode); | ||
87 | drm_mode_probed_add(connector, mode); | ||
88 | |||
89 | count = 1; | ||
90 | } | ||
91 | |||
92 | out: | ||
93 | kfree(edid); | ||
94 | return count; | ||
95 | } | ||
96 | |||
97 | static int exynos_drm_connector_mode_valid(struct drm_connector *connector, | ||
98 | struct drm_display_mode *mode) | ||
99 | { | ||
100 | struct exynos_drm_connector *exynos_connector = | ||
101 | to_exynos_connector(connector); | ||
102 | struct exynos_drm_display *display = exynos_connector->display; | ||
103 | int ret = MODE_BAD; | ||
104 | |||
105 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
106 | |||
107 | if (display->ops->check_mode) | ||
108 | if (!display->ops->check_mode(display, mode)) | ||
109 | ret = MODE_OK; | ||
110 | |||
111 | return ret; | ||
112 | } | ||
113 | |||
114 | static struct drm_encoder *exynos_drm_best_encoder( | ||
115 | struct drm_connector *connector) | ||
116 | { | ||
117 | struct drm_device *dev = connector->dev; | ||
118 | struct exynos_drm_connector *exynos_connector = | ||
119 | to_exynos_connector(connector); | ||
120 | return drm_encoder_find(dev, exynos_connector->encoder_id); | ||
121 | } | ||
122 | |||
123 | static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { | ||
124 | .get_modes = exynos_drm_connector_get_modes, | ||
125 | .mode_valid = exynos_drm_connector_mode_valid, | ||
126 | .best_encoder = exynos_drm_best_encoder, | ||
127 | }; | ||
128 | |||
129 | static int exynos_drm_connector_fill_modes(struct drm_connector *connector, | ||
130 | unsigned int max_width, unsigned int max_height) | ||
131 | { | ||
132 | struct exynos_drm_connector *exynos_connector = | ||
133 | to_exynos_connector(connector); | ||
134 | struct exynos_drm_display *display = exynos_connector->display; | ||
135 | unsigned int width, height; | ||
136 | |||
137 | width = max_width; | ||
138 | height = max_height; | ||
139 | |||
140 | /* | ||
141 | * if specific driver want to find desired_mode using maxmum | ||
142 | * resolution then get max width and height from that driver. | ||
143 | */ | ||
144 | if (display->ops->get_max_resol) | ||
145 | display->ops->get_max_resol(display, &width, &height); | ||
146 | |||
147 | return drm_helper_probe_single_connector_modes(connector, width, | ||
148 | height); | ||
149 | } | ||
150 | |||
151 | /* get detection status of display device. */ | ||
152 | static enum drm_connector_status | ||
153 | exynos_drm_connector_detect(struct drm_connector *connector, bool force) | ||
154 | { | ||
155 | struct exynos_drm_connector *exynos_connector = | ||
156 | to_exynos_connector(connector); | ||
157 | struct exynos_drm_display *display = exynos_connector->display; | ||
158 | enum drm_connector_status status = connector_status_disconnected; | ||
159 | |||
160 | if (display->ops->is_connected) { | ||
161 | if (display->ops->is_connected(display)) | ||
162 | status = connector_status_connected; | ||
163 | else | ||
164 | status = connector_status_disconnected; | ||
165 | } | ||
166 | |||
167 | return status; | ||
168 | } | ||
169 | |||
170 | static void exynos_drm_connector_destroy(struct drm_connector *connector) | ||
171 | { | ||
172 | struct exynos_drm_connector *exynos_connector = | ||
173 | to_exynos_connector(connector); | ||
174 | |||
175 | drm_connector_unregister(connector); | ||
176 | drm_connector_cleanup(connector); | ||
177 | kfree(exynos_connector); | ||
178 | } | ||
179 | |||
180 | static struct drm_connector_funcs exynos_connector_funcs = { | ||
181 | .dpms = drm_helper_connector_dpms, | ||
182 | .fill_modes = exynos_drm_connector_fill_modes, | ||
183 | .detect = exynos_drm_connector_detect, | ||
184 | .destroy = exynos_drm_connector_destroy, | ||
185 | }; | ||
186 | |||
187 | struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, | ||
188 | struct drm_encoder *encoder) | ||
189 | { | ||
190 | struct exynos_drm_connector *exynos_connector; | ||
191 | struct exynos_drm_display *display = exynos_drm_get_display(encoder); | ||
192 | struct drm_connector *connector; | ||
193 | int type; | ||
194 | int err; | ||
195 | |||
196 | exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); | ||
197 | if (!exynos_connector) | ||
198 | return NULL; | ||
199 | |||
200 | connector = &exynos_connector->drm_connector; | ||
201 | |||
202 | switch (display->type) { | ||
203 | case EXYNOS_DISPLAY_TYPE_HDMI: | ||
204 | type = DRM_MODE_CONNECTOR_HDMIA; | ||
205 | connector->interlace_allowed = true; | ||
206 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
207 | break; | ||
208 | case EXYNOS_DISPLAY_TYPE_VIDI: | ||
209 | type = DRM_MODE_CONNECTOR_VIRTUAL; | ||
210 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
211 | break; | ||
212 | default: | ||
213 | type = DRM_MODE_CONNECTOR_Unknown; | ||
214 | break; | ||
215 | } | ||
216 | |||
217 | drm_connector_init(dev, connector, &exynos_connector_funcs, type); | ||
218 | drm_connector_helper_add(connector, &exynos_connector_helper_funcs); | ||
219 | |||
220 | err = drm_connector_register(connector); | ||
221 | if (err) | ||
222 | goto err_connector; | ||
223 | |||
224 | exynos_connector->encoder_id = encoder->base.id; | ||
225 | exynos_connector->display = display; | ||
226 | connector->dpms = DRM_MODE_DPMS_OFF; | ||
227 | connector->encoder = encoder; | ||
228 | |||
229 | err = drm_mode_connector_attach_encoder(connector, encoder); | ||
230 | if (err) { | ||
231 | DRM_ERROR("failed to attach a connector to a encoder\n"); | ||
232 | goto err_sysfs; | ||
233 | } | ||
234 | |||
235 | DRM_DEBUG_KMS("connector has been created\n"); | ||
236 | |||
237 | return connector; | ||
238 | |||
239 | err_sysfs: | ||
240 | drm_connector_unregister(connector); | ||
241 | err_connector: | ||
242 | drm_connector_cleanup(connector); | ||
243 | kfree(exynos_connector); | ||
244 | return NULL; | ||
245 | } | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h deleted file mode 100644 index 4eb20d78379a..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.h +++ /dev/null | |||
@@ -1,20 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
3 | * Authors: | ||
4 | * Inki Dae <inki.dae@samsung.com> | ||
5 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
6 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #ifndef _EXYNOS_DRM_CONNECTOR_H_ | ||
15 | #define _EXYNOS_DRM_CONNECTOR_H_ | ||
16 | |||
17 | struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, | ||
18 | struct drm_encoder *encoder); | ||
19 | |||
20 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 925fc69af1a0..c300e22da8ac 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -284,14 +284,9 @@ static void fimd_clear_channel(struct fimd_context *ctx) | |||
284 | } | 284 | } |
285 | } | 285 | } |
286 | 286 | ||
287 | static int fimd_ctx_initialize(struct fimd_context *ctx, | 287 | static int fimd_iommu_attach_devices(struct fimd_context *ctx, |
288 | struct drm_device *drm_dev) | 288 | struct drm_device *drm_dev) |
289 | { | 289 | { |
290 | struct exynos_drm_private *priv; | ||
291 | priv = drm_dev->dev_private; | ||
292 | |||
293 | ctx->drm_dev = drm_dev; | ||
294 | ctx->pipe = priv->pipe++; | ||
295 | 290 | ||
296 | /* attach this sub driver to iommu mapping if supported. */ | 291 | /* attach this sub driver to iommu mapping if supported. */ |
297 | if (is_drm_iommu_supported(ctx->drm_dev)) { | 292 | if (is_drm_iommu_supported(ctx->drm_dev)) { |
@@ -313,7 +308,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx, | |||
313 | return 0; | 308 | return 0; |
314 | } | 309 | } |
315 | 310 | ||
316 | static void fimd_ctx_remove(struct fimd_context *ctx) | 311 | static void fimd_iommu_detach_devices(struct fimd_context *ctx) |
317 | { | 312 | { |
318 | /* detach this sub driver from iommu mapping if supported. */ | 313 | /* detach this sub driver from iommu mapping if supported. */ |
319 | if (is_drm_iommu_supported(ctx->drm_dev)) | 314 | if (is_drm_iommu_supported(ctx->drm_dev)) |
@@ -1056,25 +1051,23 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) | |||
1056 | { | 1051 | { |
1057 | struct fimd_context *ctx = dev_get_drvdata(dev); | 1052 | struct fimd_context *ctx = dev_get_drvdata(dev); |
1058 | struct drm_device *drm_dev = data; | 1053 | struct drm_device *drm_dev = data; |
1054 | struct exynos_drm_private *priv = drm_dev->dev_private; | ||
1059 | int ret; | 1055 | int ret; |
1060 | 1056 | ||
1061 | ret = fimd_ctx_initialize(ctx, drm_dev); | 1057 | ctx->drm_dev = drm_dev; |
1062 | if (ret) { | 1058 | ctx->pipe = priv->pipe++; |
1063 | DRM_ERROR("fimd_ctx_initialize failed.\n"); | ||
1064 | return ret; | ||
1065 | } | ||
1066 | 1059 | ||
1067 | ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, | 1060 | ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, |
1068 | EXYNOS_DISPLAY_TYPE_LCD, | 1061 | EXYNOS_DISPLAY_TYPE_LCD, |
1069 | &fimd_crtc_ops, ctx); | 1062 | &fimd_crtc_ops, ctx); |
1070 | if (IS_ERR(ctx->crtc)) { | ||
1071 | fimd_ctx_remove(ctx); | ||
1072 | return PTR_ERR(ctx->crtc); | ||
1073 | } | ||
1074 | 1063 | ||
1075 | if (ctx->display) | 1064 | if (ctx->display) |
1076 | exynos_drm_create_enc_conn(drm_dev, ctx->display); | 1065 | exynos_drm_create_enc_conn(drm_dev, ctx->display); |
1077 | 1066 | ||
1067 | ret = fimd_iommu_attach_devices(ctx, drm_dev); | ||
1068 | if (ret) | ||
1069 | return ret; | ||
1070 | |||
1078 | return 0; | 1071 | return 0; |
1079 | 1072 | ||
1080 | } | 1073 | } |
@@ -1086,10 +1079,10 @@ static void fimd_unbind(struct device *dev, struct device *master, | |||
1086 | 1079 | ||
1087 | fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); | 1080 | fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); |
1088 | 1081 | ||
1082 | fimd_iommu_detach_devices(ctx); | ||
1083 | |||
1089 | if (ctx->display) | 1084 | if (ctx->display) |
1090 | exynos_dpi_remove(ctx->display); | 1085 | exynos_dpi_remove(ctx->display); |
1091 | |||
1092 | fimd_ctx_remove(ctx); | ||
1093 | } | 1086 | } |
1094 | 1087 | ||
1095 | static const struct component_ops fimd_component_ops = { | 1088 | static const struct component_ops fimd_component_ops = { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index a5616872eee7..8ad5b7294eb4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c | |||
@@ -175,7 +175,7 @@ static int exynos_disable_plane(struct drm_plane *plane) | |||
175 | struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); | 175 | struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); |
176 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); | 176 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); |
177 | 177 | ||
178 | if (exynos_crtc->ops->win_disable) | 178 | if (exynos_crtc && exynos_crtc->ops->win_disable) |
179 | exynos_crtc->ops->win_disable(exynos_crtc, | 179 | exynos_crtc->ops->win_disable(exynos_crtc, |
180 | exynos_plane->zpos); | 180 | exynos_plane->zpos); |
181 | 181 | ||
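The one-line fix above guards against a plane whose CRTC pointer is still NULL before dereferencing its ops. A minimal sketch of the same guard with toy structures (not the real exynos types):

    #include <stdio.h>

    /* Toy types; the real driver uses struct exynos_drm_crtc and its ops. */
    struct crtc_ops { void (*win_disable)(int zpos); };
    struct crtc     { const struct crtc_ops *ops; };

    static void win_disable(int zpos)
    {
            printf("disabling window %d\n", zpos);
    }

    /* Mirror of the fix: the crtc may be NULL for a plane that was never
     * attached, so test it before looking at its (optional) callback. */
    static void disable_plane(struct crtc *crtc, int zpos)
    {
            if (crtc && crtc->ops->win_disable)
                    crtc->ops->win_disable(zpos);
    }

    int main(void)
    {
            const struct crtc_ops ops = { .win_disable = win_disable };
            struct crtc c = { .ops = &ops };

            disable_plane(NULL, 0);         /* plane without a crtc: no crash */
            disable_plane(&c, 0);
            return 0;
    }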
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9943c20a741d..6d22128d97b1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <drm/i915_drm.h> | 37 | #include <drm/i915_drm.h> |
38 | #include "i915_drv.h" | 38 | #include "i915_drv.h" |
39 | #include "i915_trace.h" | 39 | #include "i915_trace.h" |
40 | #include <drm/drm_atomic.h> | ||
40 | #include <drm/drm_atomic_helper.h> | 41 | #include <drm/drm_atomic_helper.h> |
41 | #include <drm/drm_dp_helper.h> | 42 | #include <drm/drm_dp_helper.h> |
42 | #include <drm/drm_crtc_helper.h> | 43 | #include <drm/drm_crtc_helper.h> |
@@ -2416,6 +2417,14 @@ out_unref_obj: | |||
2416 | return false; | 2417 | return false; |
2417 | } | 2418 | } |
2418 | 2419 | ||
2420 | /* Update plane->state->fb to match plane->fb after driver-internal updates */ | ||
2421 | static void | ||
2422 | update_state_fb(struct drm_plane *plane) | ||
2423 | { | ||
2424 | if (plane->fb != plane->state->fb) | ||
2425 | drm_atomic_set_fb_for_plane(plane->state, plane->fb); | ||
2426 | } | ||
2427 | |||
2419 | static void | 2428 | static void |
2420 | intel_find_plane_obj(struct intel_crtc *intel_crtc, | 2429 | intel_find_plane_obj(struct intel_crtc *intel_crtc, |
2421 | struct intel_initial_plane_config *plane_config) | 2430 | struct intel_initial_plane_config *plane_config) |
@@ -2462,6 +2471,8 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, | |||
2462 | break; | 2471 | break; |
2463 | } | 2472 | } |
2464 | } | 2473 | } |
2474 | |||
2475 | update_state_fb(intel_crtc->base.primary); | ||
2465 | } | 2476 | } |
2466 | 2477 | ||
2467 | static void i9xx_update_primary_plane(struct drm_crtc *crtc, | 2478 | static void i9xx_update_primary_plane(struct drm_crtc *crtc, |
@@ -6602,6 +6613,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, | |||
6602 | struct drm_framebuffer *fb; | 6613 | struct drm_framebuffer *fb; |
6603 | struct intel_framebuffer *intel_fb; | 6614 | struct intel_framebuffer *intel_fb; |
6604 | 6615 | ||
6616 | val = I915_READ(DSPCNTR(plane)); | ||
6617 | if (!(val & DISPLAY_PLANE_ENABLE)) | ||
6618 | return; | ||
6619 | |||
6605 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | 6620 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); |
6606 | if (!intel_fb) { | 6621 | if (!intel_fb) { |
6607 | DRM_DEBUG_KMS("failed to alloc fb\n"); | 6622 | DRM_DEBUG_KMS("failed to alloc fb\n"); |
@@ -6610,8 +6625,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, | |||
6610 | 6625 | ||
6611 | fb = &intel_fb->base; | 6626 | fb = &intel_fb->base; |
6612 | 6627 | ||
6613 | val = I915_READ(DSPCNTR(plane)); | ||
6614 | |||
6615 | if (INTEL_INFO(dev)->gen >= 4) | 6628 | if (INTEL_INFO(dev)->gen >= 4) |
6616 | if (val & DISPPLANE_TILED) | 6629 | if (val & DISPPLANE_TILED) |
6617 | plane_config->tiling = I915_TILING_X; | 6630 | plane_config->tiling = I915_TILING_X; |
@@ -6650,6 +6663,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, | |||
6650 | plane_config->size); | 6663 | plane_config->size); |
6651 | 6664 | ||
6652 | crtc->base.primary->fb = fb; | 6665 | crtc->base.primary->fb = fb; |
6666 | update_state_fb(crtc->base.primary); | ||
6653 | } | 6667 | } |
6654 | 6668 | ||
6655 | static void chv_crtc_clock_get(struct intel_crtc *crtc, | 6669 | static void chv_crtc_clock_get(struct intel_crtc *crtc, |
@@ -7643,6 +7657,9 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7643 | fb = &intel_fb->base; | 7657 | fb = &intel_fb->base; |
7644 | 7658 | ||
7645 | val = I915_READ(PLANE_CTL(pipe, 0)); | 7659 | val = I915_READ(PLANE_CTL(pipe, 0)); |
7660 | if (!(val & PLANE_CTL_ENABLE)) | ||
7661 | goto error; | ||
7662 | |||
7646 | if (val & PLANE_CTL_TILED_MASK) | 7663 | if (val & PLANE_CTL_TILED_MASK) |
7647 | plane_config->tiling = I915_TILING_X; | 7664 | plane_config->tiling = I915_TILING_X; |
7648 | 7665 | ||
@@ -7687,6 +7704,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7687 | plane_config->size); | 7704 | plane_config->size); |
7688 | 7705 | ||
7689 | crtc->base.primary->fb = fb; | 7706 | crtc->base.primary->fb = fb; |
7707 | update_state_fb(crtc->base.primary); | ||
7690 | return; | 7708 | return; |
7691 | 7709 | ||
7692 | error: | 7710 | error: |
@@ -7730,6 +7748,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7730 | struct drm_framebuffer *fb; | 7748 | struct drm_framebuffer *fb; |
7731 | struct intel_framebuffer *intel_fb; | 7749 | struct intel_framebuffer *intel_fb; |
7732 | 7750 | ||
7751 | val = I915_READ(DSPCNTR(pipe)); | ||
7752 | if (!(val & DISPLAY_PLANE_ENABLE)) | ||
7753 | return; | ||
7754 | |||
7733 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | 7755 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); |
7734 | if (!intel_fb) { | 7756 | if (!intel_fb) { |
7735 | DRM_DEBUG_KMS("failed to alloc fb\n"); | 7757 | DRM_DEBUG_KMS("failed to alloc fb\n"); |
@@ -7738,8 +7760,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7738 | 7760 | ||
7739 | fb = &intel_fb->base; | 7761 | fb = &intel_fb->base; |
7740 | 7762 | ||
7741 | val = I915_READ(DSPCNTR(pipe)); | ||
7742 | |||
7743 | if (INTEL_INFO(dev)->gen >= 4) | 7763 | if (INTEL_INFO(dev)->gen >= 4) |
7744 | if (val & DISPPLANE_TILED) | 7764 | if (val & DISPPLANE_TILED) |
7745 | plane_config->tiling = I915_TILING_X; | 7765 | plane_config->tiling = I915_TILING_X; |
@@ -7778,6 +7798,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7778 | plane_config->size); | 7798 | plane_config->size); |
7779 | 7799 | ||
7780 | crtc->base.primary->fb = fb; | 7800 | crtc->base.primary->fb = fb; |
7801 | update_state_fb(crtc->base.primary); | ||
7781 | } | 7802 | } |
7782 | 7803 | ||
7783 | static bool ironlake_get_pipe_config(struct intel_crtc *crtc, | 7804 | static bool ironlake_get_pipe_config(struct intel_crtc *crtc, |
@@ -9816,6 +9837,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9816 | drm_gem_object_reference(&obj->base); | 9837 | drm_gem_object_reference(&obj->base); |
9817 | 9838 | ||
9818 | crtc->primary->fb = fb; | 9839 | crtc->primary->fb = fb; |
9840 | update_state_fb(crtc->primary); | ||
9819 | 9841 | ||
9820 | work->pending_flip_obj = obj; | 9842 | work->pending_flip_obj = obj; |
9821 | 9843 | ||
@@ -9884,6 +9906,7 @@ cleanup_unpin: | |||
9884 | cleanup_pending: | 9906 | cleanup_pending: |
9885 | atomic_dec(&intel_crtc->unpin_work_count); | 9907 | atomic_dec(&intel_crtc->unpin_work_count); |
9886 | crtc->primary->fb = old_fb; | 9908 | crtc->primary->fb = old_fb; |
9909 | update_state_fb(crtc->primary); | ||
9887 | drm_gem_object_unreference(&work->old_fb_obj->base); | 9910 | drm_gem_object_unreference(&work->old_fb_obj->base); |
9888 | drm_gem_object_unreference(&obj->base); | 9911 | drm_gem_object_unreference(&obj->base); |
9889 | mutex_unlock(&dev->struct_mutex); | 9912 | mutex_unlock(&dev->struct_mutex); |
@@ -13718,6 +13741,7 @@ void intel_modeset_gem_init(struct drm_device *dev) | |||
13718 | to_intel_crtc(c)->pipe); | 13741 | to_intel_crtc(c)->pipe); |
13719 | drm_framebuffer_unreference(c->primary->fb); | 13742 | drm_framebuffer_unreference(c->primary->fb); |
13720 | c->primary->fb = NULL; | 13743 | c->primary->fb = NULL; |
13744 | update_state_fb(c->primary); | ||
13721 | } | 13745 | } |
13722 | } | 13746 | } |
13723 | mutex_unlock(&dev->struct_mutex); | 13747 | mutex_unlock(&dev->struct_mutex); |
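update_state_fb(), added above, keeps the atomic plane->state->fb pointer consistent with the legacy plane->fb whenever the driver assigns the latter directly. The toy model below only mirrors the pointer; the real helper goes through drm_atomic_set_fb_for_plane(), which also handles framebuffer reference counting.

    #include <stdio.h>

    /* Toy model of the two framebuffer pointers kept in sync. */
    struct fb    { const char *name; };
    struct state { struct fb *fb; };
    struct plane { struct fb *fb; struct state *state; };

    /* Same idea as update_state_fb(): after a driver-internal assignment to
     * plane->fb, mirror it into the atomic state so the two never diverge. */
    static void update_state_fb(struct plane *plane)
    {
            if (plane->fb != plane->state->fb)
                    plane->state->fb = plane->fb;
    }

    int main(void)
    {
            struct fb old_fb = { "old" }, new_fb = { "new" };
            struct state st = { .fb = &old_fb };
            struct plane pl = { .fb = &old_fb, .state = &st };

            pl.fb = &new_fb;        /* legacy pointer updated directly ...     */
            update_state_fb(&pl);   /* ... so the atomic state is resynced too */
            printf("state fb is now: %s\n", pl.state->fb->name);
            return 0;
    }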
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 29bd539af183..6efa8f38ff54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | |||
@@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
340 | 340 | ||
341 | /* switch mmio to cpu's native endianness */ | 341 | /* switch mmio to cpu's native endianness */ |
342 | #ifndef __BIG_ENDIAN | 342 | #ifndef __BIG_ENDIAN |
343 | if (ioread32_native(map + 0x000004) != 0x00000000) | 343 | if (ioread32_native(map + 0x000004) != 0x00000000) { |
344 | #else | 344 | #else |
345 | if (ioread32_native(map + 0x000004) == 0x00000000) | 345 | if (ioread32_native(map + 0x000004) == 0x00000000) { |
346 | #endif | 346 | #endif |
347 | iowrite32_native(0x01000001, map + 0x000004); | 347 | iowrite32_native(0x01000001, map + 0x000004); |
348 | ioread32_native(map); | ||
349 | } | ||
348 | 350 | ||
349 | /* read boot0 and strapping information */ | 351 | /* read boot0 and strapping information */ |
350 | boot0 = ioread32_native(map + 0x000000); | 352 | boot0 = ioread32_native(map + 0x000000); |
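The added braces and the ioread32_native(map) turn the byte-order switch into write-then-read, so the posted MMIO write is flushed before boot0 and the strapping registers are sampled. A sketch of that pattern against a simulated register file (the offsets and magic values here are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Simulated register file standing in for the BAR0 mapping. */
    static volatile uint32_t regs[4];

    static uint32_t rd32(unsigned int i)             { return regs[i]; }
    static void     wr32(unsigned int i, uint32_t v) { regs[i] = v; }

    int main(void)
    {
            regs[1] = 0xdeadbeef;   /* pretend the card is in the other byte order */

            /* Pattern from the hunk: switch the MMIO byte order, then read the
             * block once so the posted write is flushed before boot0 and the
             * strapping registers are sampled. */
            if (rd32(1) != 0x00000000) {
                    wr32(1, 0x01000001);
                    (void)rd32(0);                  /* posting read */
            }

            printf("boot0 = 0x%08x\n", (unsigned)rd32(0));
            return 0;
    }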
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c index 539561ed3281..108d048da764 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c | |||
@@ -142,6 +142,49 @@ gm100_identify(struct nvkm_device *device) | |||
142 | device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; | 142 | device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; |
143 | #endif | 143 | #endif |
144 | break; | 144 | break; |
145 | case 0x126: | ||
146 | device->cname = "GM206"; | ||
147 | device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass; | ||
148 | device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass; | ||
149 | device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass; | ||
150 | device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass; | ||
151 | #if 0 | ||
152 | /* looks to be some non-trivial changes */ | ||
153 | device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass; | ||
154 | /* priv ring says no to 0x10eb14 writes */ | ||
155 | device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass; | ||
156 | #endif | ||
157 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | ||
158 | device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass; | ||
159 | device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; | ||
160 | device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass; | ||
161 | device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; | ||
162 | device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass; | ||
163 | device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass; | ||
164 | device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass; | ||
165 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | ||
166 | device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass; | ||
167 | device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass; | ||
168 | device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass; | ||
169 | #if 0 | ||
170 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | ||
171 | #endif | ||
172 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass; | ||
173 | #if 0 | ||
174 | device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass; | ||
175 | device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass; | ||
176 | device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass; | ||
177 | #endif | ||
178 | device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass; | ||
179 | #if 0 | ||
180 | device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass; | ||
181 | device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass; | ||
182 | device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass; | ||
183 | device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass; | ||
184 | device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass; | ||
185 | device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; | ||
186 | #endif | ||
187 | break; | ||
145 | default: | 188 | default: |
146 | nv_fatal(device, "unknown Maxwell chipset\n"); | 189 | nv_fatal(device, "unknown Maxwell chipset\n"); |
147 | return -EINVAL; | 190 | return -EINVAL; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c index b038b6eb51db..043e4296084c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c | |||
@@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev) | |||
502 | { | 502 | { |
503 | struct nvkm_device *device = nv_device(subdev); | 503 | struct nvkm_device *device = nv_device(subdev); |
504 | struct nv04_fifo_priv *priv = (void *)subdev; | 504 | struct nv04_fifo_priv *priv = (void *)subdev; |
505 | uint32_t status, reassign; | 505 | u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0); |
506 | int cnt = 0; | 506 | u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask; |
507 | u32 reassign, chid, get, sem; | ||
507 | 508 | ||
508 | reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; | 509 | reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; |
509 | while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { | 510 | nv_wr32(priv, NV03_PFIFO_CACHES, 0); |
510 | uint32_t chid, get; | ||
511 | |||
512 | nv_wr32(priv, NV03_PFIFO_CACHES, 0); | ||
513 | |||
514 | chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; | ||
515 | get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); | ||
516 | 511 | ||
517 | if (status & NV_PFIFO_INTR_CACHE_ERROR) { | 512 | chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; |
518 | nv04_fifo_cache_error(device, priv, chid, get); | 513 | get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); |
519 | status &= ~NV_PFIFO_INTR_CACHE_ERROR; | ||
520 | } | ||
521 | 514 | ||
522 | if (status & NV_PFIFO_INTR_DMA_PUSHER) { | 515 | if (stat & NV_PFIFO_INTR_CACHE_ERROR) { |
523 | nv04_fifo_dma_pusher(device, priv, chid); | 516 | nv04_fifo_cache_error(device, priv, chid, get); |
524 | status &= ~NV_PFIFO_INTR_DMA_PUSHER; | 517 | stat &= ~NV_PFIFO_INTR_CACHE_ERROR; |
525 | } | 518 | } |
526 | 519 | ||
527 | if (status & NV_PFIFO_INTR_SEMAPHORE) { | 520 | if (stat & NV_PFIFO_INTR_DMA_PUSHER) { |
528 | uint32_t sem; | 521 | nv04_fifo_dma_pusher(device, priv, chid); |
522 | stat &= ~NV_PFIFO_INTR_DMA_PUSHER; | ||
523 | } | ||
529 | 524 | ||
530 | status &= ~NV_PFIFO_INTR_SEMAPHORE; | 525 | if (stat & NV_PFIFO_INTR_SEMAPHORE) { |
531 | nv_wr32(priv, NV03_PFIFO_INTR_0, | 526 | stat &= ~NV_PFIFO_INTR_SEMAPHORE; |
532 | NV_PFIFO_INTR_SEMAPHORE); | 527 | nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); |
533 | 528 | ||
534 | sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); | 529 | sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); |
535 | nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); | 530 | nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); |
536 | 531 | ||
537 | nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); | 532 | nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); |
538 | nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); | 533 | nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); |
539 | } | 534 | } |
540 | 535 | ||
541 | if (device->card_type == NV_50) { | 536 | if (device->card_type == NV_50) { |
542 | if (status & 0x00000010) { | 537 | if (stat & 0x00000010) { |
543 | status &= ~0x00000010; | 538 | stat &= ~0x00000010; |
544 | nv_wr32(priv, 0x002100, 0x00000010); | 539 | nv_wr32(priv, 0x002100, 0x00000010); |
545 | } | ||
546 | |||
547 | if (status & 0x40000000) { | ||
548 | nv_wr32(priv, 0x002100, 0x40000000); | ||
549 | nvkm_fifo_uevent(&priv->base); | ||
550 | status &= ~0x40000000; | ||
551 | } | ||
552 | } | 540 | } |
553 | 541 | ||
554 | if (status) { | 542 | if (stat & 0x40000000) { |
555 | nv_warn(priv, "unknown intr 0x%08x, ch %d\n", | 543 | nv_wr32(priv, 0x002100, 0x40000000); |
556 | status, chid); | 544 | nvkm_fifo_uevent(&priv->base); |
557 | nv_wr32(priv, NV03_PFIFO_INTR_0, status); | 545 | stat &= ~0x40000000; |
558 | status = 0; | ||
559 | } | 546 | } |
560 | |||
561 | nv_wr32(priv, NV03_PFIFO_CACHES, reassign); | ||
562 | } | 547 | } |
563 | 548 | ||
564 | if (status) { | 549 | if (stat) { |
565 | nv_error(priv, "still angry after %d spins, halt\n", cnt); | 550 | nv_warn(priv, "unknown intr 0x%08x\n", stat); |
566 | nv_wr32(priv, 0x002140, 0); | 551 | nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); |
567 | nv_wr32(priv, 0x000140, 0); | 552 | nv_wr32(priv, NV03_PFIFO_INTR_0, stat); |
568 | } | 553 | } |
569 | 554 | ||
570 | nv_wr32(priv, 0x000100, 0x00000100); | 555 | nv_wr32(priv, NV03_PFIFO_CACHES, reassign); |
571 | } | 556 | } |
572 | 557 | ||
573 | static int | 558 | static int |
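The reworked nv04_fifo_intr() reads the interrupt enable mask, ANDs it with the raw status, handles the sources it knows, and masks off anything left over instead of spinning up to 100 times and halting. A user-space sketch of that structure, with made-up bit values rather than the real NV_PFIFO definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up bit values; the real NV_PFIFO_INTR_* masks differ. */
    #define INTR_CACHE_ERROR 0x00000001
    #define INTR_DMA_PUSHER  0x00000010
    #define INTR_SEMAPHORE   0x00100000

    int main(void)
    {
            uint32_t intr_en  = INTR_CACHE_ERROR | INTR_DMA_PUSHER | 0x00000080;
            uint32_t intr_raw = INTR_DMA_PUSHER | 0x00000080;
            uint32_t stat     = intr_raw & intr_en;  /* only enabled sources count */

            if (stat & INTR_CACHE_ERROR) { puts("cache error"); stat &= ~INTR_CACHE_ERROR; }
            if (stat & INTR_DMA_PUSHER)  { puts("dma pusher");  stat &= ~INTR_DMA_PUSHER;  }
            if (stat & INTR_SEMAPHORE)   { puts("semaphore");   stat &= ~INTR_SEMAPHORE;   }

            /* Anything left is unknown: report it once and disable it so it
             * cannot storm, instead of spinning with a retry counter. */
            if (stat) {
                    printf("unknown intr 0x%08x, masking it off\n", (unsigned)stat);
                    intr_en &= ~stat;
            }

            printf("intr_en now 0x%08x\n", (unsigned)intr_en);
            return 0;
    }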
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c index 2e7ec389eea7..57e2c5b13123 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | |||
@@ -1032,9 +1032,9 @@ gf100_grctx_generate_bundle(struct gf100_grctx *info) | |||
1032 | const int s = 8; | 1032 | const int s = 8; |
1033 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); | 1033 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); |
1034 | mmio_refn(info, 0x408004, 0x00000000, s, b); | 1034 | mmio_refn(info, 0x408004, 0x00000000, s, b); |
1035 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); | 1035 | mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); |
1036 | mmio_refn(info, 0x418808, 0x00000000, s, b); | 1036 | mmio_refn(info, 0x418808, 0x00000000, s, b); |
1037 | mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); | 1037 | mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); |
1038 | } | 1038 | } |
1039 | 1039 | ||
1040 | void | 1040 | void |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index b52300d8861a..5e9454ba158f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | |||
@@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info) | |||
851 | const int s = 8; | 851 | const int s = 8; |
852 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); | 852 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); |
853 | mmio_refn(info, 0x408004, 0x00000000, s, b); | 853 | mmio_refn(info, 0x408004, 0x00000000, s, b); |
854 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); | 854 | mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); |
855 | mmio_refn(info, 0x418808, 0x00000000, s, b); | 855 | mmio_refn(info, 0x418808, 0x00000000, s, b); |
856 | mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); | 856 | mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); |
857 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); | 857 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); |
858 | } | 858 | } |
859 | 859 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c index 956f4dce960c..b2fae6e389e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | |||
@@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info) | |||
871 | const int s = 8; | 871 | const int s = 8; |
872 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); | 872 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); |
873 | mmio_refn(info, 0x408004, 0x00000000, s, b); | 873 | mmio_refn(info, 0x408004, 0x00000000, s, b); |
874 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); | 874 | mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); |
875 | mmio_refn(info, 0x418e24, 0x00000000, s, b); | 875 | mmio_refn(info, 0x418e24, 0x00000000, s, b); |
876 | mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); | 876 | mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s)); |
877 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); | 877 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); |
878 | } | 878 | } |
879 | 879 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c index d1a89b2bd5c1..c4e1f085ee10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c | |||
@@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info) | |||
74 | u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); | 74 | u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); |
75 | if (ent) { | 75 | if (ent) { |
76 | if (ver >= 0x41) { | 76 | if (ver >= 0x41) { |
77 | if (!(nv_ro32(bios, ent) & 0x80000000)) | 77 | u32 ent_value = nv_ro32(bios, ent); |
78 | u8 i2c_port = (ent_value >> 27) & 0x1f; | ||
79 | u8 dpaux_port = (ent_value >> 22) & 0x1f; | ||
80 | /* value 0x1f means unused according to DCB 4.x spec */ | ||
81 | if (i2c_port == 0x1f && dpaux_port == 0x1f) | ||
78 | info->type = DCB_I2C_UNUSED; | 82 | info->type = DCB_I2C_UNUSED; |
79 | else | 83 | else |
80 | info->type = DCB_I2C_PMGR; | 84 | info->type = DCB_I2C_PMGR; |
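For DCB 4.1 and later the entry is no longer judged by bit 31 alone: the i2c port lives in bits 31:27, the aux port in bits 26:22, and a value of 0x1f in both means the entry is unused. A small sketch of that decoding (the sample entry word is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define DCB_I2C_PORT_UNUSED 0x1f   /* per the comment in the hunk */

    int main(void)
    {
            /* Example DCB 4.1 I2C entry word; the field positions match the
             * hunk (bits 31:27 = i2c port, bits 26:22 = aux port). */
            uint32_t ent = 0xfff00000;

            uint8_t i2c_port   = (ent >> 27) & 0x1f;
            uint8_t dpaux_port = (ent >> 22) & 0x1f;

            if (i2c_port == DCB_I2C_PORT_UNUSED &&
                dpaux_port == DCB_I2C_PORT_UNUSED)
                    puts("entry unused");
            else
                    printf("i2c port %u, aux port %u\n",
                           (unsigned)i2c_port, (unsigned)dpaux_port);
            return 0;
    }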
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 061eaa9c19c7..122eb5693ba1 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c | |||
@@ -153,7 +153,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev) | |||
153 | .compute_vmid_bitmap = 0xFF00, | 153 | .compute_vmid_bitmap = 0xFF00, |
154 | 154 | ||
155 | .first_compute_pipe = 1, | 155 | .first_compute_pipe = 1, |
156 | .compute_pipe_count = 8 - 1, | 156 | .compute_pipe_count = 4 - 1, |
157 | }; | 157 | }; |
158 | 158 | ||
159 | radeon_doorbell_get_kfd_info(rdev, | 159 | radeon_doorbell_get_kfd_info(rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 43e09942823e..318165d4855c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
173 | else | 173 | else |
174 | rbo->placements[i].lpfn = 0; | 174 | rbo->placements[i].lpfn = 0; |
175 | } | 175 | } |
176 | |||
177 | /* | ||
178 | * Use two-ended allocation depending on the buffer size to | ||
179 | * improve fragmentation quality. | ||
180 | * 512kb was measured as the most optimal number. | ||
181 | */ | ||
182 | if (rbo->tbo.mem.size > 512 * 1024) { | ||
183 | for (i = 0; i < c; i++) { | ||
184 | rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; | ||
185 | } | ||
186 | } | ||
187 | } | 176 | } |
188 | 177 | ||
189 | int radeon_bo_create(struct radeon_device *rdev, | 178 | int radeon_bo_create(struct radeon_device *rdev, |
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 37de0173b6d2..74adcd2c967e 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
289 | struct request_queue *q = bdev_get_queue(where->bdev); | 289 | struct request_queue *q = bdev_get_queue(where->bdev); |
290 | unsigned short logical_block_size = queue_logical_block_size(q); | 290 | unsigned short logical_block_size = queue_logical_block_size(q); |
291 | sector_t num_sectors; | 291 | sector_t num_sectors; |
292 | unsigned int uninitialized_var(special_cmd_max_sectors); | ||
292 | 293 | ||
293 | /* Reject unsupported discard requests */ | 294 | /* |
294 | if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { | 295 | * Reject unsupported discard and write same requests. |
296 | */ | ||
297 | if (rw & REQ_DISCARD) | ||
298 | special_cmd_max_sectors = q->limits.max_discard_sectors; | ||
299 | else if (rw & REQ_WRITE_SAME) | ||
300 | special_cmd_max_sectors = q->limits.max_write_same_sectors; | ||
301 | if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { | ||
295 | dec_count(io, region, -EOPNOTSUPP); | 302 | dec_count(io, region, -EOPNOTSUPP); |
296 | return; | 303 | return; |
297 | } | 304 | } |
@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
317 | store_io_and_region_in_bio(bio, io, region); | 324 | store_io_and_region_in_bio(bio, io, region); |
318 | 325 | ||
319 | if (rw & REQ_DISCARD) { | 326 | if (rw & REQ_DISCARD) { |
320 | num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); | 327 | num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); |
321 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; | 328 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; |
322 | remaining -= num_sectors; | 329 | remaining -= num_sectors; |
323 | } else if (rw & REQ_WRITE_SAME) { | 330 | } else if (rw & REQ_WRITE_SAME) { |
@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
326 | */ | 333 | */ |
327 | dp->get_page(dp, &page, &len, &offset); | 334 | dp->get_page(dp, &page, &len, &offset); |
328 | bio_add_page(bio, page, logical_block_size, offset); | 335 | bio_add_page(bio, page, logical_block_size, offset); |
329 | num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); | 336 | num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); |
330 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; | 337 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; |
331 | 338 | ||
332 | offset = 0; | 339 | offset = 0; |
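The dm-io change picks the per-command sector limit (max_discard_sectors or max_write_same_sectors) up front and rejects the request with -EOPNOTSUPP when the device advertises a limit of zero. A sketch of that decision with simplified flag values (the REQ_* constants below are illustrative, not the block layer's):

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative flag values, not the block layer's REQ_* definitions. */
    #define REQ_DISCARD    (1u << 0)
    #define REQ_WRITE_SAME (1u << 1)

    struct queue_limits {
            unsigned int max_discard_sectors;
            unsigned int max_write_same_sectors;
    };

    /* Same decision the hunk adds: pick the per-command sector limit up
     * front and fail fast when the device advertises no support (zero). */
    static int check_special_cmd(unsigned int rw, const struct queue_limits *l,
                                 unsigned int *max_sectors)
    {
            if (rw & REQ_DISCARD)
                    *max_sectors = l->max_discard_sectors;
            else if (rw & REQ_WRITE_SAME)
                    *max_sectors = l->max_write_same_sectors;
            else
                    return 0;                       /* ordinary I/O */

            return *max_sectors ? 0 : -EOPNOTSUPP;
    }

    int main(void)
    {
            struct queue_limits l = { .max_discard_sectors = 0,
                                      .max_write_same_sectors = 128 };
            unsigned int max = 0;

            printf("discard:    %d\n", check_special_cmd(REQ_DISCARD, &l, &max));
            printf("write same: %d (limit %u sectors)\n",
                   check_special_cmd(REQ_WRITE_SAME, &l, &max), max);
            return 0;
    }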
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 8b204ae216ab..f83a0f3fc365 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/log2.h> | 20 | #include <linux/log2.h> |
21 | #include <linux/dm-kcopyd.h> | 21 | #include <linux/dm-kcopyd.h> |
22 | 22 | ||
23 | #include "dm.h" | ||
24 | |||
23 | #include "dm-exception-store.h" | 25 | #include "dm-exception-store.h" |
24 | 26 | ||
25 | #define DM_MSG_PREFIX "snapshots" | 27 | #define DM_MSG_PREFIX "snapshots" |
@@ -291,12 +293,23 @@ struct origin { | |||
291 | }; | 293 | }; |
292 | 294 | ||
293 | /* | 295 | /* |
296 | * This structure is allocated for each origin target | ||
297 | */ | ||
298 | struct dm_origin { | ||
299 | struct dm_dev *dev; | ||
300 | struct dm_target *ti; | ||
301 | unsigned split_boundary; | ||
302 | struct list_head hash_list; | ||
303 | }; | ||
304 | |||
305 | /* | ||
294 | * Size of the hash table for origin volumes. If we make this | 306 | * Size of the hash table for origin volumes. If we make this |
295 | * the size of the minors list then it should be nearly perfect | 307 | * the size of the minors list then it should be nearly perfect |
296 | */ | 308 | */ |
297 | #define ORIGIN_HASH_SIZE 256 | 309 | #define ORIGIN_HASH_SIZE 256 |
298 | #define ORIGIN_MASK 0xFF | 310 | #define ORIGIN_MASK 0xFF |
299 | static struct list_head *_origins; | 311 | static struct list_head *_origins; |
312 | static struct list_head *_dm_origins; | ||
300 | static struct rw_semaphore _origins_lock; | 313 | static struct rw_semaphore _origins_lock; |
301 | 314 | ||
302 | static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); | 315 | static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); |
@@ -310,12 +323,22 @@ static int init_origin_hash(void) | |||
310 | _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | 323 | _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), |
311 | GFP_KERNEL); | 324 | GFP_KERNEL); |
312 | if (!_origins) { | 325 | if (!_origins) { |
313 | DMERR("unable to allocate memory"); | 326 | DMERR("unable to allocate memory for _origins"); |
314 | return -ENOMEM; | 327 | return -ENOMEM; |
315 | } | 328 | } |
316 | |||
317 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) | 329 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) |
318 | INIT_LIST_HEAD(_origins + i); | 330 | INIT_LIST_HEAD(_origins + i); |
331 | |||
332 | _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | ||
333 | GFP_KERNEL); | ||
334 | if (!_dm_origins) { | ||
335 | DMERR("unable to allocate memory for _dm_origins"); | ||
336 | kfree(_origins); | ||
337 | return -ENOMEM; | ||
338 | } | ||
339 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) | ||
340 | INIT_LIST_HEAD(_dm_origins + i); | ||
341 | |||
319 | init_rwsem(&_origins_lock); | 342 | init_rwsem(&_origins_lock); |
320 | 343 | ||
321 | return 0; | 344 | return 0; |
@@ -324,6 +347,7 @@ static int init_origin_hash(void) | |||
324 | static void exit_origin_hash(void) | 347 | static void exit_origin_hash(void) |
325 | { | 348 | { |
326 | kfree(_origins); | 349 | kfree(_origins); |
350 | kfree(_dm_origins); | ||
327 | } | 351 | } |
328 | 352 | ||
329 | static unsigned origin_hash(struct block_device *bdev) | 353 | static unsigned origin_hash(struct block_device *bdev) |
@@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o) | |||
350 | list_add_tail(&o->hash_list, sl); | 374 | list_add_tail(&o->hash_list, sl); |
351 | } | 375 | } |
352 | 376 | ||
377 | static struct dm_origin *__lookup_dm_origin(struct block_device *origin) | ||
378 | { | ||
379 | struct list_head *ol; | ||
380 | struct dm_origin *o; | ||
381 | |||
382 | ol = &_dm_origins[origin_hash(origin)]; | ||
383 | list_for_each_entry (o, ol, hash_list) | ||
384 | if (bdev_equal(o->dev->bdev, origin)) | ||
385 | return o; | ||
386 | |||
387 | return NULL; | ||
388 | } | ||
389 | |||
390 | static void __insert_dm_origin(struct dm_origin *o) | ||
391 | { | ||
392 | struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)]; | ||
393 | list_add_tail(&o->hash_list, sl); | ||
394 | } | ||
395 | |||
396 | static void __remove_dm_origin(struct dm_origin *o) | ||
397 | { | ||
398 | list_del(&o->hash_list); | ||
399 | } | ||
400 | |||
353 | /* | 401 | /* |
354 | * _origins_lock must be held when calling this function. | 402 | * _origins_lock must be held when calling this function. |
355 | * Returns number of snapshots registered using the supplied cow device, plus: | 403 | * Returns number of snapshots registered using the supplied cow device, plus: |
@@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti) | |||
1840 | static void snapshot_resume(struct dm_target *ti) | 1888 | static void snapshot_resume(struct dm_target *ti) |
1841 | { | 1889 | { |
1842 | struct dm_snapshot *s = ti->private; | 1890 | struct dm_snapshot *s = ti->private; |
1843 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; | 1891 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL; |
1892 | struct dm_origin *o; | ||
1893 | struct mapped_device *origin_md = NULL; | ||
1894 | bool must_restart_merging = false; | ||
1844 | 1895 | ||
1845 | down_read(&_origins_lock); | 1896 | down_read(&_origins_lock); |
1897 | |||
1898 | o = __lookup_dm_origin(s->origin->bdev); | ||
1899 | if (o) | ||
1900 | origin_md = dm_table_get_md(o->ti->table); | ||
1901 | if (!origin_md) { | ||
1902 | (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging); | ||
1903 | if (snap_merging) | ||
1904 | origin_md = dm_table_get_md(snap_merging->ti->table); | ||
1905 | } | ||
1906 | if (origin_md == dm_table_get_md(ti->table)) | ||
1907 | origin_md = NULL; | ||
1908 | if (origin_md) { | ||
1909 | if (dm_hold(origin_md)) | ||
1910 | origin_md = NULL; | ||
1911 | } | ||
1912 | |||
1913 | up_read(&_origins_lock); | ||
1914 | |||
1915 | if (origin_md) { | ||
1916 | dm_internal_suspend_fast(origin_md); | ||
1917 | if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) { | ||
1918 | must_restart_merging = true; | ||
1919 | stop_merge(snap_merging); | ||
1920 | } | ||
1921 | } | ||
1922 | |||
1923 | down_read(&_origins_lock); | ||
1924 | |||
1846 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); | 1925 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); |
1847 | if (snap_src && snap_dest) { | 1926 | if (snap_src && snap_dest) { |
1848 | down_write(&snap_src->lock); | 1927 | down_write(&snap_src->lock); |
@@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti) | |||
1851 | up_write(&snap_dest->lock); | 1930 | up_write(&snap_dest->lock); |
1852 | up_write(&snap_src->lock); | 1931 | up_write(&snap_src->lock); |
1853 | } | 1932 | } |
1933 | |||
1854 | up_read(&_origins_lock); | 1934 | up_read(&_origins_lock); |
1855 | 1935 | ||
1936 | if (origin_md) { | ||
1937 | if (must_restart_merging) | ||
1938 | start_merge(snap_merging); | ||
1939 | dm_internal_resume_fast(origin_md); | ||
1940 | dm_put(origin_md); | ||
1941 | } | ||
1942 | |||
1856 | /* Now we have correct chunk size, reregister */ | 1943 | /* Now we have correct chunk size, reregister */ |
1857 | reregister_snapshot(s); | 1944 | reregister_snapshot(s); |
1858 | 1945 | ||
@@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap, | |||
2133 | * Origin: maps a linear range of a device, with hooks for snapshotting. | 2220 | * Origin: maps a linear range of a device, with hooks for snapshotting. |
2134 | */ | 2221 | */ |
2135 | 2222 | ||
2136 | struct dm_origin { | ||
2137 | struct dm_dev *dev; | ||
2138 | unsigned split_boundary; | ||
2139 | }; | ||
2140 | |||
2141 | /* | 2223 | /* |
2142 | * Construct an origin mapping: <dev_path> | 2224 | * Construct an origin mapping: <dev_path> |
2143 | * The context for an origin is merely a 'struct dm_dev *' | 2225 | * The context for an origin is merely a 'struct dm_dev *' |
@@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
2166 | goto bad_open; | 2248 | goto bad_open; |
2167 | } | 2249 | } |
2168 | 2250 | ||
2251 | o->ti = ti; | ||
2169 | ti->private = o; | 2252 | ti->private = o; |
2170 | ti->num_flush_bios = 1; | 2253 | ti->num_flush_bios = 1; |
2171 | 2254 | ||
@@ -2180,6 +2263,7 @@ bad_alloc: | |||
2180 | static void origin_dtr(struct dm_target *ti) | 2263 | static void origin_dtr(struct dm_target *ti) |
2181 | { | 2264 | { |
2182 | struct dm_origin *o = ti->private; | 2265 | struct dm_origin *o = ti->private; |
2266 | |||
2183 | dm_put_device(ti, o->dev); | 2267 | dm_put_device(ti, o->dev); |
2184 | kfree(o); | 2268 | kfree(o); |
2185 | } | 2269 | } |
@@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti) | |||
2216 | struct dm_origin *o = ti->private; | 2300 | struct dm_origin *o = ti->private; |
2217 | 2301 | ||
2218 | o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); | 2302 | o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); |
2303 | |||
2304 | down_write(&_origins_lock); | ||
2305 | __insert_dm_origin(o); | ||
2306 | up_write(&_origins_lock); | ||
2307 | } | ||
2308 | |||
2309 | static void origin_postsuspend(struct dm_target *ti) | ||
2310 | { | ||
2311 | struct dm_origin *o = ti->private; | ||
2312 | |||
2313 | down_write(&_origins_lock); | ||
2314 | __remove_dm_origin(o); | ||
2315 | up_write(&_origins_lock); | ||
2219 | } | 2316 | } |
2220 | 2317 | ||
2221 | static void origin_status(struct dm_target *ti, status_type_t type, | 2318 | static void origin_status(struct dm_target *ti, status_type_t type, |
@@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti, | |||
2258 | 2355 | ||
2259 | static struct target_type origin_target = { | 2356 | static struct target_type origin_target = { |
2260 | .name = "snapshot-origin", | 2357 | .name = "snapshot-origin", |
2261 | .version = {1, 8, 1}, | 2358 | .version = {1, 9, 0}, |
2262 | .module = THIS_MODULE, | 2359 | .module = THIS_MODULE, |
2263 | .ctr = origin_ctr, | 2360 | .ctr = origin_ctr, |
2264 | .dtr = origin_dtr, | 2361 | .dtr = origin_dtr, |
2265 | .map = origin_map, | 2362 | .map = origin_map, |
2266 | .resume = origin_resume, | 2363 | .resume = origin_resume, |
2364 | .postsuspend = origin_postsuspend, | ||
2267 | .status = origin_status, | 2365 | .status = origin_status, |
2268 | .merge = origin_merge, | 2366 | .merge = origin_merge, |
2269 | .iterate_devices = origin_iterate_devices, | 2367 | .iterate_devices = origin_iterate_devices, |
@@ -2271,7 +2369,7 @@ static struct target_type origin_target = { | |||
2271 | 2369 | ||
2272 | static struct target_type snapshot_target = { | 2370 | static struct target_type snapshot_target = { |
2273 | .name = "snapshot", | 2371 | .name = "snapshot", |
2274 | .version = {1, 12, 0}, | 2372 | .version = {1, 13, 0}, |
2275 | .module = THIS_MODULE, | 2373 | .module = THIS_MODULE, |
2276 | .ctr = snapshot_ctr, | 2374 | .ctr = snapshot_ctr, |
2277 | .dtr = snapshot_dtr, | 2375 | .dtr = snapshot_dtr, |
@@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = { | |||
2285 | 2383 | ||
2286 | static struct target_type merge_target = { | 2384 | static struct target_type merge_target = { |
2287 | .name = dm_snapshot_merge_target_name, | 2385 | .name = dm_snapshot_merge_target_name, |
2288 | .version = {1, 2, 0}, | 2386 | .version = {1, 3, 0}, |
2289 | .module = THIS_MODULE, | 2387 | .module = THIS_MODULE, |
2290 | .ctr = snapshot_ctr, | 2388 | .ctr = snapshot_ctr, |
2291 | .dtr = snapshot_dtr, | 2389 | .dtr = snapshot_dtr, |
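
Note on the snapshot-origin hunks above: a new origin_postsuspend() hook is paired with origin_resume(), so the dm_origin is inserted into the global origin list under _origins_lock when the target resumes and removed again when it suspends, and lookups taken under that lock should not hand back an origin that is mid-suspend; the struct dm_origin definition removed here presumably moved earlier in the file, since origin_ctr() now stores o->ti, and the target version bumps advertise the new hook. A minimal userspace sketch of the same insert-on-resume / remove-on-postsuspend pattern, with a pthread rwlock standing in for _origins_lock and illustrative names throughout:

    #include <pthread.h>
    #include <stdio.h>

    struct origin {
        struct origin *next;
        const char *name;
    };

    static struct origin *origin_list;
    static pthread_rwlock_t origins_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void origin_resume(struct origin *o)
    {
        pthread_rwlock_wrlock(&origins_lock);
        o->next = origin_list;              /* __insert_dm_origin() analogue */
        origin_list = o;
        pthread_rwlock_unlock(&origins_lock);
    }

    static void origin_postsuspend(struct origin *o)
    {
        struct origin **p;

        pthread_rwlock_wrlock(&origins_lock);
        for (p = &origin_list; *p; p = &(*p)->next)
            if (*p == o) {                  /* __remove_dm_origin() analogue */
                *p = o->next;
                break;
            }
        pthread_rwlock_unlock(&origins_lock);
    }

    int main(void)
    {
        struct origin o = { .next = NULL, .name = "origin0" };

        origin_resume(&o);
        origin_postsuspend(&o);
        printf("registered: %s\n", origin_list ? "yes" : "no");
        return 0;
    }

Registering only between resume and postsuspend means the visibility window of the object matches the window in which its table is actually live.
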
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 654773cb1eee..921aafd12aee 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) | |||
2358 | return DM_MAPIO_REMAPPED; | 2358 | return DM_MAPIO_REMAPPED; |
2359 | 2359 | ||
2360 | case -ENODATA: | 2360 | case -ENODATA: |
2361 | if (get_pool_mode(tc->pool) == PM_READ_ONLY) { | ||
2362 | /* | ||
2363 | * This block isn't provisioned, and we have no way | ||
2364 | * of doing so. | ||
2365 | */ | ||
2366 | handle_unserviceable_bio(tc->pool, bio); | ||
2367 | cell_defer_no_holder(tc, virt_cell); | ||
2368 | return DM_MAPIO_SUBMITTED; | ||
2369 | } | ||
2370 | /* fall through */ | ||
2371 | |||
2372 | case -EWOULDBLOCK: | 2361 | case -EWOULDBLOCK: |
2373 | thin_defer_cell(tc, virt_cell); | 2362 | thin_defer_cell(tc, virt_cell); |
2374 | return DM_MAPIO_SUBMITTED; | 2363 | return DM_MAPIO_SUBMITTED; |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 73f28802dc7a..9b641b38b857 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -2616,6 +2616,19 @@ void dm_get(struct mapped_device *md) | |||
2616 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); | 2616 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); |
2617 | } | 2617 | } |
2618 | 2618 | ||
2619 | int dm_hold(struct mapped_device *md) | ||
2620 | { | ||
2621 | spin_lock(&_minor_lock); | ||
2622 | if (test_bit(DMF_FREEING, &md->flags)) { | ||
2623 | spin_unlock(&_minor_lock); | ||
2624 | return -EBUSY; | ||
2625 | } | ||
2626 | dm_get(md); | ||
2627 | spin_unlock(&_minor_lock); | ||
2628 | return 0; | ||
2629 | } | ||
2630 | EXPORT_SYMBOL_GPL(dm_hold); | ||
2631 | |||
2619 | const char *dm_device_name(struct mapped_device *md) | 2632 | const char *dm_device_name(struct mapped_device *md) |
2620 | { | 2633 | { |
2621 | return md->name; | 2634 | return md->name; |
@@ -2638,10 +2651,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait) | |||
2638 | if (dm_request_based(md)) | 2651 | if (dm_request_based(md)) |
2639 | flush_kthread_worker(&md->kworker); | 2652 | flush_kthread_worker(&md->kworker); |
2640 | 2653 | ||
2654 | /* | ||
2655 | * Take suspend_lock so that presuspend and postsuspend methods | ||
2656 | * do not race with internal suspend. | ||
2657 | */ | ||
2658 | mutex_lock(&md->suspend_lock); | ||
2641 | if (!dm_suspended_md(md)) { | 2659 | if (!dm_suspended_md(md)) { |
2642 | dm_table_presuspend_targets(map); | 2660 | dm_table_presuspend_targets(map); |
2643 | dm_table_postsuspend_targets(map); | 2661 | dm_table_postsuspend_targets(map); |
2644 | } | 2662 | } |
2663 | mutex_unlock(&md->suspend_lock); | ||
2645 | 2664 | ||
2646 | /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ | 2665 | /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ |
2647 | dm_put_live_table(md, srcu_idx); | 2666 | dm_put_live_table(md, srcu_idx); |
@@ -3115,6 +3134,7 @@ void dm_internal_suspend_fast(struct mapped_device *md) | |||
3115 | flush_workqueue(md->wq); | 3134 | flush_workqueue(md->wq); |
3116 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); | 3135 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); |
3117 | } | 3136 | } |
3137 | EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); | ||
3118 | 3138 | ||
3119 | void dm_internal_resume_fast(struct mapped_device *md) | 3139 | void dm_internal_resume_fast(struct mapped_device *md) |
3120 | { | 3140 | { |
@@ -3126,6 +3146,7 @@ void dm_internal_resume_fast(struct mapped_device *md) | |||
3126 | done: | 3146 | done: |
3127 | mutex_unlock(&md->suspend_lock); | 3147 | mutex_unlock(&md->suspend_lock); |
3128 | } | 3148 | } |
3149 | EXPORT_SYMBOL_GPL(dm_internal_resume_fast); | ||
3129 | 3150 | ||
3130 | /*----------------------------------------------------------------- | 3151 | /*----------------------------------------------------------------- |
3131 | * Event notification. | 3152 | * Event notification. |
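
Three related changes in dm.c above: dm_hold() takes a reference only while DMF_FREEING is clear, with the check and the dm_get() both under _minor_lock; __dm_destroy() now wraps the presuspend/postsuspend calls in suspend_lock so they cannot race with an internal suspend; and dm_internal_suspend_fast()/dm_internal_resume_fast() are exported for the snapshot code seen earlier. A compact userspace sketch of the check-then-get idiom behind dm_hold(), with a mutex and a flag standing in for _minor_lock and DMF_FREEING (illustrative names, not the kernel API):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct mapped_dev {
        pthread_mutex_t lock;           /* _minor_lock stand-in */
        bool freeing;                   /* DMF_FREEING stand-in */
        int refcount;
    };

    static int dev_hold(struct mapped_dev *md)
    {
        pthread_mutex_lock(&md->lock);
        if (md->freeing) {
            pthread_mutex_unlock(&md->lock);
            return -EBUSY;              /* too late: caller must not use md */
        }
        md->refcount++;                 /* dm_get() analogue */
        pthread_mutex_unlock(&md->lock);
        return 0;
    }

    int main(void)
    {
        struct mapped_dev md = { .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("hold: %d (refs %d)\n", dev_hold(&md), md.refcount);
        md.freeing = true;
        printf("hold: %d (refs %d)\n", dev_hold(&md), md.refcount);
        return 0;
    }

Because the flag test and the reference grab sit under the same lock, a successful hold and the start of teardown are mutually exclusive.
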
diff --git a/drivers/md/md.c b/drivers/md/md.c index cadf9cc02b25..717daad71fb1 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -5080,7 +5080,8 @@ int md_run(struct mddev *mddev) | |||
5080 | } | 5080 | } |
5081 | if (err) { | 5081 | if (err) { |
5082 | mddev_detach(mddev); | 5082 | mddev_detach(mddev); |
5083 | pers->free(mddev, mddev->private); | 5083 | if (mddev->private) |
5084 | pers->free(mddev, mddev->private); | ||
5084 | module_put(pers->owner); | 5085 | module_put(pers->owner); |
5085 | bitmap_destroy(mddev); | 5086 | bitmap_destroy(mddev); |
5086 | return err; | 5087 | return err; |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a13f738a7b39..3ed9f42ddca6 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -467,8 +467,6 @@ static int raid0_run(struct mddev *mddev) | |||
467 | dump_zones(mddev); | 467 | dump_zones(mddev); |
468 | 468 | ||
469 | ret = md_integrity_register(mddev); | 469 | ret = md_integrity_register(mddev); |
470 | if (ret) | ||
471 | raid0_free(mddev, conf); | ||
472 | 470 | ||
473 | return ret; | 471 | return ret; |
474 | } | 472 | } |
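
The raid0 hunk stops calling raid0_free() when md_integrity_register() fails; together with the md.c hunk above that only invokes pers->free() when mddev->private is still set, cleanup after a failed ->run ends up with a single owner, avoiding a double free of conf. A small sketch of that ownership rule (plain C, illustrative names):

    #include <stdlib.h>

    struct conf { int nzones; };

    /* On failure the callee only reports the error; cleanup is left
     * to the caller, so the allocation is freed exactly once. */
    static int raid_run(struct conf *conf)
    {
        (void)conf;
        return -1;              /* pretend a late setup step failed */
    }

    int main(void)
    {
        struct conf *conf = calloc(1, sizeof(*conf));

        if (!conf)
            return 1;
        if (raid_run(conf) < 0)
            free(conf);         /* the single point of cleanup */
        return 0;
    }
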
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index da4c79259f67..16e34b37d134 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c | |||
@@ -425,9 +425,10 @@ retry: | |||
425 | ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", | 425 | ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", |
426 | pnum, vol_id, lnum); | 426 | pnum, vol_id, lnum); |
427 | err = -EBADMSG; | 427 | err = -EBADMSG; |
428 | } else | 428 | } else { |
429 | err = -EINVAL; | 429 | err = -EINVAL; |
430 | ubi_ro_mode(ubi); | 430 | ubi_ro_mode(ubi); |
431 | } | ||
431 | } | 432 | } |
432 | goto out_free; | 433 | goto out_free; |
433 | } else if (err == UBI_IO_BITFLIPS) | 434 | } else if (err == UBI_IO_BITFLIPS) |
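
The eba.c hunk adds braces so ubi_ro_mode() runs only in the else branch (the -EINVAL case); previously only the first statement after the unbraced else belonged to it, and the read-only switch ran after the -EBADMSG case as well, despite what the indentation suggested. A minimal illustration of that scoping pitfall and the braced fix (plain C, illustrative values):

    #include <stdio.h>

    static void go_readonly(void) { puts("switching to read-only"); }

    static int classify(int bad_crc)
    {
        int err;

        if (bad_crc)
            err = -74;          /* -EBADMSG: data corrupted */
        else {
            err = -22;          /* -EINVAL: header makes no sense */
            go_readonly();      /* without the braces this ran in both cases */
        }
        return err;
    }

    int main(void)
    {
        printf("bad crc: %d\n", classify(1));   /* stays read-write */
        printf("invalid: %d\n", classify(0));   /* goes read-only */
        return 0;
    }
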
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index fe48f4c51373..1762ad3910b2 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c | |||
@@ -46,8 +46,7 @@ enum cx82310_status { | |||
46 | }; | 46 | }; |
47 | 47 | ||
48 | #define CMD_PACKET_SIZE 64 | 48 | #define CMD_PACKET_SIZE 64 |
49 | /* first command after power on can take around 8 seconds */ | 49 | #define CMD_TIMEOUT 100 |
50 | #define CMD_TIMEOUT 15000 | ||
51 | #define CMD_REPLY_RETRY 5 | 50 | #define CMD_REPLY_RETRY 5 |
52 | 51 | ||
53 | #define CX82310_MTU 1514 | 52 | #define CX82310_MTU 1514 |
@@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, | |||
78 | ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, | 77 | ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, |
79 | CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); | 78 | CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); |
80 | if (ret < 0) { | 79 | if (ret < 0) { |
81 | dev_err(&dev->udev->dev, "send command %#x: error %d\n", | 80 | if (cmd != CMD_GET_LINK_STATUS) |
82 | cmd, ret); | 81 | dev_err(&dev->udev->dev, "send command %#x: error %d\n", |
82 | cmd, ret); | ||
83 | goto end; | 83 | goto end; |
84 | } | 84 | } |
85 | 85 | ||
@@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, | |||
90 | buf, CMD_PACKET_SIZE, &actual_len, | 90 | buf, CMD_PACKET_SIZE, &actual_len, |
91 | CMD_TIMEOUT); | 91 | CMD_TIMEOUT); |
92 | if (ret < 0) { | 92 | if (ret < 0) { |
93 | dev_err(&dev->udev->dev, | 93 | if (cmd != CMD_GET_LINK_STATUS) |
94 | "reply receive error %d\n", ret); | 94 | dev_err(&dev->udev->dev, |
95 | "reply receive error %d\n", | ||
96 | ret); | ||
95 | goto end; | 97 | goto end; |
96 | } | 98 | } |
97 | if (actual_len > 0) | 99 | if (actual_len > 0) |
@@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) | |||
134 | int ret; | 136 | int ret; |
135 | char buf[15]; | 137 | char buf[15]; |
136 | struct usb_device *udev = dev->udev; | 138 | struct usb_device *udev = dev->udev; |
139 | u8 link[3]; | ||
140 | int timeout = 50; | ||
137 | 141 | ||
138 | /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ | 142 | /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ |
139 | if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 | 143 | if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 |
@@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) | |||
160 | if (!dev->partial_data) | 164 | if (!dev->partial_data) |
161 | return -ENOMEM; | 165 | return -ENOMEM; |
162 | 166 | ||
167 | /* wait for firmware to become ready (indicated by the link being up) */ | ||
168 | while (--timeout) { | ||
169 | ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0, | ||
170 | link, sizeof(link)); | ||
171 | /* the command can time out during boot - it's not an error */ | ||
172 | if (!ret && link[0] == 1 && link[2] == 1) | ||
173 | break; | ||
174 | msleep(500); | ||
175 | }; | ||
176 | if (!timeout) { | ||
177 | dev_err(&udev->dev, "firmware not ready in time\n"); | ||
178 | return -ETIMEDOUT; | ||
179 | } | ||
180 | |||
163 | /* enable ethernet mode (?) */ | 181 | /* enable ethernet mode (?) */ |
164 | ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); | 182 | ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); |
165 | if (ret) { | 183 | if (ret) { |
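
Instead of stretching the bulk-transfer timeout to 15 s for the first command, the cx82310 bind path now polls CMD_GET_LINK_STATUS every 500 ms (up to 50 tries, roughly 25 s) until the firmware reports the link up, and the command helpers suppress error prints for that command since failures are expected while the firmware boots. A generic poll-until-ready sketch with the same shape (userspace; the command is simulated and all names are illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for the link-status command: succeeds and reports the
     * link up once the (simulated) firmware has finished booting. */
    static int get_link_status(int *up)
    {
        static int boots_after = 3;

        if (boots_after-- > 0)
            return -ETIMEDOUT;      /* expected while booting: not logged */
        *up = 1;
        return 0;
    }

    int main(void)
    {
        int timeout = 50;           /* 50 polls x 500 ms = 25 s budget */
        int up = 0;

        while (--timeout) {
            if (!get_link_status(&up) && up)
                break;
            usleep(500 * 1000);     /* msleep(500) analogue */
        }
        if (!timeout) {
            fprintf(stderr, "firmware not ready in time\n");
            return 1;
        }
        printf("link up after %d polls\n", 50 - timeout);
        return 0;
    }

Keeping the per-command timeout short and retrying at a coarse interval bounds the worst case without blocking a single USB transfer for many seconds.
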
diff --git a/drivers/of/base.c b/drivers/of/base.c index adb8764861c0..8f165b112e03 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -715,13 +715,8 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent, | |||
715 | { | 715 | { |
716 | struct device_node *child; | 716 | struct device_node *child; |
717 | int len; | 717 | int len; |
718 | const char *end; | ||
719 | 718 | ||
720 | end = strchr(path, ':'); | 719 | len = strcspn(path, "/:"); |
721 | if (!end) | ||
722 | end = strchrnul(path, '/'); | ||
723 | |||
724 | len = end - path; | ||
725 | if (!len) | 720 | if (!len) |
726 | return NULL; | 721 | return NULL; |
727 | 722 | ||
@@ -1893,10 +1888,8 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) | |||
1893 | name = of_get_property(of_chosen, "linux,stdout-path", NULL); | 1888 | name = of_get_property(of_chosen, "linux,stdout-path", NULL); |
1894 | if (IS_ENABLED(CONFIG_PPC) && !name) | 1889 | if (IS_ENABLED(CONFIG_PPC) && !name) |
1895 | name = of_get_property(of_aliases, "stdout", NULL); | 1890 | name = of_get_property(of_aliases, "stdout", NULL); |
1896 | if (name) { | 1891 | if (name) |
1897 | of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); | 1892 | of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); |
1898 | add_preferred_console("stdout-path", 0, NULL); | ||
1899 | } | ||
1900 | } | 1893 | } |
1901 | 1894 | ||
1902 | if (!of_aliases) | 1895 | if (!of_aliases) |
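
__of_find_node_by_path() now computes the length of the next path component with a single strcspn(path, "/:"), stopping at whichever of '/' or ':' comes first, instead of the strchr/strchrnul pair that could latch onto a ':' belonging to a later part of the string. (The other hunk in this file drops the add_preferred_console() call from of_alias_scan(); it is unrelated to the path parsing.) A standalone demonstration of the strcspn idiom, using an input shaped like the new unittest case below (illustrative):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Device-tree style path with options after the ':' */
        const char *path = "testcase-device1:test/option";
        size_t len = strcspn(path, "/:");   /* stop at '/' or ':' */

        if (!len) {
            puts("empty component");
            return 1;
        }
        printf("component: %.*s (len %zu)\n", (int)len, path, len);
        return 0;
    }
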
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 0d7765807f49..1a7980692f25 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
@@ -290,7 +290,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar | |||
290 | struct device_node *p; | 290 | struct device_node *p; |
291 | const __be32 *intspec, *tmp, *addr; | 291 | const __be32 *intspec, *tmp, *addr; |
292 | u32 intsize, intlen; | 292 | u32 intsize, intlen; |
293 | int i, res = -EINVAL; | 293 | int i, res; |
294 | 294 | ||
295 | pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); | 295 | pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); |
296 | 296 | ||
@@ -323,15 +323,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar | |||
323 | 323 | ||
324 | /* Get size of interrupt specifier */ | 324 | /* Get size of interrupt specifier */ |
325 | tmp = of_get_property(p, "#interrupt-cells", NULL); | 325 | tmp = of_get_property(p, "#interrupt-cells", NULL); |
326 | if (tmp == NULL) | 326 | if (tmp == NULL) { |
327 | res = -EINVAL; | ||
327 | goto out; | 328 | goto out; |
329 | } | ||
328 | intsize = be32_to_cpu(*tmp); | 330 | intsize = be32_to_cpu(*tmp); |
329 | 331 | ||
330 | pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); | 332 | pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); |
331 | 333 | ||
332 | /* Check index */ | 334 | /* Check index */ |
333 | if ((index + 1) * intsize > intlen) | 335 | if ((index + 1) * intsize > intlen) { |
336 | res = -EINVAL; | ||
334 | goto out; | 337 | goto out; |
338 | } | ||
335 | 339 | ||
336 | /* Copy intspec into irq structure */ | 340 | /* Copy intspec into irq structure */ |
337 | intspec += index * intsize; | 341 | intspec += index * intsize; |
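
of_irq_parse_one() no longer pre-loads res with -EINVAL at declaration time; each bail-out sets its own error code immediately before the goto, which ties the returned value to the failure that actually happened and keeps a stale default from surviving into paths added later. A compact sketch of the set-the-error-where-it-happens pattern (illustrative names):

    #include <errno.h>
    #include <stdio.h>

    /* Each failure site assigns its own code right before the goto,
     * so nothing depends on a default set far above. */
    static int parse_one(const int *cells, int index, int intlen)
    {
        int res;

        if (cells == NULL) {
            res = -EINVAL;          /* missing #interrupt-cells analogue */
            goto out;
        }
        if ((index + 1) * *cells > intlen) {
            res = -EINVAL;          /* index out of range */
            goto out;
        }
        res = 0;                    /* parsed successfully */
    out:
        return res;
    }

    int main(void)
    {
        int cells = 3;

        printf("%d %d %d\n", parse_one(NULL, 0, 9),
               parse_one(&cells, 4, 9), parse_one(&cells, 0, 9));
        return 0;
    }
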
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index aba8946cac46..52c45c7df07f 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c | |||
@@ -97,6 +97,11 @@ static void __init of_selftest_find_node_by_name(void) | |||
97 | "option path test, subcase #1 failed\n"); | 97 | "option path test, subcase #1 failed\n"); |
98 | of_node_put(np); | 98 | of_node_put(np); |
99 | 99 | ||
100 | np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options); | ||
101 | selftest(np && !strcmp("test/option", options), | ||
102 | "option path test, subcase #2 failed\n"); | ||
103 | of_node_put(np); | ||
104 | |||
100 | np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); | 105 | np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); |
101 | selftest(np, "NULL option path test failed\n"); | 106 | selftest(np, "NULL option path test failed\n"); |
102 | of_node_put(np); | 107 | of_node_put(np); |
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index 3bb49252a098..45f67c63d385 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig | |||
@@ -69,8 +69,7 @@ config YENTA | |||
69 | tristate "CardBus yenta-compatible bridge support" | 69 | tristate "CardBus yenta-compatible bridge support" |
70 | depends on PCI | 70 | depends on PCI |
71 | select CARDBUS if !EXPERT | 71 | select CARDBUS if !EXPERT |
72 | select PCCARD_NONSTATIC if PCMCIA != n && ISA | 72 | select PCCARD_NONSTATIC if PCMCIA != n |
73 | select PCCARD_PCI if PCMCIA !=n && !ISA | ||
74 | ---help--- | 73 | ---help--- |
75 | This option enables support for CardBus host bridges. Virtually | 74 | This option enables support for CardBus host bridges. Virtually |
76 | all modern PCMCIA bridges are CardBus compatible. A "bridge" is | 75 | all modern PCMCIA bridges are CardBus compatible. A "bridge" is |
@@ -110,8 +109,7 @@ config YENTA_TOSHIBA | |||
110 | config PD6729 | 109 | config PD6729 |
111 | tristate "Cirrus PD6729 compatible bridge support" | 110 | tristate "Cirrus PD6729 compatible bridge support" |
112 | depends on PCMCIA && PCI | 111 | depends on PCMCIA && PCI |
113 | select PCCARD_NONSTATIC if PCMCIA != n && ISA | 112 | select PCCARD_NONSTATIC |
114 | select PCCARD_PCI if PCMCIA !=n && !ISA | ||
115 | help | 113 | help |
116 | This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge | 114 | This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge |
117 | device, found in some older laptops and PCMCIA card readers. | 115 | device, found in some older laptops and PCMCIA card readers. |
@@ -119,8 +117,7 @@ config PD6729 | |||
119 | config I82092 | 117 | config I82092 |
120 | tristate "i82092 compatible bridge support" | 118 | tristate "i82092 compatible bridge support" |
121 | depends on PCMCIA && PCI | 119 | depends on PCMCIA && PCI |
122 | select PCCARD_NONSTATIC if PCMCIA != n && ISA | 120 | select PCCARD_NONSTATIC |
123 | select PCCARD_PCI if PCMCIA !=n && !ISA | ||
124 | help | 121 | help |
125 | This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, | 122 | This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, |
126 | found in some older laptops and more commonly in evaluation boards for the | 123 | found in some older laptops and more commonly in evaluation boards for the |
@@ -291,9 +288,6 @@ config ELECTRA_CF | |||
291 | Say Y here to support the CompactFlash controller on the | 288 | Say Y here to support the CompactFlash controller on the |
292 | PA Semi Electra eval board. | 289 | PA Semi Electra eval board. |
293 | 290 | ||
294 | config PCCARD_PCI | ||
295 | bool | ||
296 | |||
297 | config PCCARD_NONSTATIC | 291 | config PCCARD_NONSTATIC |
298 | bool | 292 | bool |
299 | 293 | ||
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index f1a7ca04d89e..27e94b30cf96 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile | |||
@@ -12,7 +12,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o | |||
12 | pcmcia_rsrc-y += rsrc_mgr.o | 12 | pcmcia_rsrc-y += rsrc_mgr.o |
13 | pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o | 13 | pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o |
14 | pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o | 14 | pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o |
15 | pcmcia_rsrc-$(CONFIG_PCCARD_PCI) += rsrc_pci.o | ||
16 | obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o | 15 | obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o |
17 | 16 | ||
18 | 17 | ||
diff --git a/drivers/pcmcia/rsrc_pci.c b/drivers/pcmcia/rsrc_pci.c deleted file mode 100644 index 1f67b3ba70fb..000000000000 --- a/drivers/pcmcia/rsrc_pci.c +++ /dev/null | |||
@@ -1,173 +0,0 @@ | |||
1 | #include <linux/slab.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/pci.h> | ||
5 | |||
6 | #include <pcmcia/ss.h> | ||
7 | #include <pcmcia/cistpl.h> | ||
8 | #include "cs_internal.h" | ||
9 | |||
10 | |||
11 | struct pcmcia_align_data { | ||
12 | unsigned long mask; | ||
13 | unsigned long offset; | ||
14 | }; | ||
15 | |||
16 | static resource_size_t pcmcia_align(void *align_data, | ||
17 | const struct resource *res, | ||
18 | resource_size_t size, resource_size_t align) | ||
19 | { | ||
20 | struct pcmcia_align_data *data = align_data; | ||
21 | resource_size_t start; | ||
22 | |||
23 | start = (res->start & ~data->mask) + data->offset; | ||
24 | if (start < res->start) | ||
25 | start += data->mask + 1; | ||
26 | return start; | ||
27 | } | ||
28 | |||
29 | static struct resource *find_io_region(struct pcmcia_socket *s, | ||
30 | unsigned long base, int num, | ||
31 | unsigned long align) | ||
32 | { | ||
33 | struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO, | ||
34 | dev_name(&s->dev)); | ||
35 | struct pcmcia_align_data data; | ||
36 | int ret; | ||
37 | |||
38 | data.mask = align - 1; | ||
39 | data.offset = base & data.mask; | ||
40 | |||
41 | ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, | ||
42 | base, 0, pcmcia_align, &data); | ||
43 | if (ret != 0) { | ||
44 | kfree(res); | ||
45 | res = NULL; | ||
46 | } | ||
47 | return res; | ||
48 | } | ||
49 | |||
50 | static int res_pci_find_io(struct pcmcia_socket *s, unsigned int attr, | ||
51 | unsigned int *base, unsigned int num, | ||
52 | unsigned int align, struct resource **parent) | ||
53 | { | ||
54 | int i, ret = 0; | ||
55 | |||
56 | /* Check for an already-allocated window that must conflict with | ||
57 | * what was asked for. It is a hack because it does not catch all | ||
58 | * potential conflicts, just the most obvious ones. | ||
59 | */ | ||
60 | for (i = 0; i < MAX_IO_WIN; i++) { | ||
61 | if (!s->io[i].res) | ||
62 | continue; | ||
63 | |||
64 | if (!*base) | ||
65 | continue; | ||
66 | |||
67 | if ((s->io[i].res->start & (align-1)) == *base) | ||
68 | return -EBUSY; | ||
69 | } | ||
70 | |||
71 | for (i = 0; i < MAX_IO_WIN; i++) { | ||
72 | struct resource *res = s->io[i].res; | ||
73 | unsigned int try; | ||
74 | |||
75 | if (res && (res->flags & IORESOURCE_BITS) != | ||
76 | (attr & IORESOURCE_BITS)) | ||
77 | continue; | ||
78 | |||
79 | if (!res) { | ||
80 | if (align == 0) | ||
81 | align = 0x10000; | ||
82 | |||
83 | res = s->io[i].res = find_io_region(s, *base, num, | ||
84 | align); | ||
85 | if (!res) | ||
86 | return -EINVAL; | ||
87 | |||
88 | *base = res->start; | ||
89 | s->io[i].res->flags = | ||
90 | ((res->flags & ~IORESOURCE_BITS) | | ||
91 | (attr & IORESOURCE_BITS)); | ||
92 | s->io[i].InUse = num; | ||
93 | *parent = res; | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | /* Try to extend top of window */ | ||
98 | try = res->end + 1; | ||
99 | if ((*base == 0) || (*base == try)) { | ||
100 | ret = adjust_resource(s->io[i].res, res->start, | ||
101 | resource_size(res) + num); | ||
102 | if (ret) | ||
103 | continue; | ||
104 | *base = try; | ||
105 | s->io[i].InUse += num; | ||
106 | *parent = res; | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | /* Try to extend bottom of window */ | ||
111 | try = res->start - num; | ||
112 | if ((*base == 0) || (*base == try)) { | ||
113 | ret = adjust_resource(s->io[i].res, | ||
114 | res->start - num, | ||
115 | resource_size(res) + num); | ||
116 | if (ret) | ||
117 | continue; | ||
118 | *base = try; | ||
119 | s->io[i].InUse += num; | ||
120 | *parent = res; | ||
121 | return 0; | ||
122 | } | ||
123 | } | ||
124 | return -EINVAL; | ||
125 | } | ||
126 | |||
127 | static struct resource *res_pci_find_mem(u_long base, u_long num, | ||
128 | u_long align, int low, struct pcmcia_socket *s) | ||
129 | { | ||
130 | struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM, | ||
131 | dev_name(&s->dev)); | ||
132 | struct pcmcia_align_data data; | ||
133 | unsigned long min; | ||
134 | int ret; | ||
135 | |||
136 | if (align < 0x20000) | ||
137 | align = 0x20000; | ||
138 | data.mask = align - 1; | ||
139 | data.offset = base & data.mask; | ||
140 | |||
141 | min = 0; | ||
142 | if (!low) | ||
143 | min = 0x100000UL; | ||
144 | |||
145 | ret = pci_bus_alloc_resource(s->cb_dev->bus, | ||
146 | res, num, 1, min, 0, | ||
147 | pcmcia_align, &data); | ||
148 | |||
149 | if (ret != 0) { | ||
150 | kfree(res); | ||
151 | res = NULL; | ||
152 | } | ||
153 | return res; | ||
154 | } | ||
155 | |||
156 | |||
157 | static int res_pci_init(struct pcmcia_socket *s) | ||
158 | { | ||
159 | if (!s->cb_dev || !(s->features & SS_CAP_PAGE_REGS)) { | ||
160 | dev_err(&s->dev, "not supported by res_pci\n"); | ||
161 | return -EOPNOTSUPP; | ||
162 | } | ||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | struct pccard_resource_ops pccard_nonstatic_ops = { | ||
167 | .validate_mem = NULL, | ||
168 | .find_io = res_pci_find_io, | ||
169 | .find_mem = res_pci_find_mem, | ||
170 | .init = res_pci_init, | ||
171 | .exit = NULL, | ||
172 | }; | ||
173 | EXPORT_SYMBOL(pccard_nonstatic_ops); | ||
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c index 7c99ca256f05..8ccc3952c13d 100644 --- a/drivers/phy/phy-armada375-usb2.c +++ b/drivers/phy/phy-armada375-usb2.c | |||
@@ -37,7 +37,7 @@ static int armada375_usb_phy_init(struct phy *phy) | |||
37 | struct armada375_cluster_phy *cluster_phy; | 37 | struct armada375_cluster_phy *cluster_phy; |
38 | u32 reg; | 38 | u32 reg; |
39 | 39 | ||
40 | cluster_phy = dev_get_drvdata(phy->dev.parent); | 40 | cluster_phy = phy_get_drvdata(phy); |
41 | if (!cluster_phy) | 41 | if (!cluster_phy) |
42 | return -ENODEV; | 42 | return -ENODEV; |
43 | 43 | ||
@@ -131,6 +131,7 @@ static int armada375_usb_phy_probe(struct platform_device *pdev) | |||
131 | cluster_phy->reg = usb_cluster_base; | 131 | cluster_phy->reg = usb_cluster_base; |
132 | 132 | ||
133 | dev_set_drvdata(dev, cluster_phy); | 133 | dev_set_drvdata(dev, cluster_phy); |
134 | phy_set_drvdata(phy, cluster_phy); | ||
134 | 135 | ||
135 | phy_provider = devm_of_phy_provider_register(&pdev->dev, | 136 | phy_provider = devm_of_phy_provider_register(&pdev->dev, |
136 | armada375_usb_phy_xlate); | 137 | armada375_usb_phy_xlate); |
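
The Armada 375 fix attaches the cluster data to the phy itself with phy_set_drvdata() in probe and reads it back with phy_get_drvdata() in the init callback, rather than reaching through phy->dev.parent for the platform device's drvdata. A minimal get/set pairing sketch with a tiny struct standing in for struct phy (illustrative only, not the kernel phy API):

    #include <stdio.h>

    struct phy { void *drvdata; };

    static void phy_set_data(struct phy *p, void *d) { p->drvdata = d; }
    static void *phy_get_data(struct phy *p)         { return p->drvdata; }

    struct cluster_phy { int cluster; };

    static int phy_init(struct phy *p)
    {
        struct cluster_phy *c = phy_get_data(p);   /* what probe attached */

        if (!c)
            return -1;
        printf("init cluster %d\n", c->cluster);
        return 0;
    }

    int main(void)
    {
        struct cluster_phy c = { .cluster = 0 };
        struct phy p = { 0 };

        phy_set_data(&p, &c);      /* probe(): attach data to the phy */
        return phy_init(&p);       /* init(): read it straight back */
    }
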
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index a12d35338313..3791838f4bd4 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c | |||
@@ -52,7 +52,9 @@ static void devm_phy_consume(struct device *dev, void *res) | |||
52 | 52 | ||
53 | static int devm_phy_match(struct device *dev, void *res, void *match_data) | 53 | static int devm_phy_match(struct device *dev, void *res, void *match_data) |
54 | { | 54 | { |
55 | return res == match_data; | 55 | struct phy **phy = res; |
56 | |||
57 | return *phy == match_data; | ||
56 | } | 58 | } |
57 | 59 | ||
58 | /** | 60 | /** |
@@ -223,6 +225,7 @@ int phy_init(struct phy *phy) | |||
223 | ret = phy_pm_runtime_get_sync(phy); | 225 | ret = phy_pm_runtime_get_sync(phy); |
224 | if (ret < 0 && ret != -ENOTSUPP) | 226 | if (ret < 0 && ret != -ENOTSUPP) |
225 | return ret; | 227 | return ret; |
228 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
226 | 229 | ||
227 | mutex_lock(&phy->mutex); | 230 | mutex_lock(&phy->mutex); |
228 | if (phy->init_count == 0 && phy->ops->init) { | 231 | if (phy->init_count == 0 && phy->ops->init) { |
@@ -231,8 +234,6 @@ int phy_init(struct phy *phy) | |||
231 | dev_err(&phy->dev, "phy init failed --> %d\n", ret); | 234 | dev_err(&phy->dev, "phy init failed --> %d\n", ret); |
232 | goto out; | 235 | goto out; |
233 | } | 236 | } |
234 | } else { | ||
235 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
236 | } | 237 | } |
237 | ++phy->init_count; | 238 | ++phy->init_count; |
238 | 239 | ||
@@ -253,6 +254,7 @@ int phy_exit(struct phy *phy) | |||
253 | ret = phy_pm_runtime_get_sync(phy); | 254 | ret = phy_pm_runtime_get_sync(phy); |
254 | if (ret < 0 && ret != -ENOTSUPP) | 255 | if (ret < 0 && ret != -ENOTSUPP) |
255 | return ret; | 256 | return ret; |
257 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
256 | 258 | ||
257 | mutex_lock(&phy->mutex); | 259 | mutex_lock(&phy->mutex); |
258 | if (phy->init_count == 1 && phy->ops->exit) { | 260 | if (phy->init_count == 1 && phy->ops->exit) { |
@@ -287,6 +289,7 @@ int phy_power_on(struct phy *phy) | |||
287 | ret = phy_pm_runtime_get_sync(phy); | 289 | ret = phy_pm_runtime_get_sync(phy); |
288 | if (ret < 0 && ret != -ENOTSUPP) | 290 | if (ret < 0 && ret != -ENOTSUPP) |
289 | return ret; | 291 | return ret; |
292 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
290 | 293 | ||
291 | mutex_lock(&phy->mutex); | 294 | mutex_lock(&phy->mutex); |
292 | if (phy->power_count == 0 && phy->ops->power_on) { | 295 | if (phy->power_count == 0 && phy->ops->power_on) { |
@@ -295,8 +298,6 @@ int phy_power_on(struct phy *phy) | |||
295 | dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); | 298 | dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); |
296 | goto out; | 299 | goto out; |
297 | } | 300 | } |
298 | } else { | ||
299 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
300 | } | 301 | } |
301 | ++phy->power_count; | 302 | ++phy->power_count; |
302 | mutex_unlock(&phy->mutex); | 303 | mutex_unlock(&phy->mutex); |
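
Two phy-core fixes above: devm_phy_match() must dereference the devres area — the resource cell holds a struct phy *, so the comparison is *phy == match_data, not res == match_data — and the "ret = 0" override of a possible -ENOTSUPP from phy_pm_runtime_get_sync() moves to just after that call in phy_init()/phy_exit()/phy_power_on(), so it covers every path instead of only one branch. A sketch of the double-pointer match pitfall, with a hand-rolled registry standing in for devres (illustrative names):

    #include <stdio.h>
    #include <stdlib.h>

    struct phy { int id; };

    /* The registry stores a cell that contains the caller's phy
     * pointer, the way devres stores a struct phy * for the device. */
    static int match(void *res, void *match_data)
    {
        struct phy **slot = res;           /* res is the stored cell ... */

        return *slot == match_data;        /* ... compare what it holds */
    }

    int main(void)
    {
        struct phy *phy = malloc(sizeof(*phy));
        struct phy **cell = malloc(sizeof(*cell));  /* devres area analogue */

        *cell = phy;
        /* Comparing 'cell == phy' (the old bug) would never match. */
        printf("match: %d\n", match(cell, phy));
        free(cell);
        free(phy);
        return 0;
    }
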
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c index f86cbe68ddaf..179cbf9451aa 100644 --- a/drivers/phy/phy-exynos-dp-video.c +++ b/drivers/phy/phy-exynos-dp-video.c | |||
@@ -30,28 +30,13 @@ struct exynos_dp_video_phy { | |||
30 | const struct exynos_dp_video_phy_drvdata *drvdata; | 30 | const struct exynos_dp_video_phy_drvdata *drvdata; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | static void exynos_dp_video_phy_pwr_isol(struct exynos_dp_video_phy *state, | ||
34 | unsigned int on) | ||
35 | { | ||
36 | unsigned int val; | ||
37 | |||
38 | if (IS_ERR(state->regs)) | ||
39 | return; | ||
40 | |||
41 | val = on ? 0 : EXYNOS5_PHY_ENABLE; | ||
42 | |||
43 | regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, | ||
44 | EXYNOS5_PHY_ENABLE, val); | ||
45 | } | ||
46 | |||
47 | static int exynos_dp_video_phy_power_on(struct phy *phy) | 33 | static int exynos_dp_video_phy_power_on(struct phy *phy) |
48 | { | 34 | { |
49 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); | 35 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); |
50 | 36 | ||
51 | /* Disable power isolation on DP-PHY */ | 37 | /* Disable power isolation on DP-PHY */ |
52 | exynos_dp_video_phy_pwr_isol(state, 0); | 38 | return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, |
53 | 39 | EXYNOS5_PHY_ENABLE, EXYNOS5_PHY_ENABLE); | |
54 | return 0; | ||
55 | } | 40 | } |
56 | 41 | ||
57 | static int exynos_dp_video_phy_power_off(struct phy *phy) | 42 | static int exynos_dp_video_phy_power_off(struct phy *phy) |
@@ -59,9 +44,8 @@ static int exynos_dp_video_phy_power_off(struct phy *phy) | |||
59 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); | 44 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); |
60 | 45 | ||
61 | /* Enable power isolation on DP-PHY */ | 46 | /* Enable power isolation on DP-PHY */ |
62 | exynos_dp_video_phy_pwr_isol(state, 1); | 47 | return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, |
63 | 48 | EXYNOS5_PHY_ENABLE, 0); | |
64 | return 0; | ||
65 | } | 49 | } |
66 | 50 | ||
67 | static struct phy_ops exynos_dp_video_phy_ops = { | 51 | static struct phy_ops exynos_dp_video_phy_ops = { |
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c index f017b2f2a54e..df7519a39ba0 100644 --- a/drivers/phy/phy-exynos-mipi-video.c +++ b/drivers/phy/phy-exynos-mipi-video.c | |||
@@ -43,7 +43,6 @@ struct exynos_mipi_video_phy { | |||
43 | } phys[EXYNOS_MIPI_PHYS_NUM]; | 43 | } phys[EXYNOS_MIPI_PHYS_NUM]; |
44 | spinlock_t slock; | 44 | spinlock_t slock; |
45 | void __iomem *regs; | 45 | void __iomem *regs; |
46 | struct mutex mutex; | ||
47 | struct regmap *regmap; | 46 | struct regmap *regmap; |
48 | }; | 47 | }; |
49 | 48 | ||
@@ -59,8 +58,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state, | |||
59 | else | 58 | else |
60 | reset = EXYNOS4_MIPI_PHY_SRESETN; | 59 | reset = EXYNOS4_MIPI_PHY_SRESETN; |
61 | 60 | ||
62 | if (state->regmap) { | 61 | spin_lock(&state->slock); |
63 | mutex_lock(&state->mutex); | 62 | |
63 | if (!IS_ERR(state->regmap)) { | ||
64 | regmap_read(state->regmap, offset, &val); | 64 | regmap_read(state->regmap, offset, &val); |
65 | if (on) | 65 | if (on) |
66 | val |= reset; | 66 | val |= reset; |
@@ -72,11 +72,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state, | |||
72 | else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK)) | 72 | else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK)) |
73 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; | 73 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; |
74 | regmap_write(state->regmap, offset, val); | 74 | regmap_write(state->regmap, offset, val); |
75 | mutex_unlock(&state->mutex); | ||
76 | } else { | 75 | } else { |
77 | addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2); | 76 | addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2); |
78 | 77 | ||
79 | spin_lock(&state->slock); | ||
80 | val = readl(addr); | 78 | val = readl(addr); |
81 | if (on) | 79 | if (on) |
82 | val |= reset; | 80 | val |= reset; |
@@ -90,9 +88,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state, | |||
90 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; | 88 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; |
91 | 89 | ||
92 | writel(val, addr); | 90 | writel(val, addr); |
93 | spin_unlock(&state->slock); | ||
94 | } | 91 | } |
95 | 92 | ||
93 | spin_unlock(&state->slock); | ||
96 | return 0; | 94 | return 0; |
97 | } | 95 | } |
98 | 96 | ||
@@ -158,7 +156,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev) | |||
158 | 156 | ||
159 | dev_set_drvdata(dev, state); | 157 | dev_set_drvdata(dev, state); |
160 | spin_lock_init(&state->slock); | 158 | spin_lock_init(&state->slock); |
161 | mutex_init(&state->mutex); | ||
162 | 159 | ||
163 | for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { | 160 | for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { |
164 | struct phy *phy = devm_phy_create(dev, NULL, | 161 | struct phy *phy = devm_phy_create(dev, NULL, |
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c index 236a52ad94eb..f30bbb0fb3b2 100644 --- a/drivers/phy/phy-exynos4210-usb2.c +++ b/drivers/phy/phy-exynos4210-usb2.c | |||
@@ -250,7 +250,6 @@ static const struct samsung_usb2_common_phy exynos4210_phys[] = { | |||
250 | .power_on = exynos4210_power_on, | 250 | .power_on = exynos4210_power_on, |
251 | .power_off = exynos4210_power_off, | 251 | .power_off = exynos4210_power_off, |
252 | }, | 252 | }, |
253 | {}, | ||
254 | }; | 253 | }; |
255 | 254 | ||
256 | const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = { | 255 | const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = { |
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c index 0b9de88579b1..765da90a536f 100644 --- a/drivers/phy/phy-exynos4x12-usb2.c +++ b/drivers/phy/phy-exynos4x12-usb2.c | |||
@@ -361,7 +361,6 @@ static const struct samsung_usb2_common_phy exynos4x12_phys[] = { | |||
361 | .power_on = exynos4x12_power_on, | 361 | .power_on = exynos4x12_power_on, |
362 | .power_off = exynos4x12_power_off, | 362 | .power_off = exynos4x12_power_off, |
363 | }, | 363 | }, |
364 | {}, | ||
365 | }; | 364 | }; |
366 | 365 | ||
367 | const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { | 366 | const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { |
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c index 04374018425f..e2a0be750ad9 100644 --- a/drivers/phy/phy-exynos5-usbdrd.c +++ b/drivers/phy/phy-exynos5-usbdrd.c | |||
@@ -531,7 +531,7 @@ static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev, | |||
531 | { | 531 | { |
532 | struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); | 532 | struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); |
533 | 533 | ||
534 | if (WARN_ON(args->args[0] > EXYNOS5_DRDPHYS_NUM)) | 534 | if (WARN_ON(args->args[0] >= EXYNOS5_DRDPHYS_NUM)) |
535 | return ERR_PTR(-ENODEV); | 535 | return ERR_PTR(-ENODEV); |
536 | 536 | ||
537 | return phy_drd->phys[args->args[0]].phy; | 537 | return phy_drd->phys[args->args[0]].phy; |
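
The exynos5-usbdrd xlate check changes '>' to '>=': with EXYNOS5_DRDPHYS_NUM entries, valid indices run 0..NUM-1, so an argument equal to NUM must be rejected before it is used to index phy_drd->phys[]. The off-by-one in a few lines (illustrative array and size):

    #include <stdio.h>

    #define NPHYS 2

    int main(void)
    {
        int phys[NPHYS] = { 10, 11 };
        unsigned int args[] = { 0, 1, 2 };   /* 2 == NPHYS must be rejected */

        for (unsigned int i = 0; i < 3; i++) {
            if (args[i] >= NPHYS) {          /* '>' here would let 2 through */
                printf("arg %u: rejected\n", args[i]);
                continue;
            }
            printf("arg %u: phy %d\n", args[i], phys[args[i]]);
        }
        return 0;
    }
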
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c index 1c139aa0d074..2ed1735a076a 100644 --- a/drivers/phy/phy-exynos5250-usb2.c +++ b/drivers/phy/phy-exynos5250-usb2.c | |||
@@ -391,7 +391,6 @@ static const struct samsung_usb2_common_phy exynos5250_phys[] = { | |||
391 | .power_on = exynos5250_power_on, | 391 | .power_on = exynos5250_power_on, |
392 | .power_off = exynos5250_power_off, | 392 | .power_off = exynos5250_power_off, |
393 | }, | 393 | }, |
394 | {}, | ||
395 | }; | 394 | }; |
396 | 395 | ||
397 | const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { | 396 | const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { |
diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/phy-hix5hd2-sata.c index 34915b4202f1..d6b22659cac1 100644 --- a/drivers/phy/phy-hix5hd2-sata.c +++ b/drivers/phy/phy-hix5hd2-sata.c | |||
@@ -147,6 +147,9 @@ static int hix5hd2_sata_phy_probe(struct platform_device *pdev) | |||
147 | return -ENOMEM; | 147 | return -ENOMEM; |
148 | 148 | ||
149 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 149 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
150 | if (!res) | ||
151 | return -EINVAL; | ||
152 | |||
150 | priv->base = devm_ioremap(dev, res->start, resource_size(res)); | 153 | priv->base = devm_ioremap(dev, res->start, resource_size(res)); |
151 | if (!priv->base) | 154 | if (!priv->base) |
152 | return -ENOMEM; | 155 | return -ENOMEM; |
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c index 9b2848e6115d..933435214acc 100644 --- a/drivers/phy/phy-miphy28lp.c +++ b/drivers/phy/phy-miphy28lp.c | |||
@@ -228,6 +228,7 @@ struct miphy28lp_dev { | |||
228 | struct regmap *regmap; | 228 | struct regmap *regmap; |
229 | struct mutex miphy_mutex; | 229 | struct mutex miphy_mutex; |
230 | struct miphy28lp_phy **phys; | 230 | struct miphy28lp_phy **phys; |
231 | int nphys; | ||
231 | }; | 232 | }; |
232 | 233 | ||
233 | struct miphy_initval { | 234 | struct miphy_initval { |
@@ -1116,7 +1117,7 @@ static struct phy *miphy28lp_xlate(struct device *dev, | |||
1116 | return ERR_PTR(-EINVAL); | 1117 | return ERR_PTR(-EINVAL); |
1117 | } | 1118 | } |
1118 | 1119 | ||
1119 | for (index = 0; index < of_get_child_count(dev->of_node); index++) | 1120 | for (index = 0; index < miphy_dev->nphys; index++) |
1120 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { | 1121 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { |
1121 | miphy_phy = miphy_dev->phys[index]; | 1122 | miphy_phy = miphy_dev->phys[index]; |
1122 | break; | 1123 | break; |
@@ -1138,6 +1139,7 @@ static struct phy *miphy28lp_xlate(struct device *dev, | |||
1138 | 1139 | ||
1139 | static struct phy_ops miphy28lp_ops = { | 1140 | static struct phy_ops miphy28lp_ops = { |
1140 | .init = miphy28lp_init, | 1141 | .init = miphy28lp_init, |
1142 | .owner = THIS_MODULE, | ||
1141 | }; | 1143 | }; |
1142 | 1144 | ||
1143 | static int miphy28lp_probe_resets(struct device_node *node, | 1145 | static int miphy28lp_probe_resets(struct device_node *node, |
@@ -1200,16 +1202,15 @@ static int miphy28lp_probe(struct platform_device *pdev) | |||
1200 | struct miphy28lp_dev *miphy_dev; | 1202 | struct miphy28lp_dev *miphy_dev; |
1201 | struct phy_provider *provider; | 1203 | struct phy_provider *provider; |
1202 | struct phy *phy; | 1204 | struct phy *phy; |
1203 | int chancount, port = 0; | 1205 | int ret, port = 0; |
1204 | int ret; | ||
1205 | 1206 | ||
1206 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); | 1207 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); |
1207 | if (!miphy_dev) | 1208 | if (!miphy_dev) |
1208 | return -ENOMEM; | 1209 | return -ENOMEM; |
1209 | 1210 | ||
1210 | chancount = of_get_child_count(np); | 1211 | miphy_dev->nphys = of_get_child_count(np); |
1211 | miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, | 1212 | miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys, |
1212 | GFP_KERNEL); | 1213 | sizeof(*miphy_dev->phys), GFP_KERNEL); |
1213 | if (!miphy_dev->phys) | 1214 | if (!miphy_dev->phys) |
1214 | return -ENOMEM; | 1215 | return -ENOMEM; |
1215 | 1216 | ||
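
The miphy28lp probe (and the matching miphy365x hunk just below) counts the children once into miphy_dev->nphys, sizes the array with devm_kcalloc() from sizeof(*miphy_dev->phys) rather than sizeof(phy) — which was only the right size because both happen to be pointers — and has xlate iterate over nphys instead of re-counting DT children on every call. A shape-preserving userspace sketch of the count-once-then-kcalloc pattern (calloc stands in for devm_kcalloc; names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct child_phy { int port; };

    struct miphy_dev {
        struct child_phy **phys;
        int nphys;                    /* counted once at probe time */
    };

    int main(void)
    {
        struct miphy_dev dev;
        int nchildren = 3;            /* of_get_child_count() analogue */

        dev.nphys = nchildren;
        /* Element size is taken from the array being allocated, not
         * from some other type that merely has the same size. */
        dev.phys = calloc(dev.nphys, sizeof(*dev.phys));
        if (!dev.phys)
            return 1;

        for (int i = 0; i < dev.nphys; i++)   /* xlate loops over nphys */
            printf("slot %d: %p\n", i, (void *)dev.phys[i]);

        free(dev.phys);
        return 0;
    }
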
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c index 6c80154e8bff..51b459db9137 100644 --- a/drivers/phy/phy-miphy365x.c +++ b/drivers/phy/phy-miphy365x.c | |||
@@ -150,6 +150,7 @@ struct miphy365x_dev { | |||
150 | struct regmap *regmap; | 150 | struct regmap *regmap; |
151 | struct mutex miphy_mutex; | 151 | struct mutex miphy_mutex; |
152 | struct miphy365x_phy **phys; | 152 | struct miphy365x_phy **phys; |
153 | int nphys; | ||
153 | }; | 154 | }; |
154 | 155 | ||
155 | /* | 156 | /* |
@@ -485,7 +486,7 @@ static struct phy *miphy365x_xlate(struct device *dev, | |||
485 | return ERR_PTR(-EINVAL); | 486 | return ERR_PTR(-EINVAL); |
486 | } | 487 | } |
487 | 488 | ||
488 | for (index = 0; index < of_get_child_count(dev->of_node); index++) | 489 | for (index = 0; index < miphy_dev->nphys; index++) |
489 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { | 490 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { |
490 | miphy_phy = miphy_dev->phys[index]; | 491 | miphy_phy = miphy_dev->phys[index]; |
491 | break; | 492 | break; |
@@ -541,16 +542,15 @@ static int miphy365x_probe(struct platform_device *pdev) | |||
541 | struct miphy365x_dev *miphy_dev; | 542 | struct miphy365x_dev *miphy_dev; |
542 | struct phy_provider *provider; | 543 | struct phy_provider *provider; |
543 | struct phy *phy; | 544 | struct phy *phy; |
544 | int chancount, port = 0; | 545 | int ret, port = 0; |
545 | int ret; | ||
546 | 546 | ||
547 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); | 547 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); |
548 | if (!miphy_dev) | 548 | if (!miphy_dev) |
549 | return -ENOMEM; | 549 | return -ENOMEM; |
550 | 550 | ||
551 | chancount = of_get_child_count(np); | 551 | miphy_dev->nphys = of_get_child_count(np); |
552 | miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, | 552 | miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys, |
553 | GFP_KERNEL); | 553 | sizeof(*miphy_dev->phys), GFP_KERNEL); |
554 | if (!miphy_dev->phys) | 554 | if (!miphy_dev->phys) |
555 | return -ENOMEM; | 555 | return -ENOMEM; |
556 | 556 | ||
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c index efe724f97e02..93252e053a31 100644 --- a/drivers/phy/phy-omap-control.c +++ b/drivers/phy/phy-omap-control.c | |||
@@ -360,7 +360,7 @@ static void __exit omap_control_phy_exit(void) | |||
360 | } | 360 | } |
361 | module_exit(omap_control_phy_exit); | 361 | module_exit(omap_control_phy_exit); |
362 | 362 | ||
363 | MODULE_ALIAS("platform: omap_control_phy"); | 363 | MODULE_ALIAS("platform:omap_control_phy"); |
364 | MODULE_AUTHOR("Texas Instruments Inc."); | 364 | MODULE_AUTHOR("Texas Instruments Inc."); |
365 | MODULE_DESCRIPTION("OMAP Control Module PHY Driver"); | 365 | MODULE_DESCRIPTION("OMAP Control Module PHY Driver"); |
366 | MODULE_LICENSE("GPL v2"); | 366 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c index 6f4aef3db248..4757e765696a 100644 --- a/drivers/phy/phy-omap-usb2.c +++ b/drivers/phy/phy-omap-usb2.c | |||
@@ -296,10 +296,11 @@ static int omap_usb2_probe(struct platform_device *pdev) | |||
296 | dev_warn(&pdev->dev, | 296 | dev_warn(&pdev->dev, |
297 | "found usb_otg_ss_refclk960m, please fix DTS\n"); | 297 | "found usb_otg_ss_refclk960m, please fix DTS\n"); |
298 | } | 298 | } |
299 | } else { | ||
300 | clk_prepare(phy->optclk); | ||
301 | } | 299 | } |
302 | 300 | ||
301 | if (!IS_ERR(phy->optclk)) | ||
302 | clk_prepare(phy->optclk); | ||
303 | |||
303 | usb_add_phy_dev(&phy->phy); | 304 | usb_add_phy_dev(&phy->phy); |
304 | 305 | ||
305 | return 0; | 306 | return 0; |
@@ -383,7 +384,7 @@ static struct platform_driver omap_usb2_driver = { | |||
383 | 384 | ||
384 | module_platform_driver(omap_usb2_driver); | 385 | module_platform_driver(omap_usb2_driver); |
385 | 386 | ||
386 | MODULE_ALIAS("platform: omap_usb2"); | 387 | MODULE_ALIAS("platform:omap_usb2"); |
387 | MODULE_AUTHOR("Texas Instruments Inc."); | 388 | MODULE_AUTHOR("Texas Instruments Inc."); |
388 | MODULE_DESCRIPTION("OMAP USB2 phy driver"); | 389 | MODULE_DESCRIPTION("OMAP USB2 phy driver"); |
389 | MODULE_LICENSE("GPL v2"); | 390 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c index 22011c3b6a4b..7d4c33643768 100644 --- a/drivers/phy/phy-rockchip-usb.c +++ b/drivers/phy/phy-rockchip-usb.c | |||
@@ -61,8 +61,6 @@ static int rockchip_usb_phy_power_off(struct phy *_phy) | |||
61 | return ret; | 61 | return ret; |
62 | 62 | ||
63 | clk_disable_unprepare(phy->clk); | 63 | clk_disable_unprepare(phy->clk); |
64 | if (ret) | ||
65 | return ret; | ||
66 | 64 | ||
67 | return 0; | 65 | return 0; |
68 | } | 66 | } |
@@ -78,8 +76,10 @@ static int rockchip_usb_phy_power_on(struct phy *_phy) | |||
78 | 76 | ||
79 | /* Power up usb phy analog blocks by set siddq 0 */ | 77 | /* Power up usb phy analog blocks by set siddq 0 */ |
80 | ret = rockchip_usb_phy_power(phy, 0); | 78 | ret = rockchip_usb_phy_power(phy, 0); |
81 | if (ret) | 79 | if (ret) { |
80 | clk_disable_unprepare(phy->clk); | ||
82 | return ret; | 81 | return ret; |
82 | } | ||
83 | 83 | ||
84 | return 0; | 84 | return 0; |
85 | } | 85 | } |
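
Two small Rockchip PHY fixes: power_off drops a dead "if (ret) return ret;" after clk_disable_unprepare() (ret is already known to be zero there, and the clk call returns void), and power_on now disables the clock it has just enabled when rockchip_usb_phy_power() fails, so the error path releases exactly what it acquired. A generic unwind-on-error sketch (illustrative names, simulated clock and power helpers):

    #include <stdio.h>

    static int clk_on;

    static int clk_prepare_enable(void)     { clk_on = 1; return 0; }
    static void clk_disable_unprepare(void) { clk_on = 0; }

    /* Simulated "power up the analog block" step that can fail. */
    static int phy_power(int fail) { return fail ? -5 : 0; }

    static int power_on(int fail)
    {
        int ret = clk_prepare_enable();

        if (ret)
            return ret;

        ret = phy_power(fail);
        if (ret) {
            clk_disable_unprepare();   /* undo the enable we just did */
            return ret;
        }
        return 0;
    }

    int main(void)
    {
        printf("fail path: %d, clk left on: %d\n", power_on(1), clk_on);
        printf("good path: %d, clk on: %d\n", power_on(0), clk_on);
        return 0;
    }
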
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 95c88f929f27..2ba610b72ca2 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c | |||
@@ -165,15 +165,11 @@ static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy) | |||
165 | cpu_relax(); | 165 | cpu_relax(); |
166 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); | 166 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); |
167 | if (val & PLL_LOCK) | 167 | if (val & PLL_LOCK) |
168 | break; | 168 | return 0; |
169 | } while (!time_after(jiffies, timeout)); | 169 | } while (!time_after(jiffies, timeout)); |
170 | 170 | ||
171 | if (!(val & PLL_LOCK)) { | 171 | dev_err(phy->dev, "DPLL failed to lock\n"); |
172 | dev_err(phy->dev, "DPLL failed to lock\n"); | 172 | return -EBUSY; |
173 | return -EBUSY; | ||
174 | } | ||
175 | |||
176 | return 0; | ||
177 | } | 173 | } |
178 | 174 | ||
179 | static int ti_pipe3_dpll_program(struct ti_pipe3 *phy) | 175 | static int ti_pipe3_dpll_program(struct ti_pipe3 *phy) |
@@ -608,7 +604,7 @@ static struct platform_driver ti_pipe3_driver = { | |||
608 | 604 | ||
609 | module_platform_driver(ti_pipe3_driver); | 605 | module_platform_driver(ti_pipe3_driver); |
610 | 606 | ||
611 | MODULE_ALIAS("platform: ti_pipe3"); | 607 | MODULE_ALIAS("platform:ti_pipe3"); |
612 | MODULE_AUTHOR("Texas Instruments Inc."); | 608 | MODULE_AUTHOR("Texas Instruments Inc."); |
613 | MODULE_DESCRIPTION("TI PIPE3 phy driver"); | 609 | MODULE_DESCRIPTION("TI PIPE3 phy driver"); |
614 | MODULE_LICENSE("GPL v2"); | 610 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 8e87f54671f3..bc42d6a8939f 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c | |||
@@ -666,7 +666,6 @@ static int twl4030_usb_probe(struct platform_device *pdev) | |||
666 | twl->dev = &pdev->dev; | 666 | twl->dev = &pdev->dev; |
667 | twl->irq = platform_get_irq(pdev, 0); | 667 | twl->irq = platform_get_irq(pdev, 0); |
668 | twl->vbus_supplied = false; | 668 | twl->vbus_supplied = false; |
669 | twl->linkstat = -EINVAL; | ||
670 | twl->linkstat = OMAP_MUSB_UNKNOWN; | 669 | twl->linkstat = OMAP_MUSB_UNKNOWN; |
671 | 670 | ||
672 | twl->phy.dev = twl->dev; | 671 | twl->phy.dev = twl->dev; |
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c index 29214a36ea28..2263cd010032 100644 --- a/drivers/phy/phy-xgene.c +++ b/drivers/phy/phy-xgene.c | |||
@@ -1704,7 +1704,6 @@ static int xgene_phy_probe(struct platform_device *pdev) | |||
1704 | for (i = 0; i < MAX_LANE; i++) | 1704 | for (i = 0; i < MAX_LANE; i++) |
1705 | ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ | 1705 | ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ |
1706 | 1706 | ||
1707 | ctx->dev = &pdev->dev; | ||
1708 | platform_set_drvdata(pdev, ctx); | 1707 | platform_set_drvdata(pdev, ctx); |
1709 | 1708 | ||
1710 | ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); | 1709 | ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); |
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 97b5e4ee1ca4..63d4033eb683 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c | |||
@@ -73,7 +73,7 @@ | |||
73 | 73 | ||
74 | #define TIME_WINDOW_MAX_MSEC 40000 | 74 | #define TIME_WINDOW_MAX_MSEC 40000 |
75 | #define TIME_WINDOW_MIN_MSEC 250 | 75 | #define TIME_WINDOW_MIN_MSEC 250 |
76 | 76 | #define ENERGY_UNIT_SCALE 1000 /* scale from driver unit to powercap unit */ | |
77 | enum unit_type { | 77 | enum unit_type { |
78 | ARBITRARY_UNIT, /* no translation */ | 78 | ARBITRARY_UNIT, /* no translation */ |
79 | POWER_UNIT, | 79 | POWER_UNIT, |
@@ -158,6 +158,7 @@ struct rapl_domain { | |||
158 | struct rapl_power_limit rpl[NR_POWER_LIMITS]; | 158 | struct rapl_power_limit rpl[NR_POWER_LIMITS]; |
159 | u64 attr_map; /* track capabilities */ | 159 | u64 attr_map; /* track capabilities */ |
160 | unsigned int state; | 160 | unsigned int state; |
161 | unsigned int domain_energy_unit; | ||
161 | int package_id; | 162 | int package_id; |
162 | }; | 163 | }; |
163 | #define power_zone_to_rapl_domain(_zone) \ | 164 | #define power_zone_to_rapl_domain(_zone) \ |
@@ -190,6 +191,7 @@ struct rapl_defaults { | |||
190 | void (*set_floor_freq)(struct rapl_domain *rd, bool mode); | 191 | void (*set_floor_freq)(struct rapl_domain *rd, bool mode); |
191 | u64 (*compute_time_window)(struct rapl_package *rp, u64 val, | 192 | u64 (*compute_time_window)(struct rapl_package *rp, u64 val, |
192 | bool to_raw); | 193 | bool to_raw); |
194 | unsigned int dram_domain_energy_unit; | ||
193 | }; | 195 | }; |
194 | static struct rapl_defaults *rapl_defaults; | 196 | static struct rapl_defaults *rapl_defaults; |
195 | 197 | ||
@@ -227,7 +229,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd, | |||
227 | static int rapl_write_data_raw(struct rapl_domain *rd, | 229 | static int rapl_write_data_raw(struct rapl_domain *rd, |
228 | enum rapl_primitives prim, | 230 | enum rapl_primitives prim, |
229 | unsigned long long value); | 231 | unsigned long long value); |
230 | static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | 232 | static u64 rapl_unit_xlate(struct rapl_domain *rd, int package, |
233 | enum unit_type type, u64 value, | ||
231 | int to_raw); | 234 | int to_raw); |
232 | static void package_power_limit_irq_save(int package_id); | 235 | static void package_power_limit_irq_save(int package_id); |
233 | 236 | ||
@@ -305,7 +308,9 @@ static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw) | |||
305 | 308 | ||
306 | static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy) | 309 | static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy) |
307 | { | 310 | { |
308 | *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); | 311 | struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev); |
312 | |||
313 | *energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); | ||
309 | return 0; | 314 | return 0; |
310 | } | 315 | } |
311 | 316 | ||
@@ -639,6 +644,11 @@ static void rapl_init_domains(struct rapl_package *rp) | |||
639 | rd->msrs[4] = MSR_DRAM_POWER_INFO; | 644 | rd->msrs[4] = MSR_DRAM_POWER_INFO; |
640 | rd->rpl[0].prim_id = PL1_ENABLE; | 645 | rd->rpl[0].prim_id = PL1_ENABLE; |
641 | rd->rpl[0].name = pl1_name; | 646 | rd->rpl[0].name = pl1_name; |
647 | rd->domain_energy_unit = | ||
648 | rapl_defaults->dram_domain_energy_unit; | ||
649 | if (rd->domain_energy_unit) | ||
650 | pr_info("DRAM domain energy unit %dpj\n", | ||
651 | rd->domain_energy_unit); | ||
642 | break; | 652 | break; |
643 | } | 653 | } |
644 | if (mask) { | 654 | if (mask) { |
@@ -648,11 +658,13 @@ static void rapl_init_domains(struct rapl_package *rp) | |||
648 | } | 658 | } |
649 | } | 659 | } |
650 | 660 | ||
651 | static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | 661 | static u64 rapl_unit_xlate(struct rapl_domain *rd, int package, |
662 | enum unit_type type, u64 value, | ||
652 | int to_raw) | 663 | int to_raw) |
653 | { | 664 | { |
654 | u64 units = 1; | 665 | u64 units = 1; |
655 | struct rapl_package *rp; | 666 | struct rapl_package *rp; |
667 | u64 scale = 1; | ||
656 | 668 | ||
657 | rp = find_package_by_id(package); | 669 | rp = find_package_by_id(package); |
658 | if (!rp) | 670 | if (!rp) |
@@ -663,7 +675,12 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | |||
663 | units = rp->power_unit; | 675 | units = rp->power_unit; |
664 | break; | 676 | break; |
665 | case ENERGY_UNIT: | 677 | case ENERGY_UNIT: |
666 | units = rp->energy_unit; | 678 | scale = ENERGY_UNIT_SCALE; |
679 | /* per domain unit takes precedence */ | ||
680 | if (rd && rd->domain_energy_unit) | ||
681 | units = rd->domain_energy_unit; | ||
682 | else | ||
683 | units = rp->energy_unit; | ||
667 | break; | 684 | break; |
668 | case TIME_UNIT: | 685 | case TIME_UNIT: |
669 | return rapl_defaults->compute_time_window(rp, value, to_raw); | 686 | return rapl_defaults->compute_time_window(rp, value, to_raw); |
@@ -673,11 +690,11 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | |||
673 | }; | 690 | }; |
674 | 691 | ||
675 | if (to_raw) | 692 | if (to_raw) |
676 | return div64_u64(value, units); | 693 | return div64_u64(value, units) * scale; |
677 | 694 | ||
678 | value *= units; | 695 | value *= units; |
679 | 696 | ||
680 | return value; | 697 | return div64_u64(value, scale); |
681 | } | 698 | } |
682 | 699 | ||
683 | /* in the order of enum rapl_primitives */ | 700 | /* in the order of enum rapl_primitives */ |
@@ -773,7 +790,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd, | |||
773 | final = value & rp->mask; | 790 | final = value & rp->mask; |
774 | final = final >> rp->shift; | 791 | final = final >> rp->shift; |
775 | if (xlate) | 792 | if (xlate) |
776 | *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0); | 793 | *data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0); |
777 | else | 794 | else |
778 | *data = final; | 795 | *data = final; |
779 | 796 | ||
@@ -799,7 +816,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd, | |||
799 | "failed to read msr 0x%x on cpu %d\n", msr, cpu); | 816 | "failed to read msr 0x%x on cpu %d\n", msr, cpu); |
800 | return -EIO; | 817 | return -EIO; |
801 | } | 818 | } |
802 | value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1); | 819 | value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1); |
803 | msr_val &= ~rp->mask; | 820 | msr_val &= ~rp->mask; |
804 | msr_val |= value << rp->shift; | 821 | msr_val |= value << rp->shift; |
805 | if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) { | 822 | if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) { |
@@ -818,7 +835,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd, | |||
818 | * calculate units differ on different CPUs. | 835 | * calculate units differ on different CPUs. |
819 | * We convert the units to below format based on CPUs. | 836 | * We convert the units to below format based on CPUs. |
820 | * i.e. | 837 | * i.e. |
821 | * energy unit: microJoules : Represented in microJoules by default | 838 | * energy unit: picoJoules : Represented in picoJoules by default |
822 | * power unit : microWatts : Represented in milliWatts by default | 839 | * power unit : microWatts : Represented in milliWatts by default |
823 | * time unit : microseconds: Represented in seconds by default | 840 | * time unit : microseconds: Represented in seconds by default |
824 | */ | 841 | */ |
@@ -834,7 +851,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu) | |||
834 | } | 851 | } |
835 | 852 | ||
836 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; | 853 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; |
837 | rp->energy_unit = 1000000 / (1 << value); | 854 | rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value); |
838 | 855 | ||
839 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; | 856 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; |
840 | rp->power_unit = 1000000 / (1 << value); | 857 | rp->power_unit = 1000000 / (1 << value); |
@@ -842,7 +859,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu) | |||
842 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; | 859 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; |
843 | rp->time_unit = 1000000 / (1 << value); | 860 | rp->time_unit = 1000000 / (1 << value); |
844 | 861 | ||
845 | pr_debug("Core CPU package %d energy=%duJ, time=%dus, power=%duW\n", | 862 | pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n", |
846 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); | 863 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); |
847 | 864 | ||
848 | return 0; | 865 | return 0; |
@@ -859,7 +876,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu) | |||
859 | return -ENODEV; | 876 | return -ENODEV; |
860 | } | 877 | } |
861 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; | 878 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; |
862 | rp->energy_unit = 1 << value; | 879 | rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value; |
863 | 880 | ||
864 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; | 881 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; |
865 | rp->power_unit = (1 << value) * 1000; | 882 | rp->power_unit = (1 << value) * 1000; |
@@ -867,7 +884,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu) | |||
867 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; | 884 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; |
868 | rp->time_unit = 1000000 / (1 << value); | 885 | rp->time_unit = 1000000 / (1 << value); |
869 | 886 | ||
870 | pr_debug("Atom package %d energy=%duJ, time=%dus, power=%duW\n", | 887 | pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n", |
871 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); | 888 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); |
872 | 889 | ||
873 | return 0; | 890 | return 0; |
@@ -1017,6 +1034,13 @@ static const struct rapl_defaults rapl_defaults_core = { | |||
1017 | .compute_time_window = rapl_compute_time_window_core, | 1034 | .compute_time_window = rapl_compute_time_window_core, |
1018 | }; | 1035 | }; |
1019 | 1036 | ||
1037 | static const struct rapl_defaults rapl_defaults_hsw_server = { | ||
1038 | .check_unit = rapl_check_unit_core, | ||
1039 | .set_floor_freq = set_floor_freq_default, | ||
1040 | .compute_time_window = rapl_compute_time_window_core, | ||
1041 | .dram_domain_energy_unit = 15300, | ||
1042 | }; | ||
1043 | |||
1020 | static const struct rapl_defaults rapl_defaults_atom = { | 1044 | static const struct rapl_defaults rapl_defaults_atom = { |
1021 | .check_unit = rapl_check_unit_atom, | 1045 | .check_unit = rapl_check_unit_atom, |
1022 | .set_floor_freq = set_floor_freq_atom, | 1046 | .set_floor_freq = set_floor_freq_atom, |
@@ -1037,7 +1061,7 @@ static const struct x86_cpu_id rapl_ids[] = { | |||
1037 | RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */ | 1061 | RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */ |
1038 | RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */ | 1062 | RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */ |
1039 | RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */ | 1063 | RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */ |
1040 | RAPL_CPU(0x3f, rapl_defaults_core),/* Haswell */ | 1064 | RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */ |
1041 | RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ | 1065 | RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ |
1042 | RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ | 1066 | RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ |
1043 | RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ | 1067 | RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ |
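Note on the intel_rapl hunks above: rapl_unit_xlate() now takes the rapl_domain pointer so energy can be scaled per domain rather than per package. Haswell server parts (model 0x3f) report DRAM energy in a fixed 15.3 uJ unit regardless of MSR_RAPL_POWER_UNIT, hence the new rapl_defaults_hsw_server with dram_domain_energy_unit = 15300 and the switch to an ENERGY_UNIT_SCALE'd fixed-point representation (the "uJ" debug strings become "pJ"). The following is a hedged userspace model of that unit selection, not the driver code; ENERGY_UNIT_SCALE is assumed to be 100 and the struct is a simplified stand-in.

/* Hedged userspace model of per-domain RAPL energy-unit selection.
 * Assumes ENERGY_UNIT_SCALE == 100; the real driver derives the divisor
 * from the energy-status-units field of MSR_RAPL_POWER_UNIT.
 */
#include <stdio.h>
#include <stdint.h>

#define ENERGY_UNIT_SCALE 100	/* assumed scale factor */

struct pkg_model {
	unsigned int energy_unit;		/* scaled per-package unit */
	unsigned int dram_domain_energy_unit;	/* 0 = use package unit */
};

/* Raw counter -> scaled energy, picking the DRAM override when present. */
static uint64_t energy_xlate(const struct pkg_model *pkg, int is_dram,
			     uint64_t raw)
{
	unsigned int unit = pkg->energy_unit;

	if (is_dram && pkg->dram_domain_energy_unit)
		unit = pkg->dram_domain_energy_unit;
	return raw * unit;
}

int main(void)
{
	/* energy-status-units field of 14 -> 61 uJ -> 6103 scaled units */
	unsigned int esu = 14;
	struct pkg_model hsw_server = {
		.energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1u << esu),
		.dram_domain_energy_unit = 15300,	/* fixed 15.3 uJ */
	};

	printf("pkg  raw=1000 -> %llu\n",
	       (unsigned long long)energy_xlate(&hsw_server, 0, 1000));
	printf("dram raw=1000 -> %llu\n",
	       (unsigned long long)energy_xlate(&hsw_server, 1, 1000));
	return 0;
}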
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index b4f7744f6751..b283a1a573b3 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
@@ -324,7 +324,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) | |||
324 | 324 | ||
325 | ret = IRQ_HANDLED; | 325 | ret = IRQ_HANDLED; |
326 | } | 326 | } |
327 | spin_lock(&suspended_lock); | 327 | spin_unlock(&suspended_lock); |
328 | 328 | ||
329 | return ret; | 329 | return ret; |
330 | } | 330 | } |
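The rtc-at91rm9200 one-liner fixes a lock imbalance: the interrupt handler took suspended_lock a second time where it meant to release it, which deadlocks on the next acquisition. Below is a minimal, hedged userspace analogue of the balanced pattern, using pthread spinlocks purely for illustration rather than the kernel locking API.

/* Hedged userspace analogue of a balanced lock/unlock in a handler. */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t suspended_lock;
static int suspended;
static unsigned long pending_events;

static void handle_event(unsigned long events)
{
	pthread_spin_lock(&suspended_lock);
	if (suspended)
		pending_events |= events;	/* defer while suspended */
	pthread_spin_unlock(&suspended_lock);	/* release, don't re-lock */
}

int main(void)
{
	pthread_spin_init(&suspended_lock, PTHREAD_PROCESS_PRIVATE);
	handle_event(0x1);
	handle_event(0x2);	/* would deadlock if handle_event re-locked */
	printf("pending_events=%#lx\n", pending_events);
	pthread_spin_destroy(&suspended_lock);
	return 0;
}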
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 99f43b7fc9ab..ab4879e12ea7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
@@ -1596,7 +1596,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( | |||
1596 | /* | 1596 | /* |
1597 | * Finally register the new FC Nexus with TCM | 1597 | * Finally register the new FC Nexus with TCM |
1598 | */ | 1598 | */ |
1599 | __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); | 1599 | transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); |
1600 | 1600 | ||
1601 | return 0; | 1601 | return 0; |
1602 | } | 1602 | } |
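The tcm_qla2xxx hunk swaps __transport_register_session() for transport_register_session(). By the usual kernel convention the double-underscore variant assumes the caller already holds the relevant lock, while the plain variant takes it itself; this path was calling the unlocked variant without holding the lock. The sketch below is a hedged userspace illustration of that locked/unlocked pairing convention, not the TCM API.

/* Hedged illustration of the "__foo() = caller holds the lock,
 * foo() = takes the lock itself" convention, modelled with pthreads.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tpg_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_sessions;

/* Lock already held by the caller. */
static void __register_session(const char *name)
{
	nr_sessions++;
	printf("registered %s (total %d)\n", name, nr_sessions);
}

/* Takes and releases the lock around the core helper. */
static void register_session(const char *name)
{
	pthread_mutex_lock(&tpg_lock);
	__register_session(name);
	pthread_mutex_unlock(&tpg_lock);
}

int main(void)
{
	/* Callers that do not hold tpg_lock must use the locking wrapper. */
	register_session("fc-nexus-0");
	return 0;
}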
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 4324282afe49..03b2a90b9ac0 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c | |||
@@ -330,16 +330,6 @@ static void device_init_registers(struct vnt_private *pDevice) | |||
330 | /* zonetype initial */ | 330 | /* zonetype initial */ |
331 | pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; | 331 | pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; |
332 | 332 | ||
333 | /* Get RFType */ | ||
334 | pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE); | ||
335 | |||
336 | /* force change RevID for VT3253 emu */ | ||
337 | if ((pDevice->byRFType & RF_EMU) != 0) | ||
338 | pDevice->byRevId = 0x80; | ||
339 | |||
340 | pDevice->byRFType &= RF_MASK; | ||
341 | pr_debug("pDevice->byRFType = %x\n", pDevice->byRFType); | ||
342 | |||
343 | if (!pDevice->bZoneRegExist) | 333 | if (!pDevice->bZoneRegExist) |
344 | pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; | 334 | pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; |
345 | 335 | ||
@@ -1187,12 +1177,14 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) | |||
1187 | { | 1177 | { |
1188 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 1178 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
1189 | PSTxDesc head_td; | 1179 | PSTxDesc head_td; |
1190 | u32 dma_idx = TYPE_AC0DMA; | 1180 | u32 dma_idx; |
1191 | unsigned long flags; | 1181 | unsigned long flags; |
1192 | 1182 | ||
1193 | spin_lock_irqsave(&priv->lock, flags); | 1183 | spin_lock_irqsave(&priv->lock, flags); |
1194 | 1184 | ||
1195 | if (!ieee80211_is_data(hdr->frame_control)) | 1185 | if (ieee80211_is_data(hdr->frame_control)) |
1186 | dma_idx = TYPE_AC0DMA; | ||
1187 | else | ||
1196 | dma_idx = TYPE_TXDMA0; | 1188 | dma_idx = TYPE_TXDMA0; |
1197 | 1189 | ||
1198 | if (AVAIL_TD(priv, dma_idx) < 1) { | 1190 | if (AVAIL_TD(priv, dma_idx) < 1) { |
@@ -1206,6 +1198,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) | |||
1206 | 1198 | ||
1207 | head_td->pTDInfo->skb = skb; | 1199 | head_td->pTDInfo->skb = skb; |
1208 | 1200 | ||
1201 | if (dma_idx == TYPE_AC0DMA) | ||
1202 | head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; | ||
1203 | |||
1209 | priv->iTDUsed[dma_idx]++; | 1204 | priv->iTDUsed[dma_idx]++; |
1210 | 1205 | ||
1211 | /* Take ownership */ | 1206 | /* Take ownership */ |
@@ -1234,13 +1229,10 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) | |||
1234 | 1229 | ||
1235 | head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); | 1230 | head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); |
1236 | 1231 | ||
1237 | if (dma_idx == TYPE_AC0DMA) { | 1232 | if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) |
1238 | head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; | ||
1239 | |||
1240 | MACvTransmitAC0(priv->PortOffset); | 1233 | MACvTransmitAC0(priv->PortOffset); |
1241 | } else { | 1234 | else |
1242 | MACvTransmit0(priv->PortOffset); | 1235 | MACvTransmit0(priv->PortOffset); |
1243 | } | ||
1244 | 1236 | ||
1245 | spin_unlock_irqrestore(&priv->lock, flags); | 1237 | spin_unlock_irqrestore(&priv->lock, flags); |
1246 | 1238 | ||
@@ -1778,6 +1770,12 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent) | |||
1778 | MACvInitialize(priv->PortOffset); | 1770 | MACvInitialize(priv->PortOffset); |
1779 | MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); | 1771 | MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); |
1780 | 1772 | ||
1773 | /* Get RFType */ | ||
1774 | priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE); | ||
1775 | priv->byRFType &= RF_MASK; | ||
1776 | |||
1777 | dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType); | ||
1778 | |||
1781 | device_get_options(priv); | 1779 | device_get_options(priv); |
1782 | device_set_options(priv); | 1780 | device_set_options(priv); |
1783 | /* Mask out the options cannot be set to the chip */ | 1781 | /* Mask out the options cannot be set to the chip */ |
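Two things happen in the vt6655 device_main hunks: the RF type is now read and masked once at probe time, before device_get_options()/device_set_options() need it, and the TX path records which DMA ring a frame was queued on by setting TD_FLAGS_NETIF_SKB at queue time, so the transmit kick tests the descriptor flag instead of re-deriving the ring from the frame type. Below is a hedged userspace model of that ring selection and flag-driven kick; the names mirror the driver but the types are simplified stand-ins.

/* Hedged model of the vt6655 TX-ring selection: data frames go to the
 * AC0 DMA ring, everything else to ring 0, and the choice is remembered
 * in a per-descriptor flag so the "kick" step doesn't re-parse the frame.
 */
#include <stdbool.h>
#include <stdio.h>

enum { TYPE_TXDMA0 = 0, TYPE_AC0DMA = 1 };
#define TD_FLAGS_NETIF_SKB 0x01

struct tx_desc {
	unsigned int ring;
	unsigned char flags;
};

static void queue_frame(struct tx_desc *td, bool is_data)
{
	td->ring = is_data ? TYPE_AC0DMA : TYPE_TXDMA0;
	td->flags = 0;
	if (td->ring == TYPE_AC0DMA)
		td->flags |= TD_FLAGS_NETIF_SKB;
}

static void kick_hardware(const struct tx_desc *td)
{
	if (td->flags & TD_FLAGS_NETIF_SKB)
		printf("MACvTransmitAC0\n");	/* data ring doorbell */
	else
		printf("MACvTransmit0\n");	/* management/control ring */
}

int main(void)
{
	struct tx_desc td;

	queue_frame(&td, true);		/* data frame */
	kick_hardware(&td);
	queue_frame(&td, false);	/* e.g. a management frame */
	kick_hardware(&td);
	return 0;
}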
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c index 941b2adca95a..7626f635f160 100644 --- a/drivers/staging/vt6655/rf.c +++ b/drivers/staging/vt6655/rf.c | |||
@@ -794,6 +794,7 @@ bool RFbSetPower( | |||
794 | break; | 794 | break; |
795 | case RATE_6M: | 795 | case RATE_6M: |
796 | case RATE_9M: | 796 | case RATE_9M: |
797 | case RATE_12M: | ||
797 | case RATE_18M: | 798 | case RATE_18M: |
798 | byPwr = priv->abyOFDMPwrTbl[uCH]; | 799 | byPwr = priv->abyOFDMPwrTbl[uCH]; |
799 | if (priv->byRFType == RF_UW2452) | 800 | if (priv->byRFType == RF_UW2452) |
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c index c42cde59f598..c4286ccac320 100644 --- a/drivers/staging/vt6656/rf.c +++ b/drivers/staging/vt6656/rf.c | |||
@@ -640,6 +640,7 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel) | |||
640 | break; | 640 | break; |
641 | case RATE_6M: | 641 | case RATE_6M: |
642 | case RATE_9M: | 642 | case RATE_9M: |
643 | case RATE_12M: | ||
643 | case RATE_18M: | 644 | case RATE_18M: |
644 | case RATE_24M: | 645 | case RATE_24M: |
645 | case RATE_36M: | 646 | case RATE_36M: |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 50bad55a0c42..2accb6e47beb 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -4256,11 +4256,17 @@ int iscsit_close_connection( | |||
4256 | pr_debug("Closing iSCSI connection CID %hu on SID:" | 4256 | pr_debug("Closing iSCSI connection CID %hu on SID:" |
4257 | " %u\n", conn->cid, sess->sid); | 4257 | " %u\n", conn->cid, sess->sid); |
4258 | /* | 4258 | /* |
4259 | * Always up conn_logout_comp just in case the RX Thread is sleeping | 4259 | * Always up conn_logout_comp for the traditional TCP case just in case |
4260 | * and the logout response never got sent because the connection | 4260 | * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout |
4261 | * failed. | 4261 | * response never got sent because the connection failed. |
4262 | * | ||
4263 | * However for iser-target, isert_wait4logout() is using conn_logout_comp | ||
4264 | * to signal logout response TX interrupt completion. Go ahead and skip | ||
4265 | * this for iser since isert_rx_opcode() does not wait on logout failure, | ||
4266 | * and to avoid iscsi_conn pointer dereference in iser-target code. | ||
4262 | */ | 4267 | */ |
4263 | complete(&conn->conn_logout_comp); | 4268 | if (conn->conn_transport->transport_type == ISCSI_TCP) |
4269 | complete(&conn->conn_logout_comp); | ||
4264 | 4270 | ||
4265 | iscsi_release_thread_set(conn); | 4271 | iscsi_release_thread_set(conn); |
4266 | 4272 | ||
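The iscsi_target.c hunk makes complete(&conn->conn_logout_comp) conditional on the traditional TCP transport: iser-target reuses that completion to signal logout-response TX completion and must not see a spurious wakeup, while the matching erl0.c hunk below lets iscsit_close_connection() run for every transport again. A hedged userspace model of "complete only for this transport type" follows, with a POSIX semaphore standing in for struct completion.

/* Hedged model: signal the logout completion only for the TCP transport,
 * because another transport reuses the same completion for a different
 * event.
 */
#include <semaphore.h>
#include <stdio.h>

enum transport_type { MODEL_ISCSI_TCP, MODEL_ISCSI_RDMA };

struct conn_model {
	enum transport_type transport;
	sem_t logout_comp;
};

static void close_connection(struct conn_model *conn)
{
	/* Only the TCP RX path sleeps on logout_comp at this point. */
	if (conn->transport == MODEL_ISCSI_TCP)
		sem_post(&conn->logout_comp);
}

int main(void)
{
	struct conn_model tcp = { .transport = MODEL_ISCSI_TCP };
	int val;

	sem_init(&tcp.logout_comp, 0, 0);
	close_connection(&tcp);
	sem_getvalue(&tcp.logout_comp, &val);
	printf("tcp logout_comp signalled: %d\n", val);
	sem_destroy(&tcp.logout_comp);
	return 0;
}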
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 1c197bad6132..bdd8731a4daa 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <target/target_core_fabric.h> | 22 | #include <target/target_core_fabric.h> |
23 | 23 | ||
24 | #include <target/iscsi/iscsi_target_core.h> | 24 | #include <target/iscsi/iscsi_target_core.h> |
25 | #include <target/iscsi/iscsi_transport.h> | ||
26 | #include "iscsi_target_seq_pdu_list.h" | 25 | #include "iscsi_target_seq_pdu_list.h" |
27 | #include "iscsi_target_tq.h" | 26 | #include "iscsi_target_tq.h" |
28 | #include "iscsi_target_erl0.h" | 27 | #include "iscsi_target_erl0.h" |
@@ -940,8 +939,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) | |||
940 | 939 | ||
941 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { | 940 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { |
942 | spin_unlock_bh(&conn->state_lock); | 941 | spin_unlock_bh(&conn->state_lock); |
943 | if (conn->conn_transport->transport_type == ISCSI_TCP) | 942 | iscsit_close_connection(conn); |
944 | iscsit_close_connection(conn); | ||
945 | return; | 943 | return; |
946 | } | 944 | } |
947 | 945 | ||
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 6b3c32954689..c36bd7c29136 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c | |||
@@ -953,11 +953,8 @@ static int tcm_loop_make_nexus( | |||
953 | transport_free_session(tl_nexus->se_sess); | 953 | transport_free_session(tl_nexus->se_sess); |
954 | goto out; | 954 | goto out; |
955 | } | 955 | } |
956 | /* | 956 | /* Now, register the SAS I_T Nexus as active. */ |
957 | * Now, register the SAS I_T Nexus as active with the call to | 957 | transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, |
958 | * transport_register_session() | ||
959 | */ | ||
960 | __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, | ||
961 | tl_nexus->se_sess, tl_nexus); | 958 | tl_nexus->se_sess, tl_nexus); |
962 | tl_tpg->tl_nexus = tl_nexus; | 959 | tl_tpg->tl_nexus = tl_nexus; |
963 | pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" | 960 | pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 58f49ff69b14..79b4ec3ca2db 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -650,6 +650,18 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) | |||
650 | return aligned_max_sectors; | 650 | return aligned_max_sectors; |
651 | } | 651 | } |
652 | 652 | ||
653 | bool se_dev_check_wce(struct se_device *dev) | ||
654 | { | ||
655 | bool wce = false; | ||
656 | |||
657 | if (dev->transport->get_write_cache) | ||
658 | wce = dev->transport->get_write_cache(dev); | ||
659 | else if (dev->dev_attrib.emulate_write_cache > 0) | ||
660 | wce = true; | ||
661 | |||
662 | return wce; | ||
663 | } | ||
664 | |||
653 | int se_dev_set_max_unmap_lba_count( | 665 | int se_dev_set_max_unmap_lba_count( |
654 | struct se_device *dev, | 666 | struct se_device *dev, |
655 | u32 max_unmap_lba_count) | 667 | u32 max_unmap_lba_count) |
@@ -767,6 +779,16 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | |||
767 | pr_err("Illegal value %d\n", flag); | 779 | pr_err("Illegal value %d\n", flag); |
768 | return -EINVAL; | 780 | return -EINVAL; |
769 | } | 781 | } |
782 | if (flag && | ||
783 | dev->transport->get_write_cache) { | ||
784 | pr_err("emulate_fua_write not supported for this device\n"); | ||
785 | return -EINVAL; | ||
786 | } | ||
787 | if (dev->export_count) { | ||
788 | pr_err("emulate_fua_write cannot be changed with active" | ||
789 | " exports: %d\n", dev->export_count); | ||
790 | return -EINVAL; | ||
791 | } | ||
770 | dev->dev_attrib.emulate_fua_write = flag; | 792 | dev->dev_attrib.emulate_fua_write = flag; |
771 | pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", | 793 | pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", |
772 | dev, dev->dev_attrib.emulate_fua_write); | 794 | dev, dev->dev_attrib.emulate_fua_write); |
@@ -801,7 +823,11 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | |||
801 | pr_err("emulate_write_cache not supported for this device\n"); | 823 | pr_err("emulate_write_cache not supported for this device\n"); |
802 | return -EINVAL; | 824 | return -EINVAL; |
803 | } | 825 | } |
804 | 826 | if (dev->export_count) { | |
827 | pr_err("emulate_write_cache cannot be changed with active" | ||
828 | " exports: %d\n", dev->export_count); | ||
829 | return -EINVAL; | ||
830 | } | ||
805 | dev->dev_attrib.emulate_write_cache = flag; | 831 | dev->dev_attrib.emulate_write_cache = flag; |
806 | pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", | 832 | pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", |
807 | dev, dev->dev_attrib.emulate_write_cache); | 833 | dev, dev->dev_attrib.emulate_write_cache); |
@@ -1534,8 +1560,6 @@ int target_configure_device(struct se_device *dev) | |||
1534 | ret = dev->transport->configure_device(dev); | 1560 | ret = dev->transport->configure_device(dev); |
1535 | if (ret) | 1561 | if (ret) |
1536 | goto out; | 1562 | goto out; |
1537 | dev->dev_flags |= DF_CONFIGURED; | ||
1538 | |||
1539 | /* | 1563 | /* |
1540 | * XXX: there is not much point to have two different values here.. | 1564 | * XXX: there is not much point to have two different values here.. |
1541 | */ | 1565 | */ |
@@ -1597,6 +1621,8 @@ int target_configure_device(struct se_device *dev) | |||
1597 | list_add_tail(&dev->g_dev_node, &g_device_list); | 1621 | list_add_tail(&dev->g_dev_node, &g_device_list); |
1598 | mutex_unlock(&g_device_mutex); | 1622 | mutex_unlock(&g_device_mutex); |
1599 | 1623 | ||
1624 | dev->dev_flags |= DF_CONFIGURED; | ||
1625 | |||
1600 | return 0; | 1626 | return 0; |
1601 | 1627 | ||
1602 | out_free_alua: | 1628 | out_free_alua: |
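target_core_device.c gains se_dev_check_wce(), a shared helper that reports write-cache enablement either from the backend's get_write_cache() callback or, failing that, from the emulate_write_cache attribute; the same file now also refuses to change emulate_fua_write / emulate_write_cache while the device has active exports and only sets DF_CONFIGURED once configuration has fully succeeded. Below is a hedged userspace model of the WCE decision only; the struct is a simplified stand-in for struct se_device.

/* Hedged model of se_dev_check_wce(): prefer the backend's own
 * write-cache report, fall back to the emulation attribute.
 */
#include <stdbool.h>
#include <stdio.h>

struct dev_model {
	bool (*get_write_cache)(const struct dev_model *dev); /* optional */
	int emulate_write_cache;
};

static bool dev_check_wce(const struct dev_model *dev)
{
	if (dev->get_write_cache)
		return dev->get_write_cache(dev);
	return dev->emulate_write_cache > 0;
}

static bool backend_reports_wce(const struct dev_model *dev)
{
	(void)dev;
	return true;	/* e.g. a backend with a volatile write cache */
}

int main(void)
{
	struct dev_model with_cb = { .get_write_cache = backend_reports_wce };
	struct dev_model emulated = { .emulate_write_cache = 1 };
	struct dev_model plain = { 0 };

	printf("callback: %d, emulated: %d, plain: %d\n",
	       dev_check_wce(&with_cb), dev_check_wce(&emulated),
	       dev_check_wce(&plain));
	return 0;
}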
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 1045dcd7bf65..f6c954c4635f 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -1121,7 +1121,7 @@ static u32 pscsi_get_device_type(struct se_device *dev) | |||
1121 | struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); | 1121 | struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); |
1122 | struct scsi_device *sd = pdv->pdv_sd; | 1122 | struct scsi_device *sd = pdv->pdv_sd; |
1123 | 1123 | ||
1124 | return sd->type; | 1124 | return (sd) ? sd->type : TYPE_NO_LUN; |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | static sector_t pscsi_get_blocks(struct se_device *dev) | 1127 | static sector_t pscsi_get_blocks(struct se_device *dev) |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 9a2f9d3a6e70..3e7297411110 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -708,8 +708,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) | |||
708 | } | 708 | } |
709 | } | 709 | } |
710 | if (cdb[1] & 0x8) { | 710 | if (cdb[1] & 0x8) { |
711 | if (!dev->dev_attrib.emulate_fua_write || | 711 | if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) { |
712 | !dev->dev_attrib.emulate_write_cache) { | ||
713 | pr_err("Got CDB: 0x%02x with FUA bit set, but device" | 712 | pr_err("Got CDB: 0x%02x with FUA bit set, but device" |
714 | " does not advertise support for FUA write\n", | 713 | " does not advertise support for FUA write\n", |
715 | cdb[0]); | 714 | cdb[0]); |
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 460e93109473..6c8bd6bc175c 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c | |||
@@ -454,19 +454,6 @@ check_scsi_name: | |||
454 | } | 454 | } |
455 | EXPORT_SYMBOL(spc_emulate_evpd_83); | 455 | EXPORT_SYMBOL(spc_emulate_evpd_83); |
456 | 456 | ||
457 | static bool | ||
458 | spc_check_dev_wce(struct se_device *dev) | ||
459 | { | ||
460 | bool wce = false; | ||
461 | |||
462 | if (dev->transport->get_write_cache) | ||
463 | wce = dev->transport->get_write_cache(dev); | ||
464 | else if (dev->dev_attrib.emulate_write_cache > 0) | ||
465 | wce = true; | ||
466 | |||
467 | return wce; | ||
468 | } | ||
469 | |||
470 | /* Extended INQUIRY Data VPD Page */ | 457 | /* Extended INQUIRY Data VPD Page */ |
471 | static sense_reason_t | 458 | static sense_reason_t |
472 | spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | 459 | spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) |
@@ -490,7 +477,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | |||
490 | buf[5] = 0x07; | 477 | buf[5] = 0x07; |
491 | 478 | ||
492 | /* If WriteCache emulation is enabled, set V_SUP */ | 479 | /* If WriteCache emulation is enabled, set V_SUP */ |
493 | if (spc_check_dev_wce(dev)) | 480 | if (se_dev_check_wce(dev)) |
494 | buf[6] = 0x01; | 481 | buf[6] = 0x01; |
495 | /* If an LBA map is present set R_SUP */ | 482 | /* If an LBA map is present set R_SUP */ |
496 | spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); | 483 | spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); |
@@ -897,7 +884,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p) | |||
897 | if (pc == 1) | 884 | if (pc == 1) |
898 | goto out; | 885 | goto out; |
899 | 886 | ||
900 | if (spc_check_dev_wce(dev)) | 887 | if (se_dev_check_wce(dev)) |
901 | p[2] = 0x04; /* Write Cache Enable */ | 888 | p[2] = 0x04; /* Write Cache Enable */ |
902 | p[12] = 0x20; /* Disabled Read Ahead */ | 889 | p[12] = 0x20; /* Disabled Read Ahead */ |
903 | 890 | ||
@@ -1009,7 +996,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) | |||
1009 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | 996 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) |
1010 | spc_modesense_write_protect(&buf[length], type); | 997 | spc_modesense_write_protect(&buf[length], type); |
1011 | 998 | ||
1012 | if ((spc_check_dev_wce(dev)) && | 999 | if ((se_dev_check_wce(dev)) && |
1013 | (dev->dev_attrib.emulate_fua_write > 0)) | 1000 | (dev->dev_attrib.emulate_fua_write > 0)) |
1014 | spc_modesense_dpofua(&buf[length], type); | 1001 | spc_modesense_dpofua(&buf[length], type); |
1015 | 1002 | ||
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0adc0f650213..ac3cbabdbdf0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -2389,6 +2389,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, | |||
2389 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); | 2389 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); |
2390 | out: | 2390 | out: |
2391 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2391 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2392 | |||
2393 | if (ret && ack_kref) | ||
2394 | target_put_sess_cmd(se_sess, se_cmd); | ||
2395 | |||
2392 | return ret; | 2396 | return ret; |
2393 | } | 2397 | } |
2394 | EXPORT_SYMBOL(target_get_sess_cmd); | 2398 | EXPORT_SYMBOL(target_get_sess_cmd); |
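The target_get_sess_cmd() hunk drops the kref taken with ack_kref when the function is about to return an error, so a failing call no longer leaves the caller holding a reference it never learns about. A hedged userspace model of that "undo the get on the error path" pattern follows, with a plain counter standing in for the kref.

/* Hedged model of balancing a reference taken early in a function that
 * can still fail afterwards: put the reference back before returning
 * the error so the caller sees a net-zero refcount change.
 */
#include <stdbool.h>
#include <stdio.h>

struct cmd_model {
	int refcount;
	bool session_dying;
};

static void cmd_get(struct cmd_model *c) { c->refcount++; }
static void cmd_put(struct cmd_model *c) { c->refcount--; }

static int track_cmd(struct cmd_model *c, bool ack_kref)
{
	int ret = 0;

	if (ack_kref)
		cmd_get(c);		/* reference for the fabric ack */

	if (c->session_dying)
		ret = -1;		/* cannot add to the session list */

	if (ret && ack_kref)
		cmd_put(c);		/* error: drop the ack reference */
	return ret;
}

int main(void)
{
	struct cmd_model c = { .refcount = 0, .session_dying = true };
	int ret = track_cmd(&c, true);

	printf("ret=%d refcount=%d\n", ret, c.refcount);	/* -1, 0 */
	return 0;
}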
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 97b486c3dda1..583e755d8091 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -359,7 +359,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd) | |||
359 | ep = fc_seq_exch(seq); | 359 | ep = fc_seq_exch(seq); |
360 | if (ep) { | 360 | if (ep) { |
361 | lport = ep->lp; | 361 | lport = ep->lp; |
362 | if (lport && (ep->xid <= lport->lro_xid)) | 362 | if (lport && (ep->xid <= lport->lro_xid)) { |
363 | /* | 363 | /* |
364 | * "ddp_done" trigger invalidation of HW | 364 | * "ddp_done" trigger invalidation of HW |
365 | * specific DDP context | 365 | * specific DDP context |
@@ -374,6 +374,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd) | |||
374 | * identified using ep->xid) | 374 | * identified using ep->xid) |
375 | */ | 375 | */ |
376 | cmd->was_ddp_setup = 0; | 376 | cmd->was_ddp_setup = 0; |
377 | } | ||
377 | } | 378 | } |
378 | } | 379 | } |
379 | } | 380 | } |
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 2ab229ddee38..6ae5b8560e4d 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
@@ -119,7 +119,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value) | |||
119 | dw8250_force_idle(p); | 119 | dw8250_force_idle(p); |
120 | writeb(value, p->membase + (UART_LCR << p->regshift)); | 120 | writeb(value, p->membase + (UART_LCR << p->regshift)); |
121 | } | 121 | } |
122 | dev_err(p->dev, "Couldn't set LCR to %d\n", value); | 122 | /* |
123 | * FIXME: this deadlocks if port->lock is already held | ||
124 | * dev_err(p->dev, "Couldn't set LCR to %d\n", value); | ||
125 | */ | ||
123 | } | 126 | } |
124 | } | 127 | } |
125 | 128 | ||
@@ -163,7 +166,10 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value) | |||
163 | __raw_writeq(value & 0xff, | 166 | __raw_writeq(value & 0xff, |
164 | p->membase + (UART_LCR << p->regshift)); | 167 | p->membase + (UART_LCR << p->regshift)); |
165 | } | 168 | } |
166 | dev_err(p->dev, "Couldn't set LCR to %d\n", value); | 169 | /* |
170 | * FIXME: this deadlocks if port->lock is already held | ||
171 | * dev_err(p->dev, "Couldn't set LCR to %d\n", value); | ||
172 | */ | ||
167 | } | 173 | } |
168 | } | 174 | } |
169 | #endif /* CONFIG_64BIT */ | 175 | #endif /* CONFIG_64BIT */ |
@@ -187,7 +193,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value) | |||
187 | dw8250_force_idle(p); | 193 | dw8250_force_idle(p); |
188 | writel(value, p->membase + (UART_LCR << p->regshift)); | 194 | writel(value, p->membase + (UART_LCR << p->regshift)); |
189 | } | 195 | } |
190 | dev_err(p->dev, "Couldn't set LCR to %d\n", value); | 196 | /* |
197 | * FIXME: this deadlocks if port->lock is already held | ||
198 | * dev_err(p->dev, "Couldn't set LCR to %d\n", value); | ||
199 | */ | ||
191 | } | 200 | } |
192 | } | 201 | } |
193 | 202 | ||
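The 8250_dw hunks comment out the dev_err() that fired when the LCR write never took effect: logging there can recurse into the console path and try to take the port lock the caller already holds, so reporting the failure at that point deadlocks. The retry loop itself stays: write LCR, read it back, force the UART idle and try again a bounded number of times. Below is a hedged userspace model of that bounded write-and-verify loop; stdio stands in for the MMIO accessors.

/* Hedged model of a bounded "write register, verify readback, retry"
 * loop in the spirit of the dw8250 LCR workaround.
 */
#include <stdio.h>

#define LCR_RETRIES 1000

static unsigned char lcr_shadow;
static int flaky_writes_left = 3;	/* pretend the first writes are lost */

static void write_lcr(unsigned char v)
{
	if (flaky_writes_left > 0)
		flaky_writes_left--;	/* write ignored by the "hardware" */
	else
		lcr_shadow = v;
}

static unsigned char read_lcr(void)
{
	return lcr_shadow;
}

static int set_lcr(unsigned char value)
{
	int tries;

	for (tries = 0; tries < LCR_RETRIES; tries++) {
		write_lcr(value);
		if (read_lcr() == value)
			return 0;
		/* real driver: dw8250_force_idle(p) before retrying */
	}
	/* Do not log here with the port lock held; just report failure. */
	return -1;
}

int main(void)
{
	printf("set_lcr(0x03) -> %d, LCR=%#x\n", set_lcr(0x03), lcr_shadow);
	return 0;
}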
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index ff451048c1ac..4bfb7ac0239f 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
@@ -929,6 +929,13 @@ __acquires(hwep->lock) | |||
929 | return retval; | 929 | return retval; |
930 | } | 930 | } |
931 | 931 | ||
932 | static int otg_a_alt_hnp_support(struct ci_hdrc *ci) | ||
933 | { | ||
934 | dev_warn(&ci->gadget.dev, | ||
935 | "connect the device to an alternate port if you want HNP\n"); | ||
936 | return isr_setup_status_phase(ci); | ||
937 | } | ||
938 | |||
932 | /** | 939 | /** |
933 | * isr_setup_packet_handler: setup packet handler | 940 | * isr_setup_packet_handler: setup packet handler |
934 | * @ci: UDC descriptor | 941 | * @ci: UDC descriptor |
@@ -1061,6 +1068,10 @@ __acquires(ci->lock) | |||
1061 | ci); | 1068 | ci); |
1062 | } | 1069 | } |
1063 | break; | 1070 | break; |
1071 | case USB_DEVICE_A_ALT_HNP_SUPPORT: | ||
1072 | if (ci_otg_is_fsm_mode(ci)) | ||
1073 | err = otg_a_alt_hnp_support(ci); | ||
1074 | break; | ||
1064 | default: | 1075 | default: |
1065 | goto delegate; | 1076 | goto delegate; |
1066 | } | 1077 | } |
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c index c6b35b77dab7..61d538aa2346 100644 --- a/drivers/usb/common/usb-otg-fsm.c +++ b/drivers/usb/common/usb-otg-fsm.c | |||
@@ -150,9 +150,9 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state) | |||
150 | break; | 150 | break; |
151 | case OTG_STATE_B_PERIPHERAL: | 151 | case OTG_STATE_B_PERIPHERAL: |
152 | otg_chrg_vbus(fsm, 0); | 152 | otg_chrg_vbus(fsm, 0); |
153 | otg_loc_conn(fsm, 1); | ||
154 | otg_loc_sof(fsm, 0); | 153 | otg_loc_sof(fsm, 0); |
155 | otg_set_protocol(fsm, PROTO_GADGET); | 154 | otg_set_protocol(fsm, PROTO_GADGET); |
155 | otg_loc_conn(fsm, 1); | ||
156 | break; | 156 | break; |
157 | case OTG_STATE_B_WAIT_ACON: | 157 | case OTG_STATE_B_WAIT_ACON: |
158 | otg_chrg_vbus(fsm, 0); | 158 | otg_chrg_vbus(fsm, 0); |
@@ -213,10 +213,10 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state) | |||
213 | 213 | ||
214 | break; | 214 | break; |
215 | case OTG_STATE_A_PERIPHERAL: | 215 | case OTG_STATE_A_PERIPHERAL: |
216 | otg_loc_conn(fsm, 1); | ||
217 | otg_loc_sof(fsm, 0); | 216 | otg_loc_sof(fsm, 0); |
218 | otg_set_protocol(fsm, PROTO_GADGET); | 217 | otg_set_protocol(fsm, PROTO_GADGET); |
219 | otg_drv_vbus(fsm, 1); | 218 | otg_drv_vbus(fsm, 1); |
219 | otg_loc_conn(fsm, 1); | ||
220 | otg_add_timer(fsm, A_BIDL_ADIS); | 220 | otg_add_timer(fsm, A_BIDL_ADIS); |
221 | break; | 221 | break; |
222 | case OTG_STATE_A_WAIT_VFALL: | 222 | case OTG_STATE_A_WAIT_VFALL: |
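In the OTG FSM hunks the order of operations when entering the peripheral states changes: the protocol is switched to gadget (and, for A_PERIPHERAL, VBUS driven) before otg_loc_conn(fsm, 1) asserts the local connection, so the pull-up is only raised once the gadget side is actually ready. A hedged C sketch of that ordering follows, with the fsm operations reduced to plain function calls.

/* Hedged sketch of the "configure first, then signal connect" ordering
 * used when an OTG FSM enters a peripheral state.
 */
#include <stdio.h>

enum proto { PROTO_UNDEF, PROTO_HOST, PROTO_GADGET };

struct fsm_model {
	enum proto protocol;
	int loc_conn;	/* D+ pull-up asserted */
};

static void set_protocol(struct fsm_model *fsm, enum proto p)
{
	fsm->protocol = p;
	printf("protocol -> %s\n", p == PROTO_GADGET ? "gadget" : "host");
}

static void loc_conn(struct fsm_model *fsm, int on)
{
	fsm->loc_conn = on;
	printf("loc_conn -> %d (protocol is %s)\n", on,
	       fsm->protocol == PROTO_GADGET ? "gadget" : "not ready");
}

static void enter_b_peripheral(struct fsm_model *fsm)
{
	/* switch roles before telling the link partner we are connected */
	set_protocol(fsm, PROTO_GADGET);
	loc_conn(fsm, 1);
}

int main(void)
{
	struct fsm_model fsm = { .protocol = PROTO_UNDEF };

	enter_b_peripheral(&fsm);
	return 0;
}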
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index 02e3e2d4ea56..6cf047878dba 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c | |||
@@ -377,6 +377,9 @@ static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg) | |||
377 | dwc2_is_host_mode(hsotg) ? "Host" : "Device", | 377 | dwc2_is_host_mode(hsotg) ? "Host" : "Device", |
378 | dwc2_op_state_str(hsotg)); | 378 | dwc2_op_state_str(hsotg)); |
379 | 379 | ||
380 | if (hsotg->op_state == OTG_STATE_A_HOST) | ||
381 | dwc2_hcd_disconnect(hsotg); | ||
382 | |||
380 | /* Change to L3 (OFF) state */ | 383 | /* Change to L3 (OFF) state */ |
381 | hsotg->lx_state = DWC2_L3; | 384 | hsotg->lx_state = DWC2_L3; |
382 | 385 | ||
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c index 298b46112b1a..39f49f1ad22f 100644 --- a/drivers/usb/gadget/function/f_loopback.c +++ b/drivers/usb/gadget/function/f_loopback.c | |||
@@ -289,8 +289,7 @@ static void disable_loopback(struct f_loopback *loop) | |||
289 | struct usb_composite_dev *cdev; | 289 | struct usb_composite_dev *cdev; |
290 | 290 | ||
291 | cdev = loop->function.config->cdev; | 291 | cdev = loop->function.config->cdev; |
292 | disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL, NULL, | 292 | disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL); |
293 | NULL); | ||
294 | VDBG(cdev, "%s disabled\n", loop->function.name); | 293 | VDBG(cdev, "%s disabled\n", loop->function.name); |
295 | } | 294 | } |
296 | 295 | ||
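The f_loopback hunk tracks an interface change in the gadget zero code: with the interrupt-endpoint alternate setting reverted from f_sourcesink (the large diff that follows), disable_endpoints() is back to taking only the bulk and isochronous endpoint pairs. Below is a hedged sketch of such a four-endpoint helper, with NULL meaning "endpoint not allocated"; the types are simplified stand-ins, not the composite framework's.

/* Hedged sketch of a disable_endpoints() helper that takes the bulk
 * pair unconditionally and the isochronous pair optionally.
 */
#include <stddef.h>
#include <stdio.h>

struct ep_model {
	const char *name;
	int enabled;
};

static void disable_ep(struct ep_model *ep)
{
	ep->enabled = 0;
	printf("disabled %s\n", ep->name);
}

static void disable_endpoints(struct ep_model *in, struct ep_model *out,
			      struct ep_model *iso_in, struct ep_model *iso_out)
{
	disable_ep(in);
	disable_ep(out);
	if (iso_in)
		disable_ep(iso_in);
	if (iso_out)
		disable_ep(iso_out);
}

int main(void)
{
	struct ep_model in = { "ep1in", 1 }, out = { "ep2out", 1 };

	/* loopback has no isochronous endpoints, so both are NULL */
	disable_endpoints(&in, &out, NULL, NULL);
	return 0;
}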
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index e3dae47baef3..3a5ae9900b1e 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c | |||
@@ -23,15 +23,6 @@ | |||
23 | #include "gadget_chips.h" | 23 | #include "gadget_chips.h" |
24 | #include "u_f.h" | 24 | #include "u_f.h" |
25 | 25 | ||
26 | #define USB_MS_TO_SS_INTERVAL(x) USB_MS_TO_HS_INTERVAL(x) | ||
27 | |||
28 | enum eptype { | ||
29 | EP_CONTROL = 0, | ||
30 | EP_BULK, | ||
31 | EP_ISOC, | ||
32 | EP_INTERRUPT, | ||
33 | }; | ||
34 | |||
35 | /* | 26 | /* |
36 | * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral | 27 | * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral |
37 | * controller drivers. | 28 | * controller drivers. |
@@ -64,8 +55,6 @@ struct f_sourcesink { | |||
64 | struct usb_ep *out_ep; | 55 | struct usb_ep *out_ep; |
65 | struct usb_ep *iso_in_ep; | 56 | struct usb_ep *iso_in_ep; |
66 | struct usb_ep *iso_out_ep; | 57 | struct usb_ep *iso_out_ep; |
67 | struct usb_ep *int_in_ep; | ||
68 | struct usb_ep *int_out_ep; | ||
69 | int cur_alt; | 58 | int cur_alt; |
70 | }; | 59 | }; |
71 | 60 | ||
@@ -79,10 +68,6 @@ static unsigned isoc_interval; | |||
79 | static unsigned isoc_maxpacket; | 68 | static unsigned isoc_maxpacket; |
80 | static unsigned isoc_mult; | 69 | static unsigned isoc_mult; |
81 | static unsigned isoc_maxburst; | 70 | static unsigned isoc_maxburst; |
82 | static unsigned int_interval; /* In ms */ | ||
83 | static unsigned int_maxpacket; | ||
84 | static unsigned int_mult; | ||
85 | static unsigned int_maxburst; | ||
86 | static unsigned buflen; | 71 | static unsigned buflen; |
87 | 72 | ||
88 | /*-------------------------------------------------------------------------*/ | 73 | /*-------------------------------------------------------------------------*/ |
@@ -107,16 +92,6 @@ static struct usb_interface_descriptor source_sink_intf_alt1 = { | |||
107 | /* .iInterface = DYNAMIC */ | 92 | /* .iInterface = DYNAMIC */ |
108 | }; | 93 | }; |
109 | 94 | ||
110 | static struct usb_interface_descriptor source_sink_intf_alt2 = { | ||
111 | .bLength = USB_DT_INTERFACE_SIZE, | ||
112 | .bDescriptorType = USB_DT_INTERFACE, | ||
113 | |||
114 | .bAlternateSetting = 2, | ||
115 | .bNumEndpoints = 2, | ||
116 | .bInterfaceClass = USB_CLASS_VENDOR_SPEC, | ||
117 | /* .iInterface = DYNAMIC */ | ||
118 | }; | ||
119 | |||
120 | /* full speed support: */ | 95 | /* full speed support: */ |
121 | 96 | ||
122 | static struct usb_endpoint_descriptor fs_source_desc = { | 97 | static struct usb_endpoint_descriptor fs_source_desc = { |
@@ -155,26 +130,6 @@ static struct usb_endpoint_descriptor fs_iso_sink_desc = { | |||
155 | .bInterval = 4, | 130 | .bInterval = 4, |
156 | }; | 131 | }; |
157 | 132 | ||
158 | static struct usb_endpoint_descriptor fs_int_source_desc = { | ||
159 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
160 | .bDescriptorType = USB_DT_ENDPOINT, | ||
161 | |||
162 | .bEndpointAddress = USB_DIR_IN, | ||
163 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
164 | .wMaxPacketSize = cpu_to_le16(64), | ||
165 | .bInterval = GZERO_INT_INTERVAL, | ||
166 | }; | ||
167 | |||
168 | static struct usb_endpoint_descriptor fs_int_sink_desc = { | ||
169 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
170 | .bDescriptorType = USB_DT_ENDPOINT, | ||
171 | |||
172 | .bEndpointAddress = USB_DIR_OUT, | ||
173 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
174 | .wMaxPacketSize = cpu_to_le16(64), | ||
175 | .bInterval = GZERO_INT_INTERVAL, | ||
176 | }; | ||
177 | |||
178 | static struct usb_descriptor_header *fs_source_sink_descs[] = { | 133 | static struct usb_descriptor_header *fs_source_sink_descs[] = { |
179 | (struct usb_descriptor_header *) &source_sink_intf_alt0, | 134 | (struct usb_descriptor_header *) &source_sink_intf_alt0, |
180 | (struct usb_descriptor_header *) &fs_sink_desc, | 135 | (struct usb_descriptor_header *) &fs_sink_desc, |
@@ -185,10 +140,6 @@ static struct usb_descriptor_header *fs_source_sink_descs[] = { | |||
185 | (struct usb_descriptor_header *) &fs_source_desc, | 140 | (struct usb_descriptor_header *) &fs_source_desc, |
186 | (struct usb_descriptor_header *) &fs_iso_sink_desc, | 141 | (struct usb_descriptor_header *) &fs_iso_sink_desc, |
187 | (struct usb_descriptor_header *) &fs_iso_source_desc, | 142 | (struct usb_descriptor_header *) &fs_iso_source_desc, |
188 | (struct usb_descriptor_header *) &source_sink_intf_alt2, | ||
189 | #define FS_ALT_IFC_2_OFFSET 8 | ||
190 | (struct usb_descriptor_header *) &fs_int_sink_desc, | ||
191 | (struct usb_descriptor_header *) &fs_int_source_desc, | ||
192 | NULL, | 143 | NULL, |
193 | }; | 144 | }; |
194 | 145 | ||
@@ -228,24 +179,6 @@ static struct usb_endpoint_descriptor hs_iso_sink_desc = { | |||
228 | .bInterval = 4, | 179 | .bInterval = 4, |
229 | }; | 180 | }; |
230 | 181 | ||
231 | static struct usb_endpoint_descriptor hs_int_source_desc = { | ||
232 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
233 | .bDescriptorType = USB_DT_ENDPOINT, | ||
234 | |||
235 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
236 | .wMaxPacketSize = cpu_to_le16(1024), | ||
237 | .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL), | ||
238 | }; | ||
239 | |||
240 | static struct usb_endpoint_descriptor hs_int_sink_desc = { | ||
241 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
242 | .bDescriptorType = USB_DT_ENDPOINT, | ||
243 | |||
244 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
245 | .wMaxPacketSize = cpu_to_le16(1024), | ||
246 | .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL), | ||
247 | }; | ||
248 | |||
249 | static struct usb_descriptor_header *hs_source_sink_descs[] = { | 182 | static struct usb_descriptor_header *hs_source_sink_descs[] = { |
250 | (struct usb_descriptor_header *) &source_sink_intf_alt0, | 183 | (struct usb_descriptor_header *) &source_sink_intf_alt0, |
251 | (struct usb_descriptor_header *) &hs_source_desc, | 184 | (struct usb_descriptor_header *) &hs_source_desc, |
@@ -256,10 +189,6 @@ static struct usb_descriptor_header *hs_source_sink_descs[] = { | |||
256 | (struct usb_descriptor_header *) &hs_sink_desc, | 189 | (struct usb_descriptor_header *) &hs_sink_desc, |
257 | (struct usb_descriptor_header *) &hs_iso_source_desc, | 190 | (struct usb_descriptor_header *) &hs_iso_source_desc, |
258 | (struct usb_descriptor_header *) &hs_iso_sink_desc, | 191 | (struct usb_descriptor_header *) &hs_iso_sink_desc, |
259 | (struct usb_descriptor_header *) &source_sink_intf_alt2, | ||
260 | #define HS_ALT_IFC_2_OFFSET 8 | ||
261 | (struct usb_descriptor_header *) &hs_int_source_desc, | ||
262 | (struct usb_descriptor_header *) &hs_int_sink_desc, | ||
263 | NULL, | 192 | NULL, |
264 | }; | 193 | }; |
265 | 194 | ||
@@ -335,42 +264,6 @@ static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = { | |||
335 | .wBytesPerInterval = cpu_to_le16(1024), | 264 | .wBytesPerInterval = cpu_to_le16(1024), |
336 | }; | 265 | }; |
337 | 266 | ||
338 | static struct usb_endpoint_descriptor ss_int_source_desc = { | ||
339 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
340 | .bDescriptorType = USB_DT_ENDPOINT, | ||
341 | |||
342 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
343 | .wMaxPacketSize = cpu_to_le16(1024), | ||
344 | .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), | ||
345 | }; | ||
346 | |||
347 | static struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = { | ||
348 | .bLength = USB_DT_SS_EP_COMP_SIZE, | ||
349 | .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, | ||
350 | |||
351 | .bMaxBurst = 0, | ||
352 | .bmAttributes = 0, | ||
353 | .wBytesPerInterval = cpu_to_le16(1024), | ||
354 | }; | ||
355 | |||
356 | static struct usb_endpoint_descriptor ss_int_sink_desc = { | ||
357 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
358 | .bDescriptorType = USB_DT_ENDPOINT, | ||
359 | |||
360 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
361 | .wMaxPacketSize = cpu_to_le16(1024), | ||
362 | .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), | ||
363 | }; | ||
364 | |||
365 | static struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = { | ||
366 | .bLength = USB_DT_SS_EP_COMP_SIZE, | ||
367 | .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, | ||
368 | |||
369 | .bMaxBurst = 0, | ||
370 | .bmAttributes = 0, | ||
371 | .wBytesPerInterval = cpu_to_le16(1024), | ||
372 | }; | ||
373 | |||
374 | static struct usb_descriptor_header *ss_source_sink_descs[] = { | 267 | static struct usb_descriptor_header *ss_source_sink_descs[] = { |
375 | (struct usb_descriptor_header *) &source_sink_intf_alt0, | 268 | (struct usb_descriptor_header *) &source_sink_intf_alt0, |
376 | (struct usb_descriptor_header *) &ss_source_desc, | 269 | (struct usb_descriptor_header *) &ss_source_desc, |
@@ -387,12 +280,6 @@ static struct usb_descriptor_header *ss_source_sink_descs[] = { | |||
387 | (struct usb_descriptor_header *) &ss_iso_source_comp_desc, | 280 | (struct usb_descriptor_header *) &ss_iso_source_comp_desc, |
388 | (struct usb_descriptor_header *) &ss_iso_sink_desc, | 281 | (struct usb_descriptor_header *) &ss_iso_sink_desc, |
389 | (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, | 282 | (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, |
390 | (struct usb_descriptor_header *) &source_sink_intf_alt2, | ||
391 | #define SS_ALT_IFC_2_OFFSET 14 | ||
392 | (struct usb_descriptor_header *) &ss_int_source_desc, | ||
393 | (struct usb_descriptor_header *) &ss_int_source_comp_desc, | ||
394 | (struct usb_descriptor_header *) &ss_int_sink_desc, | ||
395 | (struct usb_descriptor_header *) &ss_int_sink_comp_desc, | ||
396 | NULL, | 283 | NULL, |
397 | }; | 284 | }; |
398 | 285 | ||
@@ -414,21 +301,6 @@ static struct usb_gadget_strings *sourcesink_strings[] = { | |||
414 | }; | 301 | }; |
415 | 302 | ||
416 | /*-------------------------------------------------------------------------*/ | 303 | /*-------------------------------------------------------------------------*/ |
417 | static const char *get_ep_string(enum eptype ep_type) | ||
418 | { | ||
419 | switch (ep_type) { | ||
420 | case EP_ISOC: | ||
421 | return "ISOC-"; | ||
422 | case EP_INTERRUPT: | ||
423 | return "INTERRUPT-"; | ||
424 | case EP_CONTROL: | ||
425 | return "CTRL-"; | ||
426 | case EP_BULK: | ||
427 | return "BULK-"; | ||
428 | default: | ||
429 | return "UNKNOWN-"; | ||
430 | } | ||
431 | } | ||
432 | 304 | ||
433 | static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) | 305 | static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) |
434 | { | 306 | { |
@@ -456,8 +328,7 @@ static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep) | |||
456 | 328 | ||
457 | void disable_endpoints(struct usb_composite_dev *cdev, | 329 | void disable_endpoints(struct usb_composite_dev *cdev, |
458 | struct usb_ep *in, struct usb_ep *out, | 330 | struct usb_ep *in, struct usb_ep *out, |
459 | struct usb_ep *iso_in, struct usb_ep *iso_out, | 331 | struct usb_ep *iso_in, struct usb_ep *iso_out) |
460 | struct usb_ep *int_in, struct usb_ep *int_out) | ||
461 | { | 332 | { |
462 | disable_ep(cdev, in); | 333 | disable_ep(cdev, in); |
463 | disable_ep(cdev, out); | 334 | disable_ep(cdev, out); |
@@ -465,10 +336,6 @@ void disable_endpoints(struct usb_composite_dev *cdev, | |||
465 | disable_ep(cdev, iso_in); | 336 | disable_ep(cdev, iso_in); |
466 | if (iso_out) | 337 | if (iso_out) |
467 | disable_ep(cdev, iso_out); | 338 | disable_ep(cdev, iso_out); |
468 | if (int_in) | ||
469 | disable_ep(cdev, int_in); | ||
470 | if (int_out) | ||
471 | disable_ep(cdev, int_out); | ||
472 | } | 339 | } |
473 | 340 | ||
474 | static int | 341 | static int |
@@ -485,7 +352,6 @@ sourcesink_bind(struct usb_configuration *c, struct usb_function *f) | |||
485 | return id; | 352 | return id; |
486 | source_sink_intf_alt0.bInterfaceNumber = id; | 353 | source_sink_intf_alt0.bInterfaceNumber = id; |
487 | source_sink_intf_alt1.bInterfaceNumber = id; | 354 | source_sink_intf_alt1.bInterfaceNumber = id; |
488 | source_sink_intf_alt2.bInterfaceNumber = id; | ||
489 | 355 | ||
490 | /* allocate bulk endpoints */ | 356 | /* allocate bulk endpoints */ |
491 | ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); | 357 | ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); |
@@ -546,55 +412,14 @@ no_iso: | |||
546 | if (isoc_maxpacket > 1024) | 412 | if (isoc_maxpacket > 1024) |
547 | isoc_maxpacket = 1024; | 413 | isoc_maxpacket = 1024; |
548 | 414 | ||
549 | /* sanity check the interrupt module parameters */ | ||
550 | if (int_interval < 1) | ||
551 | int_interval = 1; | ||
552 | if (int_interval > 4096) | ||
553 | int_interval = 4096; | ||
554 | if (int_mult > 2) | ||
555 | int_mult = 2; | ||
556 | if (int_maxburst > 15) | ||
557 | int_maxburst = 15; | ||
558 | |||
559 | /* fill in the FS interrupt descriptors from the module parameters */ | ||
560 | fs_int_source_desc.wMaxPacketSize = int_maxpacket > 64 ? | ||
561 | 64 : int_maxpacket; | ||
562 | fs_int_source_desc.bInterval = int_interval > 255 ? | ||
563 | 255 : int_interval; | ||
564 | fs_int_sink_desc.wMaxPacketSize = int_maxpacket > 64 ? | ||
565 | 64 : int_maxpacket; | ||
566 | fs_int_sink_desc.bInterval = int_interval > 255 ? | ||
567 | 255 : int_interval; | ||
568 | |||
569 | /* allocate int endpoints */ | ||
570 | ss->int_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_source_desc); | ||
571 | if (!ss->int_in_ep) | ||
572 | goto no_int; | ||
573 | ss->int_in_ep->driver_data = cdev; /* claim */ | ||
574 | |||
575 | ss->int_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_sink_desc); | ||
576 | if (ss->int_out_ep) { | ||
577 | ss->int_out_ep->driver_data = cdev; /* claim */ | ||
578 | } else { | ||
579 | ss->int_in_ep->driver_data = NULL; | ||
580 | ss->int_in_ep = NULL; | ||
581 | no_int: | ||
582 | fs_source_sink_descs[FS_ALT_IFC_2_OFFSET] = NULL; | ||
583 | hs_source_sink_descs[HS_ALT_IFC_2_OFFSET] = NULL; | ||
584 | ss_source_sink_descs[SS_ALT_IFC_2_OFFSET] = NULL; | ||
585 | } | ||
586 | |||
587 | if (int_maxpacket > 1024) | ||
588 | int_maxpacket = 1024; | ||
589 | |||
590 | /* support high speed hardware */ | 415 | /* support high speed hardware */ |
591 | hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; | 416 | hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; |
592 | hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; | 417 | hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; |
593 | 418 | ||
594 | /* | 419 | /* |
595 | * Fill in the HS isoc and interrupt descriptors from the module | 420 | * Fill in the HS isoc descriptors from the module parameters. |
596 | * parameters. We assume that the user knows what they are doing and | 421 | * We assume that the user knows what they are doing and won't |
597 | * won't give parameters that their UDC doesn't support. | 422 | * give parameters that their UDC doesn't support. |
598 | */ | 423 | */ |
599 | hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; | 424 | hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; |
600 | hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; | 425 | hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; |
@@ -607,17 +432,6 @@ no_int: | |||
607 | hs_iso_sink_desc.bInterval = isoc_interval; | 432 | hs_iso_sink_desc.bInterval = isoc_interval; |
608 | hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; | 433 | hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; |
609 | 434 | ||
610 | hs_int_source_desc.wMaxPacketSize = int_maxpacket; | ||
611 | hs_int_source_desc.wMaxPacketSize |= int_mult << 11; | ||
612 | hs_int_source_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval); | ||
613 | hs_int_source_desc.bEndpointAddress = | ||
614 | fs_int_source_desc.bEndpointAddress; | ||
615 | |||
616 | hs_int_sink_desc.wMaxPacketSize = int_maxpacket; | ||
617 | hs_int_sink_desc.wMaxPacketSize |= int_mult << 11; | ||
618 | hs_int_sink_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval); | ||
619 | hs_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress; | ||
620 | |||
621 | /* support super speed hardware */ | 435 | /* support super speed hardware */ |
622 | ss_source_desc.bEndpointAddress = | 436 | ss_source_desc.bEndpointAddress = |
623 | fs_source_desc.bEndpointAddress; | 437 | fs_source_desc.bEndpointAddress; |
@@ -625,9 +439,9 @@ no_int: | |||
625 | fs_sink_desc.bEndpointAddress; | 439 | fs_sink_desc.bEndpointAddress; |
626 | 440 | ||
627 | /* | 441 | /* |
628 | * Fill in the SS isoc and interrupt descriptors from the module | 442 | * Fill in the SS isoc descriptors from the module parameters. |
629 | * parameters. We assume that the user knows what they are doing and | 443 | * We assume that the user knows what they are doing and won't |
630 | * won't give parameters that their UDC doesn't support. | 444 | * give parameters that their UDC doesn't support. |
631 | */ | 445 | */ |
632 | ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; | 446 | ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; |
633 | ss_iso_source_desc.bInterval = isoc_interval; | 447 | ss_iso_source_desc.bInterval = isoc_interval; |
@@ -646,37 +460,17 @@ no_int: | |||
646 | isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); | 460 | isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); |
647 | ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; | 461 | ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; |
648 | 462 | ||
649 | ss_int_source_desc.wMaxPacketSize = int_maxpacket; | ||
650 | ss_int_source_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval); | ||
651 | ss_int_source_comp_desc.bmAttributes = int_mult; | ||
652 | ss_int_source_comp_desc.bMaxBurst = int_maxburst; | ||
653 | ss_int_source_comp_desc.wBytesPerInterval = | ||
654 | int_maxpacket * (int_mult + 1) * (int_maxburst + 1); | ||
655 | ss_int_source_desc.bEndpointAddress = | ||
656 | fs_int_source_desc.bEndpointAddress; | ||
657 | |||
658 | ss_int_sink_desc.wMaxPacketSize = int_maxpacket; | ||
659 | ss_int_sink_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval); | ||
660 | ss_int_sink_comp_desc.bmAttributes = int_mult; | ||
661 | ss_int_sink_comp_desc.bMaxBurst = int_maxburst; | ||
662 | ss_int_sink_comp_desc.wBytesPerInterval = | ||
663 | int_maxpacket * (int_mult + 1) * (int_maxburst + 1); | ||
664 | ss_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress; | ||
665 | |||
666 | ret = usb_assign_descriptors(f, fs_source_sink_descs, | 463 | ret = usb_assign_descriptors(f, fs_source_sink_descs, |
667 | hs_source_sink_descs, ss_source_sink_descs); | 464 | hs_source_sink_descs, ss_source_sink_descs); |
668 | if (ret) | 465 | if (ret) |
669 | return ret; | 466 | return ret; |
670 | 467 | ||
671 | DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s, " | 468 | DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n", |
672 | "INT-IN/%s, INT-OUT/%s\n", | ||
673 | (gadget_is_superspeed(c->cdev->gadget) ? "super" : | 469 | (gadget_is_superspeed(c->cdev->gadget) ? "super" : |
674 | (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), | 470 | (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), |
675 | f->name, ss->in_ep->name, ss->out_ep->name, | 471 | f->name, ss->in_ep->name, ss->out_ep->name, |
676 | ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", | 472 | ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", |
677 | ss->iso_out_ep ? ss->iso_out_ep->name : "<none>", | 473 | ss->iso_out_ep ? ss->iso_out_ep->name : "<none>"); |
678 | ss->int_in_ep ? ss->int_in_ep->name : "<none>", | ||
679 | ss->int_out_ep ? ss->int_out_ep->name : "<none>"); | ||
680 | return 0; | 474 | return 0; |
681 | } | 475 | } |
682 | 476 | ||
@@ -807,15 +601,14 @@ static void source_sink_complete(struct usb_ep *ep, struct usb_request *req) | |||
807 | } | 601 | } |
808 | 602 | ||
809 | static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, | 603 | static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, |
810 | enum eptype ep_type, int speed) | 604 | bool is_iso, int speed) |
811 | { | 605 | { |
812 | struct usb_ep *ep; | 606 | struct usb_ep *ep; |
813 | struct usb_request *req; | 607 | struct usb_request *req; |
814 | int i, size, status; | 608 | int i, size, status; |
815 | 609 | ||
816 | for (i = 0; i < 8; i++) { | 610 | for (i = 0; i < 8; i++) { |
817 | switch (ep_type) { | 611 | if (is_iso) { |
818 | case EP_ISOC: | ||
819 | switch (speed) { | 612 | switch (speed) { |
820 | case USB_SPEED_SUPER: | 613 | case USB_SPEED_SUPER: |
821 | size = isoc_maxpacket * (isoc_mult + 1) * | 614 | size = isoc_maxpacket * (isoc_mult + 1) * |
@@ -831,28 +624,9 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, | |||
831 | } | 624 | } |
832 | ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; | 625 | ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; |
833 | req = ss_alloc_ep_req(ep, size); | 626 | req = ss_alloc_ep_req(ep, size); |
834 | break; | 627 | } else { |
835 | case EP_INTERRUPT: | ||
836 | switch (speed) { | ||
837 | case USB_SPEED_SUPER: | ||
838 | size = int_maxpacket * (int_mult + 1) * | ||
839 | (int_maxburst + 1); | ||
840 | break; | ||
841 | case USB_SPEED_HIGH: | ||
842 | size = int_maxpacket * (int_mult + 1); | ||
843 | break; | ||
844 | default: | ||
845 | size = int_maxpacket > 1023 ? | ||
846 | 1023 : int_maxpacket; | ||
847 | break; | ||
848 | } | ||
849 | ep = is_in ? ss->int_in_ep : ss->int_out_ep; | ||
850 | req = ss_alloc_ep_req(ep, size); | ||
851 | break; | ||
852 | default: | ||
853 | ep = is_in ? ss->in_ep : ss->out_ep; | 628 | ep = is_in ? ss->in_ep : ss->out_ep; |
854 | req = ss_alloc_ep_req(ep, 0); | 629 | req = ss_alloc_ep_req(ep, 0); |
855 | break; | ||
856 | } | 630 | } |
857 | 631 | ||
858 | if (!req) | 632 | if (!req) |
@@ -870,12 +644,12 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, | |||
870 | 644 | ||
871 | cdev = ss->function.config->cdev; | 645 | cdev = ss->function.config->cdev; |
872 | ERROR(cdev, "start %s%s %s --> %d\n", | 646 | ERROR(cdev, "start %s%s %s --> %d\n", |
873 | get_ep_string(ep_type), is_in ? "IN" : "OUT", | 647 | is_iso ? "ISO-" : "", is_in ? "IN" : "OUT", |
874 | ep->name, status); | 648 | ep->name, status); |
875 | free_ep_req(ep, req); | 649 | free_ep_req(ep, req); |
876 | } | 650 | } |
877 | 651 | ||
878 | if (!(ep_type == EP_ISOC)) | 652 | if (!is_iso) |
879 | break; | 653 | break; |
880 | } | 654 | } |
881 | 655 | ||
@@ -888,7 +662,7 @@ static void disable_source_sink(struct f_sourcesink *ss) | |||
888 | 662 | ||
889 | cdev = ss->function.config->cdev; | 663 | cdev = ss->function.config->cdev; |
890 | disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, | 664 | disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, |
891 | ss->iso_out_ep, ss->int_in_ep, ss->int_out_ep); | 665 | ss->iso_out_ep); |
892 | VDBG(cdev, "%s disabled\n", ss->function.name); | 666 | VDBG(cdev, "%s disabled\n", ss->function.name); |
893 | } | 667 | } |
894 | 668 | ||
@@ -900,62 +674,6 @@ enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss, | |||
900 | int speed = cdev->gadget->speed; | 674 | int speed = cdev->gadget->speed; |
901 | struct usb_ep *ep; | 675 | struct usb_ep *ep; |
902 | 676 | ||
903 | if (alt == 2) { | ||
904 | /* Configure for periodic interrupt endpoint */ | ||
905 | ep = ss->int_in_ep; | ||
906 | if (ep) { | ||
907 | result = config_ep_by_speed(cdev->gadget, | ||
908 | &(ss->function), ep); | ||
909 | if (result) | ||
910 | return result; | ||
911 | |||
912 | result = usb_ep_enable(ep); | ||
913 | if (result < 0) | ||
914 | return result; | ||
915 | |||
916 | ep->driver_data = ss; | ||
917 | result = source_sink_start_ep(ss, true, EP_INTERRUPT, | ||
918 | speed); | ||
919 | if (result < 0) { | ||
920 | fail1: | ||
921 | ep = ss->int_in_ep; | ||
922 | if (ep) { | ||
923 | usb_ep_disable(ep); | ||
924 | ep->driver_data = NULL; | ||
925 | } | ||
926 | return result; | ||
927 | } | ||
928 | } | ||
929 | |||
930 | /* | ||
931 | * one interrupt endpoint reads (sinks) anything OUT (from the | ||
932 | * host) | ||
933 | */ | ||
934 | ep = ss->int_out_ep; | ||
935 | if (ep) { | ||
936 | result = config_ep_by_speed(cdev->gadget, | ||
937 | &(ss->function), ep); | ||
938 | if (result) | ||
939 | goto fail1; | ||
940 | |||
941 | result = usb_ep_enable(ep); | ||
942 | if (result < 0) | ||
943 | goto fail1; | ||
944 | |||
945 | ep->driver_data = ss; | ||
946 | result = source_sink_start_ep(ss, false, EP_INTERRUPT, | ||
947 | speed); | ||
948 | if (result < 0) { | ||
949 | ep = ss->int_out_ep; | ||
950 | usb_ep_disable(ep); | ||
951 | ep->driver_data = NULL; | ||
952 | goto fail1; | ||
953 | } | ||
954 | } | ||
955 | |||
956 | goto out; | ||
957 | } | ||
958 | |||
959 | /* one bulk endpoint writes (sources) zeroes IN (to the host) */ | 677 | /* one bulk endpoint writes (sources) zeroes IN (to the host) */ |
960 | ep = ss->in_ep; | 678 | ep = ss->in_ep; |
961 | result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); | 679 | result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); |
@@ -966,7 +684,7 @@ fail1: | |||
966 | return result; | 684 | return result; |
967 | ep->driver_data = ss; | 685 | ep->driver_data = ss; |
968 | 686 | ||
969 | result = source_sink_start_ep(ss, true, EP_BULK, speed); | 687 | result = source_sink_start_ep(ss, true, false, speed); |
970 | if (result < 0) { | 688 | if (result < 0) { |
971 | fail: | 689 | fail: |
972 | ep = ss->in_ep; | 690 | ep = ss->in_ep; |
@@ -985,7 +703,7 @@ fail: | |||
985 | goto fail; | 703 | goto fail; |
986 | ep->driver_data = ss; | 704 | ep->driver_data = ss; |
987 | 705 | ||
988 | result = source_sink_start_ep(ss, false, EP_BULK, speed); | 706 | result = source_sink_start_ep(ss, false, false, speed); |
989 | if (result < 0) { | 707 | if (result < 0) { |
990 | fail2: | 708 | fail2: |
991 | ep = ss->out_ep; | 709 | ep = ss->out_ep; |
@@ -1008,7 +726,7 @@ fail2: | |||
1008 | goto fail2; | 726 | goto fail2; |
1009 | ep->driver_data = ss; | 727 | ep->driver_data = ss; |
1010 | 728 | ||
1011 | result = source_sink_start_ep(ss, true, EP_ISOC, speed); | 729 | result = source_sink_start_ep(ss, true, true, speed); |
1012 | if (result < 0) { | 730 | if (result < 0) { |
1013 | fail3: | 731 | fail3: |
1014 | ep = ss->iso_in_ep; | 732 | ep = ss->iso_in_ep; |
@@ -1031,14 +749,13 @@ fail3: | |||
1031 | goto fail3; | 749 | goto fail3; |
1032 | ep->driver_data = ss; | 750 | ep->driver_data = ss; |
1033 | 751 | ||
1034 | result = source_sink_start_ep(ss, false, EP_ISOC, speed); | 752 | result = source_sink_start_ep(ss, false, true, speed); |
1035 | if (result < 0) { | 753 | if (result < 0) { |
1036 | usb_ep_disable(ep); | 754 | usb_ep_disable(ep); |
1037 | ep->driver_data = NULL; | 755 | ep->driver_data = NULL; |
1038 | goto fail3; | 756 | goto fail3; |
1039 | } | 757 | } |
1040 | } | 758 | } |
1041 | |||
1042 | out: | 759 | out: |
1043 | ss->cur_alt = alt; | 760 | ss->cur_alt = alt; |
1044 | 761 | ||
@@ -1054,8 +771,6 @@ static int sourcesink_set_alt(struct usb_function *f, | |||
1054 | 771 | ||
1055 | if (ss->in_ep->driver_data) | 772 | if (ss->in_ep->driver_data) |
1056 | disable_source_sink(ss); | 773 | disable_source_sink(ss); |
1057 | else if (alt == 2 && ss->int_in_ep->driver_data) | ||
1058 | disable_source_sink(ss); | ||
1059 | return enable_source_sink(cdev, ss, alt); | 774 | return enable_source_sink(cdev, ss, alt); |
1060 | } | 775 | } |
1061 | 776 | ||
@@ -1168,10 +883,6 @@ static struct usb_function *source_sink_alloc_func( | |||
1168 | isoc_maxpacket = ss_opts->isoc_maxpacket; | 883 | isoc_maxpacket = ss_opts->isoc_maxpacket; |
1169 | isoc_mult = ss_opts->isoc_mult; | 884 | isoc_mult = ss_opts->isoc_mult; |
1170 | isoc_maxburst = ss_opts->isoc_maxburst; | 885 | isoc_maxburst = ss_opts->isoc_maxburst; |
1171 | int_interval = ss_opts->int_interval; | ||
1172 | int_maxpacket = ss_opts->int_maxpacket; | ||
1173 | int_mult = ss_opts->int_mult; | ||
1174 | int_maxburst = ss_opts->int_maxburst; | ||
1175 | buflen = ss_opts->bulk_buflen; | 886 | buflen = ss_opts->bulk_buflen; |
1176 | 887 | ||
1177 | ss->function.name = "source/sink"; | 888 | ss->function.name = "source/sink"; |
@@ -1468,182 +1179,6 @@ static struct f_ss_opts_attribute f_ss_opts_bulk_buflen = | |||
1468 | f_ss_opts_bulk_buflen_show, | 1179 | f_ss_opts_bulk_buflen_show, |
1469 | f_ss_opts_bulk_buflen_store); | 1180 | f_ss_opts_bulk_buflen_store); |
1470 | 1181 | ||
1471 | static ssize_t f_ss_opts_int_interval_show(struct f_ss_opts *opts, char *page) | ||
1472 | { | ||
1473 | int result; | ||
1474 | |||
1475 | mutex_lock(&opts->lock); | ||
1476 | result = sprintf(page, "%u", opts->int_interval); | ||
1477 | mutex_unlock(&opts->lock); | ||
1478 | |||
1479 | return result; | ||
1480 | } | ||
1481 | |||
1482 | static ssize_t f_ss_opts_int_interval_store(struct f_ss_opts *opts, | ||
1483 | const char *page, size_t len) | ||
1484 | { | ||
1485 | int ret; | ||
1486 | u32 num; | ||
1487 | |||
1488 | mutex_lock(&opts->lock); | ||
1489 | if (opts->refcnt) { | ||
1490 | ret = -EBUSY; | ||
1491 | goto end; | ||
1492 | } | ||
1493 | |||
1494 | ret = kstrtou32(page, 0, &num); | ||
1495 | if (ret) | ||
1496 | goto end; | ||
1497 | |||
1498 | if (num > 4096) { | ||
1499 | ret = -EINVAL; | ||
1500 | goto end; | ||
1501 | } | ||
1502 | |||
1503 | opts->int_interval = num; | ||
1504 | ret = len; | ||
1505 | end: | ||
1506 | mutex_unlock(&opts->lock); | ||
1507 | return ret; | ||
1508 | } | ||
1509 | |||
1510 | static struct f_ss_opts_attribute f_ss_opts_int_interval = | ||
1511 | __CONFIGFS_ATTR(int_interval, S_IRUGO | S_IWUSR, | ||
1512 | f_ss_opts_int_interval_show, | ||
1513 | f_ss_opts_int_interval_store); | ||
1514 | |||
1515 | static ssize_t f_ss_opts_int_maxpacket_show(struct f_ss_opts *opts, char *page) | ||
1516 | { | ||
1517 | int result; | ||
1518 | |||
1519 | mutex_lock(&opts->lock); | ||
1520 | result = sprintf(page, "%u", opts->int_maxpacket); | ||
1521 | mutex_unlock(&opts->lock); | ||
1522 | |||
1523 | return result; | ||
1524 | } | ||
1525 | |||
1526 | static ssize_t f_ss_opts_int_maxpacket_store(struct f_ss_opts *opts, | ||
1527 | const char *page, size_t len) | ||
1528 | { | ||
1529 | int ret; | ||
1530 | u16 num; | ||
1531 | |||
1532 | mutex_lock(&opts->lock); | ||
1533 | if (opts->refcnt) { | ||
1534 | ret = -EBUSY; | ||
1535 | goto end; | ||
1536 | } | ||
1537 | |||
1538 | ret = kstrtou16(page, 0, &num); | ||
1539 | if (ret) | ||
1540 | goto end; | ||
1541 | |||
1542 | if (num > 1024) { | ||
1543 | ret = -EINVAL; | ||
1544 | goto end; | ||
1545 | } | ||
1546 | |||
1547 | opts->int_maxpacket = num; | ||
1548 | ret = len; | ||
1549 | end: | ||
1550 | mutex_unlock(&opts->lock); | ||
1551 | return ret; | ||
1552 | } | ||
1553 | |||
1554 | static struct f_ss_opts_attribute f_ss_opts_int_maxpacket = | ||
1555 | __CONFIGFS_ATTR(int_maxpacket, S_IRUGO | S_IWUSR, | ||
1556 | f_ss_opts_int_maxpacket_show, | ||
1557 | f_ss_opts_int_maxpacket_store); | ||
1558 | |||
1559 | static ssize_t f_ss_opts_int_mult_show(struct f_ss_opts *opts, char *page) | ||
1560 | { | ||
1561 | int result; | ||
1562 | |||
1563 | mutex_lock(&opts->lock); | ||
1564 | result = sprintf(page, "%u", opts->int_mult); | ||
1565 | mutex_unlock(&opts->lock); | ||
1566 | |||
1567 | return result; | ||
1568 | } | ||
1569 | |||
1570 | static ssize_t f_ss_opts_int_mult_store(struct f_ss_opts *opts, | ||
1571 | const char *page, size_t len) | ||
1572 | { | ||
1573 | int ret; | ||
1574 | u8 num; | ||
1575 | |||
1576 | mutex_lock(&opts->lock); | ||
1577 | if (opts->refcnt) { | ||
1578 | ret = -EBUSY; | ||
1579 | goto end; | ||
1580 | } | ||
1581 | |||
1582 | ret = kstrtou8(page, 0, &num); | ||
1583 | if (ret) | ||
1584 | goto end; | ||
1585 | |||
1586 | if (num > 2) { | ||
1587 | ret = -EINVAL; | ||
1588 | goto end; | ||
1589 | } | ||
1590 | |||
1591 | opts->int_mult = num; | ||
1592 | ret = len; | ||
1593 | end: | ||
1594 | mutex_unlock(&opts->lock); | ||
1595 | return ret; | ||
1596 | } | ||
1597 | |||
1598 | static struct f_ss_opts_attribute f_ss_opts_int_mult = | ||
1599 | __CONFIGFS_ATTR(int_mult, S_IRUGO | S_IWUSR, | ||
1600 | f_ss_opts_int_mult_show, | ||
1601 | f_ss_opts_int_mult_store); | ||
1602 | |||
1603 | static ssize_t f_ss_opts_int_maxburst_show(struct f_ss_opts *opts, char *page) | ||
1604 | { | ||
1605 | int result; | ||
1606 | |||
1607 | mutex_lock(&opts->lock); | ||
1608 | result = sprintf(page, "%u", opts->int_maxburst); | ||
1609 | mutex_unlock(&opts->lock); | ||
1610 | |||
1611 | return result; | ||
1612 | } | ||
1613 | |||
1614 | static ssize_t f_ss_opts_int_maxburst_store(struct f_ss_opts *opts, | ||
1615 | const char *page, size_t len) | ||
1616 | { | ||
1617 | int ret; | ||
1618 | u8 num; | ||
1619 | |||
1620 | mutex_lock(&opts->lock); | ||
1621 | if (opts->refcnt) { | ||
1622 | ret = -EBUSY; | ||
1623 | goto end; | ||
1624 | } | ||
1625 | |||
1626 | ret = kstrtou8(page, 0, &num); | ||
1627 | if (ret) | ||
1628 | goto end; | ||
1629 | |||
1630 | if (num > 15) { | ||
1631 | ret = -EINVAL; | ||
1632 | goto end; | ||
1633 | } | ||
1634 | |||
1635 | opts->int_maxburst = num; | ||
1636 | ret = len; | ||
1637 | end: | ||
1638 | mutex_unlock(&opts->lock); | ||
1639 | return ret; | ||
1640 | } | ||
1641 | |||
1642 | static struct f_ss_opts_attribute f_ss_opts_int_maxburst = | ||
1643 | __CONFIGFS_ATTR(int_maxburst, S_IRUGO | S_IWUSR, | ||
1644 | f_ss_opts_int_maxburst_show, | ||
1645 | f_ss_opts_int_maxburst_store); | ||
1646 | |||
1647 | static struct configfs_attribute *ss_attrs[] = { | 1182 | static struct configfs_attribute *ss_attrs[] = { |
1648 | &f_ss_opts_pattern.attr, | 1183 | &f_ss_opts_pattern.attr, |
1649 | &f_ss_opts_isoc_interval.attr, | 1184 | &f_ss_opts_isoc_interval.attr, |
@@ -1651,10 +1186,6 @@ static struct configfs_attribute *ss_attrs[] = { | |||
1651 | &f_ss_opts_isoc_mult.attr, | 1186 | &f_ss_opts_isoc_mult.attr, |
1652 | &f_ss_opts_isoc_maxburst.attr, | 1187 | &f_ss_opts_isoc_maxburst.attr, |
1653 | &f_ss_opts_bulk_buflen.attr, | 1188 | &f_ss_opts_bulk_buflen.attr, |
1654 | &f_ss_opts_int_interval.attr, | ||
1655 | &f_ss_opts_int_maxpacket.attr, | ||
1656 | &f_ss_opts_int_mult.attr, | ||
1657 | &f_ss_opts_int_maxburst.attr, | ||
1658 | NULL, | 1189 | NULL, |
1659 | }; | 1190 | }; |
1660 | 1191 | ||
@@ -1684,8 +1215,6 @@ static struct usb_function_instance *source_sink_alloc_inst(void) | |||
1684 | ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; | 1215 | ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; |
1685 | ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; | 1216 | ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; |
1686 | ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; | 1217 | ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; |
1687 | ss_opts->int_interval = GZERO_INT_INTERVAL; | ||
1688 | ss_opts->int_maxpacket = GZERO_INT_MAXPACKET; | ||
1689 | 1218 | ||
1690 | config_group_init_type_name(&ss_opts->func_inst.group, "", | 1219 | config_group_init_type_name(&ss_opts->func_inst.group, "", |
1691 | &ss_func_type); | 1220 | &ss_func_type); |
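
The hunks above revert the interrupt-endpoint support in f_sourcesink, so source_sink_start_ep() goes back to taking a plain "is it isochronous" flag instead of an EP_BULK/EP_ISOC/EP_INTERRUPT selector. A minimal standalone sketch of that narrowed calling convention; the names below are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

/* Direction plus "is_iso" is all the caller needs once interrupt
 * endpoints are gone. */
static int start_ep(bool is_in, bool is_iso)
{
	printf("start %s %s endpoint\n",
	       is_iso ? "isoc" : "bulk", is_in ? "IN" : "OUT");
	return 0;
}

int main(void)
{
	start_ep(true, false);	/* source: bulk IN, zeroes to the host */
	start_ep(false, false);	/* sink: bulk OUT */
	start_ep(true, true);	/* isoc IN */
	start_ep(false, true);	/* isoc OUT */
	return 0;
}
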
diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h index 2ce28b9d97cc..15f180904f8a 100644 --- a/drivers/usb/gadget/function/g_zero.h +++ b/drivers/usb/gadget/function/g_zero.h | |||
@@ -10,8 +10,6 @@ | |||
10 | #define GZERO_QLEN 32 | 10 | #define GZERO_QLEN 32 |
11 | #define GZERO_ISOC_INTERVAL 4 | 11 | #define GZERO_ISOC_INTERVAL 4 |
12 | #define GZERO_ISOC_MAXPACKET 1024 | 12 | #define GZERO_ISOC_MAXPACKET 1024 |
13 | #define GZERO_INT_INTERVAL 1 /* Default interrupt interval = 1 ms */ | ||
14 | #define GZERO_INT_MAXPACKET 1024 | ||
15 | 13 | ||
16 | struct usb_zero_options { | 14 | struct usb_zero_options { |
17 | unsigned pattern; | 15 | unsigned pattern; |
@@ -19,10 +17,6 @@ struct usb_zero_options { | |||
19 | unsigned isoc_maxpacket; | 17 | unsigned isoc_maxpacket; |
20 | unsigned isoc_mult; | 18 | unsigned isoc_mult; |
21 | unsigned isoc_maxburst; | 19 | unsigned isoc_maxburst; |
22 | unsigned int_interval; /* In ms */ | ||
23 | unsigned int_maxpacket; | ||
24 | unsigned int_mult; | ||
25 | unsigned int_maxburst; | ||
26 | unsigned bulk_buflen; | 20 | unsigned bulk_buflen; |
27 | unsigned qlen; | 21 | unsigned qlen; |
28 | }; | 22 | }; |
@@ -34,10 +28,6 @@ struct f_ss_opts { | |||
34 | unsigned isoc_maxpacket; | 28 | unsigned isoc_maxpacket; |
35 | unsigned isoc_mult; | 29 | unsigned isoc_mult; |
36 | unsigned isoc_maxburst; | 30 | unsigned isoc_maxburst; |
37 | unsigned int_interval; /* In ms */ | ||
38 | unsigned int_maxpacket; | ||
39 | unsigned int_mult; | ||
40 | unsigned int_maxburst; | ||
41 | unsigned bulk_buflen; | 31 | unsigned bulk_buflen; |
42 | 32 | ||
43 | /* | 33 | /* |
@@ -72,7 +62,6 @@ int lb_modinit(void); | |||
72 | void free_ep_req(struct usb_ep *ep, struct usb_request *req); | 62 | void free_ep_req(struct usb_ep *ep, struct usb_request *req); |
73 | void disable_endpoints(struct usb_composite_dev *cdev, | 63 | void disable_endpoints(struct usb_composite_dev *cdev, |
74 | struct usb_ep *in, struct usb_ep *out, | 64 | struct usb_ep *in, struct usb_ep *out, |
75 | struct usb_ep *iso_in, struct usb_ep *iso_out, | 65 | struct usb_ep *iso_in, struct usb_ep *iso_out); |
76 | struct usb_ep *int_in, struct usb_ep *int_out); | ||
77 | 66 | ||
78 | #endif /* __G_ZERO_H */ | 67 | #endif /* __G_ZERO_H */ |
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c index 3a494168661e..6e0a019aad54 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c | |||
@@ -1740,10 +1740,9 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name) | |||
1740 | goto err_session; | 1740 | goto err_session; |
1741 | } | 1741 | } |
1742 | /* | 1742 | /* |
1743 | * Now register the TCM vHost virtual I_T Nexus as active with the | 1743 | * Now register the TCM vHost virtual I_T Nexus as active. |
1744 | * call to __transport_register_session() | ||
1745 | */ | 1744 | */ |
1746 | __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, | 1745 | transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, |
1747 | tv_nexus->tvn_se_sess, tv_nexus); | 1746 | tv_nexus->tvn_se_sess, tv_nexus); |
1748 | tpg->tpg_nexus = tv_nexus; | 1747 | tpg->tpg_nexus = tv_nexus; |
1749 | mutex_unlock(&tpg->tpg_mutex); | 1748 | mutex_unlock(&tpg->tpg_mutex); |
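
tcm_usb_gadget (and, further down, vhost-scsi and xen-scsiback) switch from __transport_register_session() to the locked transport_register_session() wrapper. A hedged sketch of how that wrapper relates to the bare variant in the target core, shown for orientation rather than as a verbatim copy: the double-underscore version assumes the caller already holds the session lock, while the wrapper takes it itself, which is what these fabric drivers want.

void transport_register_session(struct se_portal_group *se_tpg,
				struct se_node_acl *se_nacl,
				struct se_session *se_sess,
				void *fabric_sess_ptr)
{
	unsigned long flags;

	/* Take the per-TPG session lock so process-context callers do
	 * not have to manage it themselves. */
	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess,
				     fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
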
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c index ff97ac93ac03..5ee95152493c 100644 --- a/drivers/usb/gadget/legacy/zero.c +++ b/drivers/usb/gadget/legacy/zero.c | |||
@@ -68,8 +68,6 @@ static struct usb_zero_options gzero_options = { | |||
68 | .isoc_maxpacket = GZERO_ISOC_MAXPACKET, | 68 | .isoc_maxpacket = GZERO_ISOC_MAXPACKET, |
69 | .bulk_buflen = GZERO_BULK_BUFLEN, | 69 | .bulk_buflen = GZERO_BULK_BUFLEN, |
70 | .qlen = GZERO_QLEN, | 70 | .qlen = GZERO_QLEN, |
71 | .int_interval = GZERO_INT_INTERVAL, | ||
72 | .int_maxpacket = GZERO_INT_MAXPACKET, | ||
73 | }; | 71 | }; |
74 | 72 | ||
75 | /*-------------------------------------------------------------------------*/ | 73 | /*-------------------------------------------------------------------------*/ |
@@ -268,21 +266,6 @@ module_param_named(isoc_maxburst, gzero_options.isoc_maxburst, uint, | |||
268 | S_IRUGO|S_IWUSR); | 266 | S_IRUGO|S_IWUSR); |
269 | MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)"); | 267 | MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)"); |
270 | 268 | ||
271 | module_param_named(int_interval, gzero_options.int_interval, uint, | ||
272 | S_IRUGO|S_IWUSR); | ||
273 | MODULE_PARM_DESC(int_interval, "1 - 16"); | ||
274 | |||
275 | module_param_named(int_maxpacket, gzero_options.int_maxpacket, uint, | ||
276 | S_IRUGO|S_IWUSR); | ||
277 | MODULE_PARM_DESC(int_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)"); | ||
278 | |||
279 | module_param_named(int_mult, gzero_options.int_mult, uint, S_IRUGO|S_IWUSR); | ||
280 | MODULE_PARM_DESC(int_mult, "0 - 2 (hs/ss only)"); | ||
281 | |||
282 | module_param_named(int_maxburst, gzero_options.int_maxburst, uint, | ||
283 | S_IRUGO|S_IWUSR); | ||
284 | MODULE_PARM_DESC(int_maxburst, "0 - 15 (ss only)"); | ||
285 | |||
286 | static struct usb_function *func_lb; | 269 | static struct usb_function *func_lb; |
287 | static struct usb_function_instance *func_inst_lb; | 270 | static struct usb_function_instance *func_inst_lb; |
288 | 271 | ||
@@ -318,10 +301,6 @@ static int __init zero_bind(struct usb_composite_dev *cdev) | |||
318 | ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; | 301 | ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; |
319 | ss_opts->isoc_mult = gzero_options.isoc_mult; | 302 | ss_opts->isoc_mult = gzero_options.isoc_mult; |
320 | ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; | 303 | ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; |
321 | ss_opts->int_interval = gzero_options.int_interval; | ||
322 | ss_opts->int_maxpacket = gzero_options.int_maxpacket; | ||
323 | ss_opts->int_mult = gzero_options.int_mult; | ||
324 | ss_opts->int_maxburst = gzero_options.int_maxburst; | ||
325 | ss_opts->bulk_buflen = gzero_options.bulk_buflen; | 304 | ss_opts->bulk_buflen = gzero_options.bulk_buflen; |
326 | 305 | ||
327 | func_ss = usb_get_function(func_inst_ss); | 306 | func_ss = usb_get_function(func_inst_ss); |
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c index 663f7908b15c..be0964a801e8 100644 --- a/drivers/usb/host/ehci-atmel.c +++ b/drivers/usb/host/ehci-atmel.c | |||
@@ -34,7 +34,6 @@ static const char hcd_name[] = "ehci-atmel"; | |||
34 | 34 | ||
35 | struct atmel_ehci_priv { | 35 | struct atmel_ehci_priv { |
36 | struct clk *iclk; | 36 | struct clk *iclk; |
37 | struct clk *fclk; | ||
38 | struct clk *uclk; | 37 | struct clk *uclk; |
39 | bool clocked; | 38 | bool clocked; |
40 | }; | 39 | }; |
@@ -51,12 +50,9 @@ static void atmel_start_clock(struct atmel_ehci_priv *atmel_ehci) | |||
51 | { | 50 | { |
52 | if (atmel_ehci->clocked) | 51 | if (atmel_ehci->clocked) |
53 | return; | 52 | return; |
54 | if (IS_ENABLED(CONFIG_COMMON_CLK)) { | 53 | |
55 | clk_set_rate(atmel_ehci->uclk, 48000000); | 54 | clk_prepare_enable(atmel_ehci->uclk); |
56 | clk_prepare_enable(atmel_ehci->uclk); | ||
57 | } | ||
58 | clk_prepare_enable(atmel_ehci->iclk); | 55 | clk_prepare_enable(atmel_ehci->iclk); |
59 | clk_prepare_enable(atmel_ehci->fclk); | ||
60 | atmel_ehci->clocked = true; | 56 | atmel_ehci->clocked = true; |
61 | } | 57 | } |
62 | 58 | ||
@@ -64,10 +60,9 @@ static void atmel_stop_clock(struct atmel_ehci_priv *atmel_ehci) | |||
64 | { | 60 | { |
65 | if (!atmel_ehci->clocked) | 61 | if (!atmel_ehci->clocked) |
66 | return; | 62 | return; |
67 | clk_disable_unprepare(atmel_ehci->fclk); | 63 | |
68 | clk_disable_unprepare(atmel_ehci->iclk); | 64 | clk_disable_unprepare(atmel_ehci->iclk); |
69 | if (IS_ENABLED(CONFIG_COMMON_CLK)) | 65 | clk_disable_unprepare(atmel_ehci->uclk); |
70 | clk_disable_unprepare(atmel_ehci->uclk); | ||
71 | atmel_ehci->clocked = false; | 66 | atmel_ehci->clocked = false; |
72 | } | 67 | } |
73 | 68 | ||
@@ -146,20 +141,13 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev) | |||
146 | retval = -ENOENT; | 141 | retval = -ENOENT; |
147 | goto fail_request_resource; | 142 | goto fail_request_resource; |
148 | } | 143 | } |
149 | atmel_ehci->fclk = devm_clk_get(&pdev->dev, "uhpck"); | 144 | |
150 | if (IS_ERR(atmel_ehci->fclk)) { | 145 | atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk"); |
151 | dev_err(&pdev->dev, "Error getting function clock\n"); | 146 | if (IS_ERR(atmel_ehci->uclk)) { |
152 | retval = -ENOENT; | 147 | dev_err(&pdev->dev, "failed to get uclk\n"); |
148 | retval = PTR_ERR(atmel_ehci->uclk); | ||
153 | goto fail_request_resource; | 149 | goto fail_request_resource; |
154 | } | 150 | } |
155 | if (IS_ENABLED(CONFIG_COMMON_CLK)) { | ||
156 | atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk"); | ||
157 | if (IS_ERR(atmel_ehci->uclk)) { | ||
158 | dev_err(&pdev->dev, "failed to get uclk\n"); | ||
159 | retval = PTR_ERR(atmel_ehci->uclk); | ||
160 | goto fail_request_resource; | ||
161 | } | ||
162 | } | ||
163 | 151 | ||
164 | ehci = hcd_to_ehci(hcd); | 152 | ehci = hcd_to_ehci(hcd); |
165 | /* registers start at offset 0x0 */ | 153 | /* registers start at offset 0x0 */ |
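
Besides dropping the uhpck handling, the ehci-atmel probe path now propagates the real error from devm_clk_get() instead of flattening it to -ENOENT, which matters for cases like -EPROBE_DEFER. A minimal hedged sketch of that pattern, not the full probe function:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative only: fetch one clock and keep the encoded errno. */
static int demo_get_usb_clk(struct platform_device *pdev, struct clk **uclk)
{
	*uclk = devm_clk_get(&pdev->dev, "usb_clk");
	if (IS_ERR(*uclk)) {
		dev_err(&pdev->dev, "failed to get uclk\n");
		return PTR_ERR(*uclk);	/* e.g. -EPROBE_DEFER, not -ENOENT */
	}
	return 0;
}
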
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 5fb66db89e05..73485fa4372f 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -1729,7 +1729,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, | |||
1729 | if (!command) | 1729 | if (!command) |
1730 | return; | 1730 | return; |
1731 | 1731 | ||
1732 | ep->ep_state |= EP_HALTED | EP_RECENTLY_HALTED; | 1732 | ep->ep_state |= EP_HALTED; |
1733 | ep->stopped_stream = stream_id; | 1733 | ep->stopped_stream = stream_id; |
1734 | 1734 | ||
1735 | xhci_queue_reset_ep(xhci, command, slot_id, ep_index); | 1735 | xhci_queue_reset_ep(xhci, command, slot_id, ep_index); |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index b06d1a53652d..ec8ac1674854 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -1338,12 +1338,6 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
1338 | goto exit; | 1338 | goto exit; |
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | /* Reject urb if endpoint is in soft reset, queue must stay empty */ | ||
1342 | if (xhci->devs[slot_id]->eps[ep_index].ep_state & EP_CONFIG_PENDING) { | ||
1343 | xhci_warn(xhci, "Can't enqueue URB while ep is in soft reset\n"); | ||
1344 | ret = -EINVAL; | ||
1345 | } | ||
1346 | |||
1347 | if (usb_endpoint_xfer_isoc(&urb->ep->desc)) | 1341 | if (usb_endpoint_xfer_isoc(&urb->ep->desc)) |
1348 | size = urb->number_of_packets; | 1342 | size = urb->number_of_packets; |
1349 | else | 1343 | else |
@@ -2954,36 +2948,23 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, | |||
2954 | } | 2948 | } |
2955 | } | 2949 | } |
2956 | 2950 | ||
2957 | /* Called after clearing a halted device. USB core should have sent the control | 2951 | /* Called when clearing halted device. The core should have sent the control |
2958 | * message to clear the device halt condition. The host side of the halt should | 2952 | * message to clear the device halt condition. The host side of the halt should |
2959 | * already be cleared with a reset endpoint command issued immediately when the | 2953 | * already be cleared with a reset endpoint command issued when the STALL tx |
2960 | * STALL tx event was received. | 2954 | * event was received. |
2955 | * | ||
2956 | * Context: in_interrupt | ||
2961 | */ | 2957 | */ |
2962 | 2958 | ||
2963 | void xhci_endpoint_reset(struct usb_hcd *hcd, | 2959 | void xhci_endpoint_reset(struct usb_hcd *hcd, |
2964 | struct usb_host_endpoint *ep) | 2960 | struct usb_host_endpoint *ep) |
2965 | { | 2961 | { |
2966 | struct xhci_hcd *xhci; | 2962 | struct xhci_hcd *xhci; |
2967 | struct usb_device *udev; | ||
2968 | struct xhci_virt_device *virt_dev; | ||
2969 | struct xhci_virt_ep *virt_ep; | ||
2970 | struct xhci_input_control_ctx *ctrl_ctx; | ||
2971 | struct xhci_command *command; | ||
2972 | unsigned int ep_index, ep_state; | ||
2973 | unsigned long flags; | ||
2974 | u32 ep_flag; | ||
2975 | 2963 | ||
2976 | xhci = hcd_to_xhci(hcd); | 2964 | xhci = hcd_to_xhci(hcd); |
2977 | udev = (struct usb_device *) ep->hcpriv; | ||
2978 | if (!ep->hcpriv) | ||
2979 | return; | ||
2980 | virt_dev = xhci->devs[udev->slot_id]; | ||
2981 | ep_index = xhci_get_endpoint_index(&ep->desc); | ||
2982 | virt_ep = &virt_dev->eps[ep_index]; | ||
2983 | ep_state = virt_ep->ep_state; | ||
2984 | 2965 | ||
2985 | /* | 2966 | /* |
2986 | * Implement the config ep command in xhci 4.6.8 additional note: | 2967 | * We might need to implement the config ep cmd in xhci 4.8.1 note: |
2987 | * The Reset Endpoint Command may only be issued to endpoints in the | 2968 | * The Reset Endpoint Command may only be issued to endpoints in the |
2988 | * Halted state. If software wishes reset the Data Toggle or Sequence | 2969 | * Halted state. If software wishes reset the Data Toggle or Sequence |
2989 | * Number of an endpoint that isn't in the Halted state, then software | 2970 | * Number of an endpoint that isn't in the Halted state, then software |
@@ -2991,72 +2972,9 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, | |||
2991 | * for the target endpoint. that is in the Stopped state. | 2972 | * for the target endpoint. that is in the Stopped state. |
2992 | */ | 2973 | */ |
2993 | 2974 | ||
2994 | if (ep_state & SET_DEQ_PENDING || ep_state & EP_RECENTLY_HALTED) { | 2975 | /* For now just print debug to follow the situation */ |
2995 | virt_ep->ep_state &= ~EP_RECENTLY_HALTED; | 2976 | xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", |
2996 | xhci_dbg(xhci, "ep recently halted, no toggle reset needed\n"); | 2977 | ep->desc.bEndpointAddress); |
2997 | return; | ||
2998 | } | ||
2999 | |||
3000 | /* Only interrupt and bulk ep's use Data toggle, USB2 spec 5.5.4-> */ | ||
3001 | if (usb_endpoint_xfer_control(&ep->desc) || | ||
3002 | usb_endpoint_xfer_isoc(&ep->desc)) | ||
3003 | return; | ||
3004 | |||
3005 | ep_flag = xhci_get_endpoint_flag(&ep->desc); | ||
3006 | |||
3007 | if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG) | ||
3008 | return; | ||
3009 | |||
3010 | command = xhci_alloc_command(xhci, true, true, GFP_NOWAIT); | ||
3011 | if (!command) { | ||
3012 | xhci_err(xhci, "Could not allocate xHCI command structure.\n"); | ||
3013 | return; | ||
3014 | } | ||
3015 | |||
3016 | spin_lock_irqsave(&xhci->lock, flags); | ||
3017 | |||
3018 | /* block ringing ep doorbell */ | ||
3019 | virt_ep->ep_state |= EP_CONFIG_PENDING; | ||
3020 | |||
3021 | /* | ||
3022 | * Make sure endpoint ring is empty before resetting the toggle/seq. | ||
3023 | * Driver is required to synchronously cancel all transfer request. | ||
3024 | * | ||
3025 | * xhci 4.6.6 says we can issue a configure endpoint command on a | ||
3026 | * running endpoint ring as long as it's idle (queue empty) | ||
3027 | */ | ||
3028 | |||
3029 | if (!list_empty(&virt_ep->ring->td_list)) { | ||
3030 | dev_err(&udev->dev, "EP not empty, refuse reset\n"); | ||
3031 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
3032 | goto cleanup; | ||
3033 | } | ||
3034 | |||
3035 | xhci_dbg(xhci, "Reset toggle/seq for slot %d, ep_index: %d\n", | ||
3036 | udev->slot_id, ep_index); | ||
3037 | |||
3038 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); | ||
3039 | if (!ctrl_ctx) { | ||
3040 | xhci_err(xhci, "Could not get input context, bad type. virt_dev: %p, in_ctx %p\n", | ||
3041 | virt_dev, virt_dev->in_ctx); | ||
3042 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
3043 | goto cleanup; | ||
3044 | } | ||
3045 | xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, | ||
3046 | virt_dev->out_ctx, ctrl_ctx, | ||
3047 | ep_flag, ep_flag); | ||
3048 | xhci_endpoint_copy(xhci, command->in_ctx, virt_dev->out_ctx, ep_index); | ||
3049 | |||
3050 | xhci_queue_configure_endpoint(xhci, command, command->in_ctx->dma, | ||
3051 | udev->slot_id, false); | ||
3052 | xhci_ring_cmd_db(xhci); | ||
3053 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
3054 | |||
3055 | wait_for_completion(command->completion); | ||
3056 | |||
3057 | cleanup: | ||
3058 | virt_ep->ep_state &= ~EP_CONFIG_PENDING; | ||
3059 | xhci_free_command(xhci, command); | ||
3060 | } | 2978 | } |
3061 | 2979 | ||
3062 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, | 2980 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 265ab1771d24..8e421b89632d 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -865,8 +865,6 @@ struct xhci_virt_ep { | |||
865 | #define EP_HAS_STREAMS (1 << 4) | 865 | #define EP_HAS_STREAMS (1 << 4) |
866 | /* Transitioning the endpoint to not using streams, don't enqueue URBs */ | 866 | /* Transitioning the endpoint to not using streams, don't enqueue URBs */ |
867 | #define EP_GETTING_NO_STREAMS (1 << 5) | 867 | #define EP_GETTING_NO_STREAMS (1 << 5) |
868 | #define EP_RECENTLY_HALTED (1 << 6) | ||
869 | #define EP_CONFIG_PENDING (1 << 7) | ||
870 | /* ---- Related to URB cancellation ---- */ | 868 | /* ---- Related to URB cancellation ---- */ |
871 | struct list_head cancelled_td_list; | 869 | struct list_head cancelled_td_list; |
872 | struct xhci_td *stopped_td; | 870 | struct xhci_td *stopped_td; |
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c index b9827556455f..bfa402cf3a27 100644 --- a/drivers/usb/isp1760/isp1760-core.c +++ b/drivers/usb/isp1760/isp1760-core.c | |||
@@ -151,8 +151,7 @@ int isp1760_register(struct resource *mem, int irq, unsigned long irqflags, | |||
151 | } | 151 | } |
152 | 152 | ||
153 | if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) { | 153 | if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) { |
154 | ret = isp1760_udc_register(isp, irq, irqflags | IRQF_SHARED | | 154 | ret = isp1760_udc_register(isp, irq, irqflags); |
155 | IRQF_DISABLED); | ||
156 | if (ret < 0) { | 155 | if (ret < 0) { |
157 | isp1760_hcd_unregister(&isp->hcd); | 156 | isp1760_hcd_unregister(&isp->hcd); |
158 | return ret; | 157 | return ret; |
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c index 9612d7990565..f32c292cc868 100644 --- a/drivers/usb/isp1760/isp1760-udc.c +++ b/drivers/usb/isp1760/isp1760-udc.c | |||
@@ -1191,6 +1191,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
1191 | struct usb_gadget_driver *driver) | 1191 | struct usb_gadget_driver *driver) |
1192 | { | 1192 | { |
1193 | struct isp1760_udc *udc = gadget_to_udc(gadget); | 1193 | struct isp1760_udc *udc = gadget_to_udc(gadget); |
1194 | unsigned long flags; | ||
1194 | 1195 | ||
1195 | /* The hardware doesn't support low speed. */ | 1196 | /* The hardware doesn't support low speed. */ |
1196 | if (driver->max_speed < USB_SPEED_FULL) { | 1197 | if (driver->max_speed < USB_SPEED_FULL) { |
@@ -1198,7 +1199,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
1198 | return -EINVAL; | 1199 | return -EINVAL; |
1199 | } | 1200 | } |
1200 | 1201 | ||
1201 | spin_lock(&udc->lock); | 1202 | spin_lock_irqsave(&udc->lock, flags); |
1202 | 1203 | ||
1203 | if (udc->driver) { | 1204 | if (udc->driver) { |
1204 | dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); | 1205 | dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); |
@@ -1208,7 +1209,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
1208 | 1209 | ||
1209 | udc->driver = driver; | 1210 | udc->driver = driver; |
1210 | 1211 | ||
1211 | spin_unlock(&udc->lock); | 1212 | spin_unlock_irqrestore(&udc->lock, flags); |
1212 | 1213 | ||
1213 | dev_dbg(udc->isp->dev, "starting UDC with driver %s\n", | 1214 | dev_dbg(udc->isp->dev, "starting UDC with driver %s\n", |
1214 | driver->function); | 1215 | driver->function); |
@@ -1232,6 +1233,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
1232 | static int isp1760_udc_stop(struct usb_gadget *gadget) | 1233 | static int isp1760_udc_stop(struct usb_gadget *gadget) |
1233 | { | 1234 | { |
1234 | struct isp1760_udc *udc = gadget_to_udc(gadget); | 1235 | struct isp1760_udc *udc = gadget_to_udc(gadget); |
1236 | unsigned long flags; | ||
1235 | 1237 | ||
1236 | dev_dbg(udc->isp->dev, "%s\n", __func__); | 1238 | dev_dbg(udc->isp->dev, "%s\n", __func__); |
1237 | 1239 | ||
@@ -1239,9 +1241,9 @@ static int isp1760_udc_stop(struct usb_gadget *gadget) | |||
1239 | 1241 | ||
1240 | isp1760_udc_write(udc, DC_MODE, 0); | 1242 | isp1760_udc_write(udc, DC_MODE, 0); |
1241 | 1243 | ||
1242 | spin_lock(&udc->lock); | 1244 | spin_lock_irqsave(&udc->lock, flags); |
1243 | udc->driver = NULL; | 1245 | udc->driver = NULL; |
1244 | spin_unlock(&udc->lock); | 1246 | spin_unlock_irqrestore(&udc->lock, flags); |
1245 | 1247 | ||
1246 | return 0; | 1248 | return 0; |
1247 | } | 1249 | } |
@@ -1411,7 +1413,7 @@ static int isp1760_udc_init(struct isp1760_udc *udc) | |||
1411 | return -ENODEV; | 1413 | return -ENODEV; |
1412 | } | 1414 | } |
1413 | 1415 | ||
1414 | if (chipid != 0x00011582) { | 1416 | if (chipid != 0x00011582 && chipid != 0x00158210) { |
1415 | dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid); | 1417 | dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid); |
1416 | return -ENODEV; | 1418 | return -ENODEV; |
1417 | } | 1419 | } |
@@ -1451,8 +1453,8 @@ int isp1760_udc_register(struct isp1760_device *isp, int irq, | |||
1451 | 1453 | ||
1452 | sprintf(udc->irqname, "%s (udc)", devname); | 1454 | sprintf(udc->irqname, "%s (udc)", devname); |
1453 | 1455 | ||
1454 | ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | IRQF_DISABLED | | 1456 | ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | irqflags, |
1455 | irqflags, udc->irqname, udc); | 1457 | udc->irqname, udc); |
1456 | if (ret < 0) | 1458 | if (ret < 0) |
1457 | goto error; | 1459 | goto error; |
1458 | 1460 | ||
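
Two things happen in the isp1760 hunks: the obsolete IRQF_DISABLED flag is dropped from request_irq(), and udc->lock is now taken with spin_lock_irqsave() in the gadget start/stop paths, since the same lock is also taken from the interrupt handler. A hedged, generic sketch of that locking rule; the structure and function names are illustrative, not the driver's:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_udc {
	spinlock_t lock;
	bool running;
};

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_udc *udc = dev_id;

	spin_lock(&udc->lock);		/* hardirq context, IRQs already off */
	/* ... handle controller event ... */
	spin_unlock(&udc->lock);
	return IRQ_HANDLED;
}

static int demo_start(struct demo_udc *udc)
{
	unsigned long flags;

	/* Process context must mask local interrupts while holding the
	 * lock, or demo_irq() could deadlock on it on this CPU. */
	spin_lock_irqsave(&udc->lock, flags);
	udc->running = true;
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
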
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 14e1628483d9..39db8b603627 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig | |||
@@ -79,7 +79,8 @@ config USB_MUSB_TUSB6010 | |||
79 | 79 | ||
80 | config USB_MUSB_OMAP2PLUS | 80 | config USB_MUSB_OMAP2PLUS |
81 | tristate "OMAP2430 and onwards" | 81 | tristate "OMAP2430 and onwards" |
82 | depends on ARCH_OMAP2PLUS && USB && OMAP_CONTROL_PHY | 82 | depends on ARCH_OMAP2PLUS && USB |
83 | depends on OMAP_CONTROL_PHY || !OMAP_CONTROL_PHY | ||
83 | select GENERIC_PHY | 84 | select GENERIC_PHY |
84 | 85 | ||
85 | config USB_MUSB_AM35X | 86 | config USB_MUSB_AM35X |
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c index 403fab772724..7b3035ff9434 100644 --- a/drivers/usb/phy/phy-am335x-control.c +++ b/drivers/usb/phy/phy-am335x-control.c | |||
@@ -126,6 +126,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev) | |||
126 | return NULL; | 126 | return NULL; |
127 | 127 | ||
128 | dev = bus_find_device(&platform_bus_type, NULL, node, match); | 128 | dev = bus_find_device(&platform_bus_type, NULL, node, match); |
129 | if (!dev) | ||
130 | return NULL; | ||
131 | |||
129 | ctrl_usb = dev_get_drvdata(dev); | 132 | ctrl_usb = dev_get_drvdata(dev); |
130 | if (!ctrl_usb) | 133 | if (!ctrl_usb) |
131 | return NULL; | 134 | return NULL; |
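
The am335x control-module lookup can run before the matching platform device exists, in which case bus_find_device() returns NULL, so the added check avoids handing NULL to dev_get_drvdata(). A standalone toy version of the same guard; every name here is made up for illustration:

#include <stddef.h>
#include <stdio.h>

struct demo_device { const char *drvdata; };

/* mimics bus_find_device(): may legitimately return NULL */
static struct demo_device *demo_find_device(int registered)
{
	static struct demo_device dev = { .drvdata = "ctrl_usb" };

	return registered ? &dev : NULL;
}

static const char *demo_get_phy_control(int registered)
{
	struct demo_device *dev = demo_find_device(registered);

	if (!dev)
		return NULL;	/* not probed yet: bail out instead of oopsing */
	return dev->drvdata;
}

int main(void)
{
	const char *a = demo_get_phy_control(1);
	const char *b = demo_get_phy_control(0);

	printf("%s\n", a ? a : "missing");
	printf("%s\n", b ? b : "missing");
	return 0;
}
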
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 82570425fdfe..c85ea530085f 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
@@ -113,6 +113,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, | |||
113 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 113 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
114 | US_FL_NO_ATA_1X), | 114 | US_FL_NO_ATA_1X), |
115 | 115 | ||
116 | /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ | ||
117 | UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, | ||
118 | "Initio Corporation", | ||
119 | "", | ||
120 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
121 | US_FL_NO_ATA_1X), | ||
122 | |||
116 | /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ | 123 | /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ |
117 | UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, | 124 | UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, |
118 | "JMicron", | 125 | "JMicron", |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 8d4f3f1ff799..71df240a467a 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -1956,10 +1956,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, | |||
1956 | goto out; | 1956 | goto out; |
1957 | } | 1957 | } |
1958 | /* | 1958 | /* |
1959 | * Now register the TCM vhost virtual I_T Nexus as active with the | 1959 | * Now register the TCM vhost virtual I_T Nexus as active. |
1960 | * call to __transport_register_session() | ||
1961 | */ | 1960 | */ |
1962 | __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, | 1961 | transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, |
1963 | tv_nexus->tvn_se_sess, tv_nexus); | 1962 | tv_nexus->tvn_se_sess, tv_nexus); |
1964 | tpg->tpg_nexus = tv_nexus; | 1963 | tpg->tpg_nexus = tv_nexus; |
1965 | 1964 | ||
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 9faca6a60bb0..42bd55a6c237 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c | |||
@@ -1659,11 +1659,8 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg, | |||
1659 | name); | 1659 | name); |
1660 | goto out; | 1660 | goto out; |
1661 | } | 1661 | } |
1662 | /* | 1662 | /* Now register the TCM pvscsi virtual I_T Nexus as active. */ |
1663 | * Now register the TCM pvscsi virtual I_T Nexus as active with the | 1663 | transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, |
1664 | * call to __transport_register_session() | ||
1665 | */ | ||
1666 | __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, | ||
1667 | tv_nexus->tvn_se_sess, tv_nexus); | 1664 | tv_nexus->tvn_se_sess, tv_nexus); |
1668 | tpg->tpg_nexus = tv_nexus; | 1665 | tpg->tpg_nexus = tv_nexus; |
1669 | 1666 | ||
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 84c3b00f3de8..f9c89cae39ee 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -3387,6 +3387,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, | |||
3387 | 3387 | ||
3388 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, | 3388 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, |
3389 | struct btrfs_root *root); | 3389 | struct btrfs_root *root); |
3390 | int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, | ||
3391 | struct btrfs_root *root); | ||
3390 | int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); | 3392 | int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); |
3391 | int btrfs_free_block_groups(struct btrfs_fs_info *info); | 3393 | int btrfs_free_block_groups(struct btrfs_fs_info *info); |
3392 | int btrfs_read_block_groups(struct btrfs_root *root); | 3394 | int btrfs_read_block_groups(struct btrfs_root *root); |
@@ -3909,6 +3911,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode, | |||
3909 | loff_t actual_len, u64 *alloc_hint); | 3911 | loff_t actual_len, u64 *alloc_hint); |
3910 | int btrfs_inode_check_errors(struct inode *inode); | 3912 | int btrfs_inode_check_errors(struct inode *inode); |
3911 | extern const struct dentry_operations btrfs_dentry_operations; | 3913 | extern const struct dentry_operations btrfs_dentry_operations; |
3914 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | ||
3915 | void btrfs_test_inode_set_ops(struct inode *inode); | ||
3916 | #endif | ||
3912 | 3917 | ||
3913 | /* ioctl.c */ | 3918 | /* ioctl.c */ |
3914 | long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); | 3919 | long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f79f38542a73..639f2663ed3f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -3921,7 +3921,7 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, | |||
3921 | } | 3921 | } |
3922 | if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) | 3922 | if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) |
3923 | + sizeof(struct btrfs_chunk)) { | 3923 | + sizeof(struct btrfs_chunk)) { |
3924 | printk(KERN_ERR "BTRFS: system chunk array too small %u < %lu\n", | 3924 | printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n", |
3925 | btrfs_super_sys_array_size(sb), | 3925 | btrfs_super_sys_array_size(sb), |
3926 | sizeof(struct btrfs_disk_key) | 3926 | sizeof(struct btrfs_disk_key) |
3927 | + sizeof(struct btrfs_chunk)); | 3927 | + sizeof(struct btrfs_chunk)); |
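
The disk-io.c change is only a printk conversion: sizeof yields a size_t, and %zu is the conversion that is correct on both 32-bit and 64-bit builds, whereas %lu merely happens to match where long and size_t share a width. A standalone illustration:

#include <stdio.h>

struct demo_disk_key {
	unsigned long long objectid;
	unsigned char type;
	unsigned long long offset;
};

int main(void)
{
	/* %zu matches size_t everywhere; %lu can warn or misprint on
	 * targets where size_t is not unsigned long. */
	printf("system chunk array needs at least %zu bytes\n",
	       sizeof(struct demo_disk_key));
	return 0;
}
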
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 6f080451fcb1..8b353ad02f03 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -3325,6 +3325,32 @@ out: | |||
3325 | return ret; | 3325 | return ret; |
3326 | } | 3326 | } |
3327 | 3327 | ||
3328 | int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, | ||
3329 | struct btrfs_root *root) | ||
3330 | { | ||
3331 | struct btrfs_block_group_cache *cache, *tmp; | ||
3332 | struct btrfs_transaction *cur_trans = trans->transaction; | ||
3333 | struct btrfs_path *path; | ||
3334 | |||
3335 | if (list_empty(&cur_trans->dirty_bgs) || | ||
3336 | !btrfs_test_opt(root, SPACE_CACHE)) | ||
3337 | return 0; | ||
3338 | |||
3339 | path = btrfs_alloc_path(); | ||
3340 | if (!path) | ||
3341 | return -ENOMEM; | ||
3342 | |||
3343 | /* Could add new block groups, use _safe just in case */ | ||
3344 | list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, | ||
3345 | dirty_list) { | ||
3346 | if (cache->disk_cache_state == BTRFS_DC_CLEAR) | ||
3347 | cache_save_setup(cache, trans, path); | ||
3348 | } | ||
3349 | |||
3350 | btrfs_free_path(path); | ||
3351 | return 0; | ||
3352 | } | ||
3353 | |||
3328 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, | 3354 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, |
3329 | struct btrfs_root *root) | 3355 | struct btrfs_root *root) |
3330 | { | 3356 | { |
@@ -5110,7 +5136,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | |||
5110 | num_bytes = ALIGN(num_bytes, root->sectorsize); | 5136 | num_bytes = ALIGN(num_bytes, root->sectorsize); |
5111 | 5137 | ||
5112 | spin_lock(&BTRFS_I(inode)->lock); | 5138 | spin_lock(&BTRFS_I(inode)->lock); |
5113 | BTRFS_I(inode)->outstanding_extents++; | 5139 | nr_extents = (unsigned)div64_u64(num_bytes + |
5140 | BTRFS_MAX_EXTENT_SIZE - 1, | ||
5141 | BTRFS_MAX_EXTENT_SIZE); | ||
5142 | BTRFS_I(inode)->outstanding_extents += nr_extents; | ||
5143 | nr_extents = 0; | ||
5114 | 5144 | ||
5115 | if (BTRFS_I(inode)->outstanding_extents > | 5145 | if (BTRFS_I(inode)->outstanding_extents > |
5116 | BTRFS_I(inode)->reserved_extents) | 5146 | BTRFS_I(inode)->reserved_extents) |
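
btrfs_delalloc_reserve_metadata() now reserves one outstanding extent per BTRFS_MAX_EXTENT_SIZE worth of bytes rather than a flat one per call, using a round-up division. A standalone worked example of that arithmetic, assuming a 128 MiB max extent size (the value this series works with):

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_EXTENT_SIZE (128ULL * 1024 * 1024)	/* assumed 128 MiB */

/* userspace equivalent of div64_u64(bytes + MAX - 1, MAX) */
static uint64_t extents_for(uint64_t num_bytes)
{
	return (num_bytes + DEMO_MAX_EXTENT_SIZE - 1) / DEMO_MAX_EXTENT_SIZE;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)extents_for(4096));                     /* 1 */
	printf("%llu\n", (unsigned long long)extents_for(DEMO_MAX_EXTENT_SIZE));     /* 1 */
	printf("%llu\n", (unsigned long long)extents_for(DEMO_MAX_EXTENT_SIZE + 1)); /* 2 */
	return 0;
}
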
@@ -5255,6 +5285,9 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) | |||
5255 | if (dropped > 0) | 5285 | if (dropped > 0) |
5256 | to_free += btrfs_calc_trans_metadata_size(root, dropped); | 5286 | to_free += btrfs_calc_trans_metadata_size(root, dropped); |
5257 | 5287 | ||
5288 | if (btrfs_test_is_dummy_root(root)) | ||
5289 | return; | ||
5290 | |||
5258 | trace_btrfs_space_reservation(root->fs_info, "delalloc", | 5291 | trace_btrfs_space_reservation(root->fs_info, "delalloc", |
5259 | btrfs_ino(inode), to_free, 0); | 5292 | btrfs_ino(inode), to_free, 0); |
5260 | if (root->fs_info->quota_enabled) { | 5293 | if (root->fs_info->quota_enabled) { |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index c7233ff1d533..d688cfe5d496 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -4968,6 +4968,12 @@ static int release_extent_buffer(struct extent_buffer *eb) | |||
4968 | 4968 | ||
4969 | /* Should be safe to release our pages at this point */ | 4969 | /* Should be safe to release our pages at this point */ |
4970 | btrfs_release_extent_buffer_page(eb); | 4970 | btrfs_release_extent_buffer_page(eb); |
4971 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | ||
4972 | if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) { | ||
4973 | __free_extent_buffer(eb); | ||
4974 | return 1; | ||
4975 | } | ||
4976 | #endif | ||
4971 | call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); | 4977 | call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); |
4972 | return 1; | 4978 | return 1; |
4973 | } | 4979 | } |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index da828cf5e8f8..d2e732d7af52 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -108,6 +108,13 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start, | |||
108 | 108 | ||
109 | static int btrfs_dirty_inode(struct inode *inode); | 109 | static int btrfs_dirty_inode(struct inode *inode); |
110 | 110 | ||
111 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | ||
112 | void btrfs_test_inode_set_ops(struct inode *inode) | ||
113 | { | ||
114 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | ||
115 | } | ||
116 | #endif | ||
117 | |||
111 | static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, | 118 | static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, |
112 | struct inode *inode, struct inode *dir, | 119 | struct inode *inode, struct inode *dir, |
113 | const struct qstr *qstr) | 120 | const struct qstr *qstr) |
@@ -1542,30 +1549,17 @@ static void btrfs_split_extent_hook(struct inode *inode, | |||
1542 | u64 new_size; | 1549 | u64 new_size; |
1543 | 1550 | ||
1544 | /* | 1551 | /* |
1545 | * We need the largest size of the remaining extent to see if we | 1552 | * See the explanation in btrfs_merge_extent_hook, the same |
1546 | * need to add a new outstanding extent. Think of the following | 1553 | * applies here, just in reverse. |
1547 | * case | ||
1548 | * | ||
1549 | * [MEAX_EXTENT_SIZEx2 - 4k][4k] | ||
1550 | * | ||
1551 | * The new_size would just be 4k and we'd think we had enough | ||
1552 | * outstanding extents for this if we only took one side of the | ||
1553 | * split, same goes for the other direction. We need to see if | ||
1554 | * the larger size still is the same amount of extents as the | ||
1555 | * original size, because if it is we need to add a new | ||
1556 | * outstanding extent. But if we split up and the larger size | ||
1557 | * is less than the original then we are good to go since we've | ||
1558 | * already accounted for the extra extent in our original | ||
1559 | * accounting. | ||
1560 | */ | 1554 | */ |
1561 | new_size = orig->end - split + 1; | 1555 | new_size = orig->end - split + 1; |
1562 | if ((split - orig->start) > new_size) | 1556 | num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1563 | new_size = split - orig->start; | ||
1564 | |||
1565 | num_extents = div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, | ||
1566 | BTRFS_MAX_EXTENT_SIZE); | 1557 | BTRFS_MAX_EXTENT_SIZE); |
1567 | if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, | 1558 | new_size = split - orig->start; |
1568 | BTRFS_MAX_EXTENT_SIZE) < num_extents) | 1559 | num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1560 | BTRFS_MAX_EXTENT_SIZE); | ||
1561 | if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, | ||
1562 | BTRFS_MAX_EXTENT_SIZE) >= num_extents) | ||
1569 | return; | 1563 | return; |
1570 | } | 1564 | } |
1571 | 1565 | ||
@@ -1591,8 +1585,10 @@ static void btrfs_merge_extent_hook(struct inode *inode, | |||
1591 | if (!(other->state & EXTENT_DELALLOC)) | 1585 | if (!(other->state & EXTENT_DELALLOC)) |
1592 | return; | 1586 | return; |
1593 | 1587 | ||
1594 | old_size = other->end - other->start + 1; | 1588 | if (new->start > other->start) |
1595 | new_size = old_size + (new->end - new->start + 1); | 1589 | new_size = new->end - other->start + 1; |
1590 | else | ||
1591 | new_size = other->end - new->start + 1; | ||
1596 | 1592 | ||
1597 | /* we're not bigger than the max, unreserve the space and go */ | 1593 | /* we're not bigger than the max, unreserve the space and go */ |
1598 | if (new_size <= BTRFS_MAX_EXTENT_SIZE) { | 1594 | if (new_size <= BTRFS_MAX_EXTENT_SIZE) { |
@@ -1603,13 +1599,32 @@ static void btrfs_merge_extent_hook(struct inode *inode, | |||
1603 | } | 1599 | } |
1604 | 1600 | ||
1605 | /* | 1601 | /* |
1606 | * If we grew by another max_extent, just return, we want to keep that | 1602 | * We have to add up either side to figure out how many extents were |
1607 | * reserved amount. | 1603 | * accounted for before we merged into one big extent. If the number of |
1604 | * extents we accounted for is <= the amount we need for the new range | ||
1605 | * then we can return, otherwise drop. Think of it like this | ||
1606 | * | ||
1607 | * [ 4k][MAX_SIZE] | ||
1608 | * | ||
1609 | * So we've grown the extent by a MAX_SIZE extent, this would mean we | ||
1610 | * need 2 outstanding extents, on one side we have 1 and the other side | ||
1611 | * we have 1 so they are == and we can return. But in this case | ||
1612 | * | ||
1613 | * [MAX_SIZE+4k][MAX_SIZE+4k] | ||
1614 | * | ||
1615 | * Each range on their own accounts for 2 extents, but merged together | ||
1616 | * they are only 3 extents worth of accounting, so we need to drop in | ||
1617 | * this case. | ||
1608 | */ | 1618 | */ |
1619 | old_size = other->end - other->start + 1; | ||
1609 | num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, | 1620 | num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1610 | BTRFS_MAX_EXTENT_SIZE); | 1621 | BTRFS_MAX_EXTENT_SIZE); |
1622 | old_size = new->end - new->start + 1; | ||
1623 | num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, | ||
1624 | BTRFS_MAX_EXTENT_SIZE); | ||
1625 | |||
1611 | if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, | 1626 | if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1612 | BTRFS_MAX_EXTENT_SIZE) > num_extents) | 1627 | BTRFS_MAX_EXTENT_SIZE) >= num_extents) |
1613 | return; | 1628 | return; |
1614 | 1629 | ||
1615 | spin_lock(&BTRFS_I(inode)->lock); | 1630 | spin_lock(&BTRFS_I(inode)->lock); |
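
The comment added to btrfs_merge_extent_hook() is easiest to check with numbers. Counting reserved extents per range as a round-up division by BTRFS_MAX_EXTENT_SIZE, the merge keeps the reservation only when the merged range still needs at least as many extents as the two halves did separately. A standalone check of the two cases named in the comment, again assuming a 128 MiB max extent size:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX (128ULL * 1024 * 1024)	/* assumed BTRFS_MAX_EXTENT_SIZE */

static uint64_t extents(uint64_t len)
{
	return (len + DEMO_MAX - 1) / DEMO_MAX;
}

static void merge(uint64_t left, uint64_t right)
{
	uint64_t before = extents(left) + extents(right);
	uint64_t after = extents(left + right);

	printf("%llu + %llu extents -> %llu merged: %s\n",
	       (unsigned long long)extents(left),
	       (unsigned long long)extents(right),
	       (unsigned long long)after,
	       after >= before ? "keep reservation" : "drop one");
}

int main(void)
{
	merge(4096, DEMO_MAX);			 /* [4k][MAX]: 1 + 1 -> 2, keep */
	merge(DEMO_MAX + 4096, DEMO_MAX + 4096); /* [MAX+4k][MAX+4k]: 2 + 2 -> 3, drop */
	return 0;
}
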
@@ -1686,6 +1701,10 @@ static void btrfs_set_bit_hook(struct inode *inode, | |||
1686 | spin_unlock(&BTRFS_I(inode)->lock); | 1701 | spin_unlock(&BTRFS_I(inode)->lock); |
1687 | } | 1702 | } |
1688 | 1703 | ||
1704 | /* For sanity tests */ | ||
1705 | if (btrfs_test_is_dummy_root(root)) | ||
1706 | return; | ||
1707 | |||
1689 | __percpu_counter_add(&root->fs_info->delalloc_bytes, len, | 1708 | __percpu_counter_add(&root->fs_info->delalloc_bytes, len, |
1690 | root->fs_info->delalloc_batch); | 1709 | root->fs_info->delalloc_batch); |
1691 | spin_lock(&BTRFS_I(inode)->lock); | 1710 | spin_lock(&BTRFS_I(inode)->lock); |
@@ -1741,6 +1760,10 @@ static void btrfs_clear_bit_hook(struct inode *inode, | |||
1741 | root != root->fs_info->tree_root) | 1760 | root != root->fs_info->tree_root) |
1742 | btrfs_delalloc_release_metadata(inode, len); | 1761 | btrfs_delalloc_release_metadata(inode, len); |
1743 | 1762 | ||
1763 | /* For sanity tests. */ | ||
1764 | if (btrfs_test_is_dummy_root(root)) | ||
1765 | return; | ||
1766 | |||
1744 | if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID | 1767 | if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID |
1745 | && do_list && !(state->state & EXTENT_NORESERVE)) | 1768 | && do_list && !(state->state & EXTENT_NORESERVE)) |
1746 | btrfs_free_reserved_data_space(inode, len); | 1769 | btrfs_free_reserved_data_space(inode, len); |
@@ -7213,7 +7236,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
7213 | u64 start = iblock << inode->i_blkbits; | 7236 | u64 start = iblock << inode->i_blkbits; |
7214 | u64 lockstart, lockend; | 7237 | u64 lockstart, lockend; |
7215 | u64 len = bh_result->b_size; | 7238 | u64 len = bh_result->b_size; |
7216 | u64 orig_len = len; | 7239 | u64 *outstanding_extents = NULL; |
7217 | int unlock_bits = EXTENT_LOCKED; | 7240 | int unlock_bits = EXTENT_LOCKED; |
7218 | int ret = 0; | 7241 | int ret = 0; |
7219 | 7242 | ||
@@ -7225,6 +7248,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
7225 | lockstart = start; | 7248 | lockstart = start; |
7226 | lockend = start + len - 1; | 7249 | lockend = start + len - 1; |
7227 | 7250 | ||
7251 | if (current->journal_info) { | ||
7252 | /* | ||
7253 | * Need to pull our outstanding extents and set journal_info to NULL so | ||
7254 | * that anything that needs to check if there's a transction doesn't get | ||
7255 | * confused. | ||
7256 | */ | ||
7257 | outstanding_extents = current->journal_info; | ||
7258 | current->journal_info = NULL; | ||
7259 | } | ||
7260 | |||
7228 | /* | 7261 | /* |
7229 | * If this errors out it's because we couldn't invalidate pagecache for | 7262 | * If this errors out it's because we couldn't invalidate pagecache for |
7230 | * this range and we need to fallback to buffered. | 7263 | * this range and we need to fallback to buffered. |
@@ -7348,11 +7381,20 @@ unlock: | |||
7348 | if (start + len > i_size_read(inode)) | 7381 | if (start + len > i_size_read(inode)) |
7349 | i_size_write(inode, start + len); | 7382 | i_size_write(inode, start + len); |
7350 | 7383 | ||
7351 | if (len < orig_len) { | 7384 | /* |
7385 | * If we have an outstanding_extents count still set then we're | ||
7386 | * within our reservation, otherwise we need to adjust our inode | ||
7387 | * counter appropriately. | ||
7388 | */ | ||
7389 | if (*outstanding_extents) { | ||
7390 | (*outstanding_extents)--; | ||
7391 | } else { | ||
7352 | spin_lock(&BTRFS_I(inode)->lock); | 7392 | spin_lock(&BTRFS_I(inode)->lock); |
7353 | BTRFS_I(inode)->outstanding_extents++; | 7393 | BTRFS_I(inode)->outstanding_extents++; |
7354 | spin_unlock(&BTRFS_I(inode)->lock); | 7394 | spin_unlock(&BTRFS_I(inode)->lock); |
7355 | } | 7395 | } |
7396 | |||
7397 | current->journal_info = outstanding_extents; | ||
7356 | btrfs_free_reserved_data_space(inode, len); | 7398 | btrfs_free_reserved_data_space(inode, len); |
7357 | } | 7399 | } |
7358 | 7400 | ||
@@ -7376,6 +7418,8 @@ unlock: | |||
7376 | unlock_err: | 7418 | unlock_err: |
7377 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, | 7419 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, |
7378 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); | 7420 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); |
7421 | if (outstanding_extents) | ||
7422 | current->journal_info = outstanding_extents; | ||
7379 | return ret; | 7423 | return ret; |
7380 | } | 7424 | } |
7381 | 7425 | ||
@@ -8075,6 +8119,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | |||
8075 | { | 8119 | { |
8076 | struct file *file = iocb->ki_filp; | 8120 | struct file *file = iocb->ki_filp; |
8077 | struct inode *inode = file->f_mapping->host; | 8121 | struct inode *inode = file->f_mapping->host; |
8122 | u64 outstanding_extents = 0; | ||
8078 | size_t count = 0; | 8123 | size_t count = 0; |
8079 | int flags = 0; | 8124 | int flags = 0; |
8080 | bool wakeup = true; | 8125 | bool wakeup = true; |
@@ -8112,6 +8157,16 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | |||
8112 | ret = btrfs_delalloc_reserve_space(inode, count); | 8157 | ret = btrfs_delalloc_reserve_space(inode, count); |
8113 | if (ret) | 8158 | if (ret) |
8114 | goto out; | 8159 | goto out; |
8160 | outstanding_extents = div64_u64(count + | ||
8161 | BTRFS_MAX_EXTENT_SIZE - 1, | ||
8162 | BTRFS_MAX_EXTENT_SIZE); | ||
8163 | |||
8164 | /* | ||
8165 | * We need to know how many extents we reserved so that we can | ||
8166 | * do the accounting properly if we go over the number we | ||
8167 | * originally calculated. Abuse current->journal_info for this. | ||
8168 | */ | ||
8169 | current->journal_info = &outstanding_extents; | ||
8115 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, | 8170 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, |
8116 | &BTRFS_I(inode)->runtime_flags)) { | 8171 | &BTRFS_I(inode)->runtime_flags)) { |
8117 | inode_dio_done(inode); | 8172 | inode_dio_done(inode); |
@@ -8124,6 +8179,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | |||
8124 | iter, offset, btrfs_get_blocks_direct, NULL, | 8179 | iter, offset, btrfs_get_blocks_direct, NULL, |
8125 | btrfs_submit_direct, flags); | 8180 | btrfs_submit_direct, flags); |
8126 | if (rw & WRITE) { | 8181 | if (rw & WRITE) { |
8182 | current->journal_info = NULL; | ||
8127 | if (ret < 0 && ret != -EIOCBQUEUED) | 8183 | if (ret < 0 && ret != -EIOCBQUEUED) |
8128 | btrfs_delalloc_release_space(inode, count); | 8184 | btrfs_delalloc_release_space(inode, count); |
8129 | else if (ret >= 0 && (size_t)ret < count) | 8185 | else if (ret >= 0 && (size_t)ret < count) |
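
The direct-IO hunks thread a counter through current->journal_info: btrfs_direct_IO() reserves space for a worst-case number of extents and stashes a pointer to that count, and btrfs_get_blocks_direct() decrements it for each mapped chunk, only bumping outstanding_extents once the pre-reserved count is used up. A standalone sketch of that hand-off pattern, with a thread-local variable standing in for the task field:

#include <stdint.h>
#include <stdio.h>

/* stand-in for current->journal_info */
static __thread uint64_t *demo_journal_info;

static void map_one_chunk(void)
{
	uint64_t *outstanding = demo_journal_info;

	if (outstanding && *outstanding) {
		(*outstanding)--;	/* still covered by the reservation */
		printf("within reservation, %llu left\n",
		       (unsigned long long)*outstanding);
	} else {
		printf("reservation exhausted, account one more extent\n");
	}
}

int main(void)
{
	uint64_t outstanding_extents = 2;	/* reserved up front */

	demo_journal_info = &outstanding_extents;
	map_one_chunk();
	map_one_chunk();
	map_one_chunk();			/* third mapping goes over */
	demo_journal_info = NULL;
	return 0;
}
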
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 97159a8e91d4..058c79eecbfb 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
@@ -1259,7 +1259,7 @@ static int comp_oper(struct btrfs_qgroup_operation *oper1, | |||
1259 | if (oper1->seq < oper2->seq) | 1259 | if (oper1->seq < oper2->seq) |
1260 | return -1; | 1260 | return -1; |
1261 | if (oper1->seq > oper2->seq) | 1261 | if (oper1->seq > oper2->seq) |
1262 | return -1; | 1262 | return 1; |
1263 | if (oper1->ref_root < oper2->ref_root) | 1263 | if (oper1->ref_root < oper2->ref_root) |
1264 | return -1; | 1264 | return -1; |
1265 | if (oper1->ref_root > oper2->ref_root) | 1265 | if (oper1->ref_root > oper2->ref_root) |
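
The qgroup fix is a single character but worth spelling out: a comparator that returns -1 for both "less than" and "greater than" is not a valid ordering, so tree inserts and lookups keyed on it can disagree about where an element lives. A standalone demonstration using the corrected comparator with qsort():

#include <stdio.h>
#include <stdlib.h>

struct demo_oper { unsigned long long seq; };

static int comp_fixed(const void *a, const void *b)
{
	const struct demo_oper *x = a, *y = b;

	if (x->seq < y->seq)
		return -1;
	if (x->seq > y->seq)
		return 1;	/* the broken version returned -1 here too */
	return 0;
}

int main(void)
{
	struct demo_oper ops[] = { { 3 }, { 1 }, { 2 } };
	size_t i;

	qsort(ops, 3, sizeof(ops[0]), comp_fixed);
	for (i = 0; i < 3; i++)
		printf("%llu\n", ops[i].seq);	/* prints 1 2 3 */
	return 0;
}
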
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index a116b55ce788..054fc0d97131 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c | |||
@@ -911,6 +911,197 @@ out: | |||
911 | return ret; | 911 | return ret; |
912 | } | 912 | } |
913 | 913 | ||
914 | static int test_extent_accounting(void) | ||
915 | { | ||
916 | struct inode *inode = NULL; | ||
917 | struct btrfs_root *root = NULL; | ||
918 | int ret = -ENOMEM; | ||
919 | |||
920 | inode = btrfs_new_test_inode(); | ||
921 | if (!inode) { | ||
922 | test_msg("Couldn't allocate inode\n"); | ||
923 | return ret; | ||
924 | } | ||
925 | |||
926 | root = btrfs_alloc_dummy_root(); | ||
927 | if (IS_ERR(root)) { | ||
928 | test_msg("Couldn't allocate root\n"); | ||
929 | goto out; | ||
930 | } | ||
931 | |||
932 | root->fs_info = btrfs_alloc_dummy_fs_info(); | ||
933 | if (!root->fs_info) { | ||
934 | test_msg("Couldn't allocate dummy fs info\n"); | ||
935 | goto out; | ||
936 | } | ||
937 | |||
938 | BTRFS_I(inode)->root = root; | ||
939 | btrfs_test_inode_set_ops(inode); | ||
940 | |||
941 | /* [BTRFS_MAX_EXTENT_SIZE] */ | ||
942 | BTRFS_I(inode)->outstanding_extents++; | ||
943 | ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, | ||
944 | NULL); | ||
945 | if (ret) { | ||
946 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
947 | goto out; | ||
948 | } | ||
949 | if (BTRFS_I(inode)->outstanding_extents != 1) { | ||
950 | ret = -EINVAL; | ||
951 | test_msg("Miscount, wanted 1, got %u\n", | ||
952 | BTRFS_I(inode)->outstanding_extents); | ||
953 | goto out; | ||
954 | } | ||
955 | |||
956 | /* [BTRFS_MAX_EXTENT_SIZE][4k] */ | ||
957 | BTRFS_I(inode)->outstanding_extents++; | ||
958 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, | ||
959 | BTRFS_MAX_EXTENT_SIZE + 4095, NULL); | ||
960 | if (ret) { | ||
961 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
962 | goto out; | ||
963 | } | ||
964 | if (BTRFS_I(inode)->outstanding_extents != 2) { | ||
965 | ret = -EINVAL; | ||
966 | test_msg("Miscount, wanted 2, got %u\n", | ||
967 | BTRFS_I(inode)->outstanding_extents); | ||
968 | goto out; | ||
969 | } | ||
970 | |||
971 | /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */ | ||
972 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, | ||
973 | BTRFS_MAX_EXTENT_SIZE >> 1, | ||
974 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, | ||
975 | EXTENT_DELALLOC | EXTENT_DIRTY | | ||
976 | EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, | ||
977 | NULL, GFP_NOFS); | ||
978 | if (ret) { | ||
979 | test_msg("clear_extent_bit returned %d\n", ret); | ||
980 | goto out; | ||
981 | } | ||
982 | if (BTRFS_I(inode)->outstanding_extents != 2) { | ||
983 | ret = -EINVAL; | ||
984 | test_msg("Miscount, wanted 2, got %u\n", | ||
985 | BTRFS_I(inode)->outstanding_extents); | ||
986 | goto out; | ||
987 | } | ||
988 | |||
989 | /* [BTRFS_MAX_EXTENT_SIZE][4K] */ | ||
990 | BTRFS_I(inode)->outstanding_extents++; | ||
991 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, | ||
992 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, | ||
993 | NULL); | ||
994 | if (ret) { | ||
995 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
996 | goto out; | ||
997 | } | ||
998 | if (BTRFS_I(inode)->outstanding_extents != 2) { | ||
999 | ret = -EINVAL; | ||
1000 | test_msg("Miscount, wanted 2, got %u\n", | ||
1001 | BTRFS_I(inode)->outstanding_extents); | ||
1002 | goto out; | ||
1003 | } | ||
1004 | |||
1005 | /* | ||
1006 | * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K] | ||
1007 | * | ||
1008 | * I'm artificially adding 2 to outstanding_extents because in the | ||
1009 | * buffered IO case we'd add things up as we go, but I don't feel like | ||
1010 | * doing that here, this isn't the interesting case we want to test. | ||
1011 | */ | ||
1012 | BTRFS_I(inode)->outstanding_extents += 2; | ||
1013 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192, | ||
1014 | (BTRFS_MAX_EXTENT_SIZE << 1) + 12287, | ||
1015 | NULL); | ||
1016 | if (ret) { | ||
1017 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
1018 | goto out; | ||
1019 | } | ||
1020 | if (BTRFS_I(inode)->outstanding_extents != 4) { | ||
1021 | ret = -EINVAL; | ||
1022 | test_msg("Miscount, wanted 4, got %u\n", | ||
1023 | BTRFS_I(inode)->outstanding_extents); | ||
1024 | goto out; | ||
1025 | } | ||
1026 | |||
1027 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */ | ||
1028 | BTRFS_I(inode)->outstanding_extents++; | ||
1029 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, | ||
1030 | BTRFS_MAX_EXTENT_SIZE+8191, NULL); | ||
1031 | if (ret) { | ||
1032 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
1033 | goto out; | ||
1034 | } | ||
1035 | if (BTRFS_I(inode)->outstanding_extents != 3) { | ||
1036 | ret = -EINVAL; | ||
1037 | test_msg("Miscount, wanted 3, got %u\n", | ||
1038 | BTRFS_I(inode)->outstanding_extents); | ||
1039 | goto out; | ||
1040 | } | ||
1041 | |||
1042 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ | ||
1043 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, | ||
1044 | BTRFS_MAX_EXTENT_SIZE+4096, | ||
1045 | BTRFS_MAX_EXTENT_SIZE+8191, | ||
1046 | EXTENT_DIRTY | EXTENT_DELALLOC | | ||
1047 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | ||
1048 | NULL, GFP_NOFS); | ||
1049 | if (ret) { | ||
1050 | test_msg("clear_extent_bit returned %d\n", ret); | ||
1051 | goto out; | ||
1052 | } | ||
1053 | if (BTRFS_I(inode)->outstanding_extents != 4) { | ||
1054 | ret = -EINVAL; | ||
1055 | test_msg("Miscount, wanted 4, got %u\n", | ||
1056 | BTRFS_I(inode)->outstanding_extents); | ||
1057 | goto out; | ||
1058 | } | ||
1059 | |||
1060 | /* | ||
1061 | * Refill the hole again just for good measure, because I thought it | ||
1062 | * might fail and I'd rather satisfy my paranoia at this point. | ||
1063 | */ | ||
1064 | BTRFS_I(inode)->outstanding_extents++; | ||
1065 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, | ||
1066 | BTRFS_MAX_EXTENT_SIZE+8191, NULL); | ||
1067 | if (ret) { | ||
1068 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
1069 | goto out; | ||
1070 | } | ||
1071 | if (BTRFS_I(inode)->outstanding_extents != 3) { | ||
1072 | ret = -EINVAL; | ||
1073 | test_msg("Miscount, wanted 3, got %u\n", | ||
1074 | BTRFS_I(inode)->outstanding_extents); | ||
1075 | goto out; | ||
1076 | } | ||
1077 | |||
1078 | /* Empty */ | ||
1079 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, | ||
1080 | EXTENT_DIRTY | EXTENT_DELALLOC | | ||
1081 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | ||
1082 | NULL, GFP_NOFS); | ||
1083 | if (ret) { | ||
1084 | test_msg("clear_extent_bit returned %d\n", ret); | ||
1085 | goto out; | ||
1086 | } | ||
1087 | if (BTRFS_I(inode)->outstanding_extents) { | ||
1088 | ret = -EINVAL; | ||
1089 | test_msg("Miscount, wanted 0, got %u\n", | ||
1090 | BTRFS_I(inode)->outstanding_extents); | ||
1091 | goto out; | ||
1092 | } | ||
1093 | ret = 0; | ||
1094 | out: | ||
1095 | if (ret) | ||
1096 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, | ||
1097 | EXTENT_DIRTY | EXTENT_DELALLOC | | ||
1098 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | ||
1099 | NULL, GFP_NOFS); | ||
1100 | iput(inode); | ||
1101 | btrfs_free_dummy_root(root); | ||
1102 | return ret; | ||
1103 | } | ||
1104 | |||
914 | int btrfs_test_inodes(void) | 1105 | int btrfs_test_inodes(void) |
915 | { | 1106 | { |
916 | int ret; | 1107 | int ret; |
@@ -924,5 +1115,9 @@ int btrfs_test_inodes(void) | |||
924 | if (ret) | 1115 | if (ret) |
925 | return ret; | 1116 | return ret; |
926 | test_msg("Running hole first btrfs_get_extent test\n"); | 1117 | test_msg("Running hole first btrfs_get_extent test\n"); |
927 | return test_hole_first(); | 1118 | ret = test_hole_first(); |
1119 | if (ret) | ||
1120 | return ret; | ||
1121 | test_msg("Running outstanding_extents tests\n"); | ||
1122 | return test_extent_accounting(); | ||
928 | } | 1123 | } |
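
The new test above asserts, step by step, that a contiguous delalloc range is charged one outstanding extent per BTRFS_MAX_EXTENT_SIZE-sized chunk, rounded up, and that punching or refilling a hole splits or merges the accounting accordingly. A minimal user-space model of that arithmetic is sketched below; the constant value (128 MiB) and the helper name are illustrative assumptions, not the kernel's accounting code, but the results match every count the test expects.

/*
 * Simplified model of the arithmetic asserted by test_extent_accounting():
 * each contiguous delalloc range costs ceil(len / MAX_EXTENT_SIZE) extents.
 * MAX_EXTENT_SIZE is assumed to be 128 MiB here for illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_EXTENT_SIZE (128ULL * 1024 * 1024)

static uint64_t expected_extents(uint64_t len)
{
	return (len + MAX_EXTENT_SIZE - 1) / MAX_EXTENT_SIZE;
}

int main(void)
{
	/* [BTRFS_MAX_EXTENT_SIZE] -> one extent */
	assert(expected_extents(MAX_EXTENT_SIZE) == 1);
	/* [BTRFS_MAX_EXTENT_SIZE][4k], merged -> two extents */
	assert(expected_extents(MAX_EXTENT_SIZE + 4096) == 2);
	/* each half left by a 4k hole -> one extent apiece */
	assert(expected_extents(MAX_EXTENT_SIZE >> 1) == 1);
	/* the fully merged [MAX+4k][4k][MAX+4k] range -> three extents */
	assert(expected_extents(2 * MAX_EXTENT_SIZE + 12288) == 3);
	printf("extent accounting model holds\n");
	return 0;
}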
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 88e51aded6bd..8be4278e25e8 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -1023,17 +1023,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
1023 | u64 old_root_bytenr; | 1023 | u64 old_root_bytenr; |
1024 | u64 old_root_used; | 1024 | u64 old_root_used; |
1025 | struct btrfs_root *tree_root = root->fs_info->tree_root; | 1025 | struct btrfs_root *tree_root = root->fs_info->tree_root; |
1026 | bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID); | ||
1027 | 1026 | ||
1028 | old_root_used = btrfs_root_used(&root->root_item); | 1027 | old_root_used = btrfs_root_used(&root->root_item); |
1029 | btrfs_write_dirty_block_groups(trans, root); | ||
1030 | 1028 | ||
1031 | while (1) { | 1029 | while (1) { |
1032 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); | 1030 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); |
1033 | if (old_root_bytenr == root->node->start && | 1031 | if (old_root_bytenr == root->node->start && |
1034 | old_root_used == btrfs_root_used(&root->root_item) && | 1032 | old_root_used == btrfs_root_used(&root->root_item)) |
1035 | (!extent_root || | ||
1036 | list_empty(&trans->transaction->dirty_bgs))) | ||
1037 | break; | 1033 | break; |
1038 | 1034 | ||
1039 | btrfs_set_root_node(&root->root_item, root->node); | 1035 | btrfs_set_root_node(&root->root_item, root->node); |
@@ -1044,14 +1040,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
1044 | return ret; | 1040 | return ret; |
1045 | 1041 | ||
1046 | old_root_used = btrfs_root_used(&root->root_item); | 1042 | old_root_used = btrfs_root_used(&root->root_item); |
1047 | if (extent_root) { | ||
1048 | ret = btrfs_write_dirty_block_groups(trans, root); | ||
1049 | if (ret) | ||
1050 | return ret; | ||
1051 | } | ||
1052 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
1053 | if (ret) | ||
1054 | return ret; | ||
1055 | } | 1043 | } |
1056 | 1044 | ||
1057 | return 0; | 1045 | return 0; |
@@ -1068,6 +1056,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
1068 | struct btrfs_root *root) | 1056 | struct btrfs_root *root) |
1069 | { | 1057 | { |
1070 | struct btrfs_fs_info *fs_info = root->fs_info; | 1058 | struct btrfs_fs_info *fs_info = root->fs_info; |
1059 | struct list_head *dirty_bgs = &trans->transaction->dirty_bgs; | ||
1071 | struct list_head *next; | 1060 | struct list_head *next; |
1072 | struct extent_buffer *eb; | 1061 | struct extent_buffer *eb; |
1073 | int ret; | 1062 | int ret; |
@@ -1095,11 +1084,15 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
1095 | if (ret) | 1084 | if (ret) |
1096 | return ret; | 1085 | return ret; |
1097 | 1086 | ||
1087 | ret = btrfs_setup_space_cache(trans, root); | ||
1088 | if (ret) | ||
1089 | return ret; | ||
1090 | |||
1098 | /* run_qgroups might have added some more refs */ | 1091 | /* run_qgroups might have added some more refs */ |
1099 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | 1092 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); |
1100 | if (ret) | 1093 | if (ret) |
1101 | return ret; | 1094 | return ret; |
1102 | 1095 | again: | |
1103 | while (!list_empty(&fs_info->dirty_cowonly_roots)) { | 1096 | while (!list_empty(&fs_info->dirty_cowonly_roots)) { |
1104 | next = fs_info->dirty_cowonly_roots.next; | 1097 | next = fs_info->dirty_cowonly_roots.next; |
1105 | list_del_init(next); | 1098 | list_del_init(next); |
@@ -1112,8 +1105,23 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
1112 | ret = update_cowonly_root(trans, root); | 1105 | ret = update_cowonly_root(trans, root); |
1113 | if (ret) | 1106 | if (ret) |
1114 | return ret; | 1107 | return ret; |
1108 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
1109 | if (ret) | ||
1110 | return ret; | ||
1115 | } | 1111 | } |
1116 | 1112 | ||
1113 | while (!list_empty(dirty_bgs)) { | ||
1114 | ret = btrfs_write_dirty_block_groups(trans, root); | ||
1115 | if (ret) | ||
1116 | return ret; | ||
1117 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
1118 | if (ret) | ||
1119 | return ret; | ||
1120 | } | ||
1121 | |||
1122 | if (!list_empty(&fs_info->dirty_cowonly_roots)) | ||
1123 | goto again; | ||
1124 | |||
1117 | list_add_tail(&fs_info->extent_root->dirty_list, | 1125 | list_add_tail(&fs_info->extent_root->dirty_list, |
1118 | &trans->transaction->switch_commits); | 1126 | &trans->transaction->switch_commits); |
1119 | btrfs_after_dev_replace_commit(fs_info); | 1127 | btrfs_after_dev_replace_commit(fs_info); |
@@ -1811,6 +1819,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
1811 | 1819 | ||
1812 | wait_for_commit(root, cur_trans); | 1820 | wait_for_commit(root, cur_trans); |
1813 | 1821 | ||
1822 | if (unlikely(cur_trans->aborted)) | ||
1823 | ret = cur_trans->aborted; | ||
1824 | |||
1814 | btrfs_put_transaction(cur_trans); | 1825 | btrfs_put_transaction(cur_trans); |
1815 | 1826 | ||
1816 | return ret; | 1827 | return ret; |
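
The transaction.c rework above moves the dirty block-group writes out of update_cowonly_root() and into commit_cowonly_roots(), which now drains the dirty-root list and the dirty block-group list in turn and jumps back to "again:" if a late flush re-dirtied a root. A user-space sketch of that retry-until-stable pattern follows; the struct and helpers are hypothetical stand-ins, not btrfs code.

/*
 * Retry-until-stable sketch: flushing roots can dirty block groups, and
 * writing block groups can dirty a root again, so keep draining both
 * work lists until neither refills.
 */
#include <assert.h>
#include <stdbool.h>

struct commit_state {
	int dirty_roots;
	int dirty_block_groups;
	bool extent_root_redirtied;
};

static void flush_one_root(struct commit_state *s)
{
	s->dirty_roots--;
	s->dirty_block_groups++;	/* updating a root dirties block groups */
}

static void flush_block_groups(struct commit_state *s)
{
	s->dirty_block_groups = 0;
	if (!s->extent_root_redirtied) {
		/* writing block groups touches the extent root once more */
		s->extent_root_redirtied = true;
		s->dirty_roots++;
	}
}

static void commit_cowonly(struct commit_state *s)
{
again:
	while (s->dirty_roots)
		flush_one_root(s);
	while (s->dirty_block_groups)
		flush_block_groups(s);
	if (s->dirty_roots)
		goto again;	/* a late flush re-dirtied a root */
}

int main(void)
{
	struct commit_state s = { .dirty_roots = 3 };

	commit_cowonly(&s);
	assert(!s.dirty_roots && !s.dirty_block_groups);
	return 0;
}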
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index b684e8a132e6..2bacb9988566 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c | |||
@@ -207,6 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of, | |||
207 | goto out_free; | 207 | goto out_free; |
208 | } | 208 | } |
209 | 209 | ||
210 | of->event = atomic_read(&of->kn->attr.open->event); | ||
210 | ops = kernfs_ops(of->kn); | 211 | ops = kernfs_ops(of->kn); |
211 | if (ops->read) | 212 | if (ops->read) |
212 | len = ops->read(of, buf, len, *ppos); | 213 | len = ops->read(of, buf, len, *ppos); |
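
The one-line kernfs change snapshots the open file's event counter at read time, presumably so that a later poll only reports notifications that arrive after the data has actually been consumed. Below is an illustrative user-space model of that snapshot-and-compare pattern; it is not the kernfs implementation and the names are invented.

/*
 * Event-counter snapshot sketch: record the shared counter when the file
 * is read, and report "needs wakeup" only once it has advanced again.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct open_file {
	atomic_int *event;	/* shared counter, bumped by notifiers */
	int seen;		/* snapshot taken when the file is read */
};

static void file_read(struct open_file *of)
{
	of->seen = atomic_load(of->event);	/* snapshot before copying data out */
	/* ... hand the current contents to the caller ... */
}

static bool file_needs_wakeup(const struct open_file *of)
{
	return atomic_load(of->event) != of->seen;
}

int main(void)
{
	atomic_int counter = 1;
	struct open_file of = { .event = &counter };

	file_read(&of);
	assert(!file_needs_wakeup(&of));	/* nothing new since the read */
	atomic_fetch_add(&counter, 1);		/* a notifier fires */
	assert(file_needs_wakeup(&of));
	return 0;
}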
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 3c1bfa155571..1028a0629543 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c | |||
@@ -587,8 +587,6 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) | |||
587 | 587 | ||
588 | rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); | 588 | rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); |
589 | 589 | ||
590 | nfsd4_cb_layout_fail(ls); | ||
591 | |||
592 | printk(KERN_WARNING | 590 | printk(KERN_WARNING |
593 | "nfsd: client %s failed to respond to layout recall. " | 591 | "nfsd: client %s failed to respond to layout recall. " |
594 | " Fencing..\n", addr_str); | 592 | " Fencing..\n", addr_str); |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 2646aed1d3fe..fd23978d93fe 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -375,6 +375,7 @@ int dm_create(int minor, struct mapped_device **md); | |||
375 | */ | 375 | */ |
376 | struct mapped_device *dm_get_md(dev_t dev); | 376 | struct mapped_device *dm_get_md(dev_t dev); |
377 | void dm_get(struct mapped_device *md); | 377 | void dm_get(struct mapped_device *md); |
378 | int dm_hold(struct mapped_device *md); | ||
378 | void dm_put(struct mapped_device *md); | 379 | void dm_put(struct mapped_device *md); |
379 | 380 | ||
380 | /* | 381 | /* |
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index 534e1f2ac4fc..57639fca223a 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h | |||
@@ -79,6 +79,16 @@ void nf_log_packet(struct net *net, | |||
79 | const struct nf_loginfo *li, | 79 | const struct nf_loginfo *li, |
80 | const char *fmt, ...); | 80 | const char *fmt, ...); |
81 | 81 | ||
82 | __printf(8, 9) | ||
83 | void nf_log_trace(struct net *net, | ||
84 | u_int8_t pf, | ||
85 | unsigned int hooknum, | ||
86 | const struct sk_buff *skb, | ||
87 | const struct net_device *in, | ||
88 | const struct net_device *out, | ||
89 | const struct nf_loginfo *li, | ||
90 | const char *fmt, ...); | ||
91 | |||
82 | struct nf_log_buf; | 92 | struct nf_log_buf; |
83 | 93 | ||
84 | struct nf_log_buf *nf_log_buf_open(void); | 94 | struct nf_log_buf *nf_log_buf_open(void); |
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index db81c65b8f48..d61be7297b2c 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
@@ -111,6 +111,7 @@ void array_free(void *array, int n); | |||
111 | void target_core_setup_sub_cits(struct se_subsystem_api *); | 111 | void target_core_setup_sub_cits(struct se_subsystem_api *); |
112 | 112 | ||
113 | /* attribute helpers from target_core_device.c for backend drivers */ | 113 | /* attribute helpers from target_core_device.c for backend drivers */ |
114 | bool se_dev_check_wce(struct se_device *); | ||
114 | int se_dev_set_max_unmap_lba_count(struct se_device *, u32); | 115 | int se_dev_set_max_unmap_lba_count(struct se_device *, u32); |
115 | int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); | 116 | int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); |
116 | int se_dev_set_unmap_granularity(struct se_device *, u32); | 117 | int se_dev_set_unmap_granularity(struct se_device *, u32); |
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 7a85967060a5..f0f5c5c3de12 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c | |||
@@ -139,6 +139,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize) | |||
139 | /* Error: request to write beyond destination buffer */ | 139 | /* Error: request to write beyond destination buffer */ |
140 | if (cpy > oend) | 140 | if (cpy > oend) |
141 | goto _output_error; | 141 | goto _output_error; |
142 | if ((ref + COPYLENGTH) > oend || | ||
143 | (op + COPYLENGTH) > oend) | ||
144 | goto _output_error; | ||
142 | LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); | 145 | LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); |
143 | while (op < cpy) | 146 | while (op < cpy) |
144 | *op++ = *ref++; | 147 | *op++ = *ref++; |
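
The extra check added to lz4_uncompress() refuses a match copy unless both the back-reference and the write cursor still have a full COPYLENGTH of headroom before the end of the output buffer, since the fast copy path writes in fixed-size chunks and can overshoot. A small user-space sketch of the same guard; the names and chunk size are illustrative, and this sketch copies bytes rather than chunks.

/*
 * Bounds-guard sketch: op (write cursor), ref (back-reference) and cpy
 * (requested end of the copy) all point into one output buffer ending at
 * oend.  The chunked in-kernel copy can overshoot, hence the headroom
 * check; the copy loop here is a plain byte copy.
 */
#include <assert.h>

#define CHUNK 8			/* stand-in for COPYLENGTH */

static int bounded_match_copy(unsigned char *op, const unsigned char *ref,
			      unsigned char *cpy, const unsigned char *oend)
{
	if (cpy > oend)
		return -1;	/* request runs past the buffer */
	if (ref + CHUNK > oend || op + CHUNK > oend)
		return -1;	/* no headroom for a chunked copy */
	while (op < cpy)
		*op++ = *ref++;
	return 0;
}

int main(void)
{
	unsigned char buf[32] = "abcdefgh";

	/* in-bounds back-reference copy succeeds */
	assert(bounded_match_copy(buf + 8, buf, buf + 16, buf + 32) == 0);
	/* a copy starting too close to the end of the buffer is refused */
	assert(bounded_match_copy(buf + 28, buf + 20, buf + 31, buf + 32) == -1);
	return 0;
}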
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 99e810f84671..cf5e82f39d3b 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -272,9 +272,9 @@ static void trace_packet(const struct sk_buff *skb, | |||
272 | &chainname, &comment, &rulenum) != 0) | 272 | &chainname, &comment, &rulenum) != 0) |
273 | break; | 273 | break; |
274 | 274 | ||
275 | nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo, | 275 | nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo, |
276 | "TRACE: %s:%s:%s:%u ", | 276 | "TRACE: %s:%s:%s:%u ", |
277 | tablename, chainname, comment, rulenum); | 277 | tablename, chainname, comment, rulenum); |
278 | } | 278 | } |
279 | #endif | 279 | #endif |
280 | 280 | ||
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index e080fbbbc0e5..bb00c6f2a885 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -298,9 +298,9 @@ static void trace_packet(const struct sk_buff *skb, | |||
298 | &chainname, &comment, &rulenum) != 0) | 298 | &chainname, &comment, &rulenum) != 0) |
299 | break; | 299 | break; |
300 | 300 | ||
301 | nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo, | 301 | nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo, |
302 | "TRACE: %s:%s:%s:%u ", | 302 | "TRACE: %s:%s:%s:%u ", |
303 | tablename, chainname, comment, rulenum); | 303 | tablename, chainname, comment, rulenum); |
304 | } | 304 | } |
305 | #endif | 305 | #endif |
306 | 306 | ||
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 0d8448f19dfe..675d12c69e32 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
@@ -212,6 +212,30 @@ void nf_log_packet(struct net *net, | |||
212 | } | 212 | } |
213 | EXPORT_SYMBOL(nf_log_packet); | 213 | EXPORT_SYMBOL(nf_log_packet); |
214 | 214 | ||
215 | void nf_log_trace(struct net *net, | ||
216 | u_int8_t pf, | ||
217 | unsigned int hooknum, | ||
218 | const struct sk_buff *skb, | ||
219 | const struct net_device *in, | ||
220 | const struct net_device *out, | ||
221 | const struct nf_loginfo *loginfo, const char *fmt, ...) | ||
222 | { | ||
223 | va_list args; | ||
224 | char prefix[NF_LOG_PREFIXLEN]; | ||
225 | const struct nf_logger *logger; | ||
226 | |||
227 | rcu_read_lock(); | ||
228 | logger = rcu_dereference(net->nf.nf_loggers[pf]); | ||
229 | if (logger) { | ||
230 | va_start(args, fmt); | ||
231 | vsnprintf(prefix, sizeof(prefix), fmt, args); | ||
232 | va_end(args); | ||
233 | logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); | ||
234 | } | ||
235 | rcu_read_unlock(); | ||
236 | } | ||
237 | EXPORT_SYMBOL(nf_log_trace); | ||
238 | |||
215 | #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) | 239 | #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) |
216 | 240 | ||
217 | struct nf_log_buf { | 241 | struct nf_log_buf { |
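
The new nf_log_trace() helper formats a printf-style prefix into a fixed buffer and hands it to whichever per-family logger is registered, under RCU; the ip_tables and ip6_tables trace_packet() callers below switch to it for their TRACE messages. A user-space sketch of that variadic dispatch pattern follows, with the RCU registration details omitted and illustrative names.

/*
 * Variadic logger-dispatch sketch: format the prefix with vsnprintf into
 * a bounded buffer, then call the currently registered callback if any.
 */
#include <stdarg.h>
#include <stdio.h>

#define PREFIX_LEN 128		/* stand-in for NF_LOG_PREFIXLEN */

typedef void (*log_fn)(const char *prefix);

static log_fn current_logger;

static void log_trace(const char *fmt, ...)
{
	char prefix[PREFIX_LEN];
	va_list args;

	if (!current_logger)
		return;
	va_start(args, fmt);
	vsnprintf(prefix, sizeof(prefix), fmt, args);
	va_end(args);
	current_logger(prefix);
}

static void print_logger(const char *prefix)
{
	printf("%s\n", prefix);
}

int main(void)
{
	current_logger = print_logger;
	log_trace("TRACE: %s:%s:%s:%u ", "filter", "FORWARD", "rule", 3u);
	return 0;
}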
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index f7e3371ce856..363a39a6c286 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -1228,7 +1228,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
1228 | 1228 | ||
1229 | if (nla[NFTA_CHAIN_POLICY]) { | 1229 | if (nla[NFTA_CHAIN_POLICY]) { |
1230 | if ((chain != NULL && | 1230 | if ((chain != NULL && |
1231 | !(chain->flags & NFT_BASE_CHAIN)) || | 1231 | !(chain->flags & NFT_BASE_CHAIN))) |
1232 | return -EOPNOTSUPP; | ||
1233 | |||
1234 | if (chain == NULL && | ||
1232 | nla[NFTA_CHAIN_HOOK] == NULL) | 1235 | nla[NFTA_CHAIN_HOOK] == NULL) |
1233 | return -EOPNOTSUPP; | 1236 | return -EOPNOTSUPP; |
1234 | 1237 | ||
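
As far as the diff shows, splitting the combined condition changes which requests are refused: a policy is still rejected for an existing non-base chain, and for a brand-new chain created without a hook, but updating the policy of an existing base chain no longer requires resending NFTA_CHAIN_HOOK. The sketch below models that difference with plain booleans; "reject" stands in for returning -EOPNOTSUPP and the struct is hypothetical.

/*
 * Condition-change sketch: the interesting case is an existing base
 * chain whose policy is updated without the hook attribute.
 */
#include <assert.h>
#include <stdbool.h>

struct req {
	bool chain_exists;
	bool chain_is_base;
	bool has_hook_attr;
};

static bool old_reject(const struct req *r)
{
	return (r->chain_exists && !r->chain_is_base) || !r->has_hook_attr;
}

static bool new_reject(const struct req *r)
{
	if (r->chain_exists && !r->chain_is_base)
		return true;
	return !r->chain_exists && !r->has_hook_attr;
}

int main(void)
{
	struct req update_base = { true, true, false };

	assert(old_reject(&update_base));	/* used to be refused */
	assert(!new_reject(&update_base));	/* now accepted */
	return 0;
}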
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index a5599fc51a6f..54330fb5efaf 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c | |||
@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple, | |||
77 | if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) | 77 | if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) |
78 | return -EINVAL; | 78 | return -EINVAL; |
79 | 79 | ||
80 | /* Not all fields are initialized so first zero the tuple */ | ||
81 | memset(tuple, 0, sizeof(struct nf_conntrack_tuple)); | ||
82 | |||
80 | tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); | 83 | tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); |
81 | tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); | 84 | tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); |
82 | 85 | ||
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 0b0fd4e36294..589b8487cd08 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -135,6 +135,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, | |||
135 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; | 135 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; |
136 | break; | 136 | break; |
137 | case AF_INET6: | 137 | case AF_INET6: |
138 | if (proto) | ||
139 | entry->e6.ipv6.flags |= IP6T_F_PROTO; | ||
140 | |||
138 | entry->e6.ipv6.proto = proto; | 141 | entry->e6.ipv6.proto = proto; |
139 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 142 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
140 | break; | 143 | break; |
@@ -348,6 +351,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | |||
348 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; | 351 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; |
349 | break; | 352 | break; |
350 | case AF_INET6: | 353 | case AF_INET6: |
354 | if (proto) | ||
355 | entry->e6.ipv6.flags |= IP6T_F_PROTO; | ||
356 | |||
351 | entry->e6.ipv6.proto = proto; | 357 | entry->e6.ipv6.proto = proto; |
352 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 358 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
353 | break; | 359 | break; |
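
The two nft_compat hunks set IP6T_F_PROTO whenever a protocol is passed; in ip6tables the protocol field is only consulted when that flag bit is present, so filling in the value without the flag silently matches every protocol. The sketch below illustrates that flag-gated match with invented constants and names.

/*
 * Flag-gated match sketch: the protocol value is ignored unless the
 * corresponding flag bit says it is part of the match.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define F_PROTO 0x01		/* stand-in for IP6T_F_PROTO */

struct match_info {
	uint8_t proto;
	uint8_t flags;
};

static bool proto_matches(const struct match_info *m, uint8_t pkt_proto)
{
	if (!(m->flags & F_PROTO))
		return true;	/* protocol is not part of the match */
	return m->proto == pkt_proto;
}

int main(void)
{
	struct match_info tcp_only = { .proto = 6 /* TCP */ };

	/* without the flag the protocol value is ignored ... */
	assert(proto_matches(&tcp_only, 17 /* UDP */));
	/* ... with it, only TCP packets match */
	tcp_only.flags |= F_PROTO;
	assert(!proto_matches(&tcp_only, 17));
	assert(proto_matches(&tcp_only, 6));
	return 0;
}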
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 4585c5724391..ad3966976cf5 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c | |||
@@ -138,6 +138,8 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, | |||
138 | iter->err = err; | 138 | iter->err = err; |
139 | goto out; | 139 | goto out; |
140 | } | 140 | } |
141 | |||
142 | continue; | ||
141 | } | 143 | } |
142 | 144 | ||
143 | if (iter->count < iter->skip) | 145 | if (iter->count < iter->skip) |
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 165b77ce9aa9..c205b26a2bee 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c | |||
@@ -519,8 +519,8 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par) | |||
519 | { | 519 | { |
520 | const struct ip6t_ip6 *i = par->entryinfo; | 520 | const struct ip6t_ip6 *i = par->entryinfo; |
521 | 521 | ||
522 | if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) | 522 | if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && |
523 | && !(i->flags & IP6T_INV_PROTO)) | 523 | !(i->invflags & IP6T_INV_PROTO)) |
524 | return 0; | 524 | return 0; |
525 | 525 | ||
526 | pr_info("Can be used only in combination with " | 526 | pr_info("Can be used only in combination with " |
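
The xt_TPROXY fix tests i->invflags instead of i->flags: a tproxy rule should only be accepted when it is restricted to TCP or UDP and that protocol match is not inverted, and the inversion bit lives in invflags, so checking the generic flags field compared against the wrong bitmask. A small sketch of the intended validation, with illustrative names and constants:

/*
 * Validation sketch: accept the rule only for a non-inverted TCP or UDP
 * protocol match ("-p tcp", not "! -p tcp").
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define INV_PROTO 0x40		/* stand-in for IP6T_INV_PROTO */

struct rule {
	uint8_t proto;		/* 6 = TCP, 17 = UDP */
	uint8_t invflags;	/* inversion bits live here, not in flags */
};

static bool tproxy_rule_ok(const struct rule *r)
{
	return (r->proto == 6 || r->proto == 17) &&
	       !(r->invflags & INV_PROTO);
}

int main(void)
{
	struct rule tcp = { .proto = 6 };
	struct rule not_tcp = { .proto = 6, .invflags = INV_PROTO };

	assert(tproxy_rule_ok(&tcp));
	assert(!tproxy_rule_ok(&not_tcp));
	return 0;
}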