188 files changed, 1987 insertions, 1420 deletions
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 3 | 1 | VERSION = 3 |
| 2 | PATCHLEVEL = 8 | 2 | PATCHLEVEL = 8 |
| 3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
| 4 | EXTRAVERSION = -rc6 | 4 | EXTRAVERSION = -rc7 |
| 5 | NAME = Unicycling Gorilla | 5 | NAME = Unicycling Gorilla |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 36ae03a3f5d1..87dfa9026c5b 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c | |||
| @@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) | |||
| 351 | irq_set_chained_handler(irq, gic_handle_cascade_irq); | 351 | irq_set_chained_handler(irq, gic_handle_cascade_irq); |
| 352 | } | 352 | } |
| 353 | 353 | ||
| 354 | static u8 gic_get_cpumask(struct gic_chip_data *gic) | ||
| 355 | { | ||
| 356 | void __iomem *base = gic_data_dist_base(gic); | ||
| 357 | u32 mask, i; | ||
| 358 | |||
| 359 | for (i = mask = 0; i < 32; i += 4) { | ||
| 360 | mask = readl_relaxed(base + GIC_DIST_TARGET + i); | ||
| 361 | mask |= mask >> 16; | ||
| 362 | mask |= mask >> 8; | ||
| 363 | if (mask) | ||
| 364 | break; | ||
| 365 | } | ||
| 366 | |||
| 367 | if (!mask) | ||
| 368 | pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); | ||
| 369 | |||
| 370 | return mask; | ||
| 371 | } | ||
| 372 | |||
| 354 | static void __init gic_dist_init(struct gic_chip_data *gic) | 373 | static void __init gic_dist_init(struct gic_chip_data *gic) |
| 355 | { | 374 | { |
| 356 | unsigned int i; | 375 | unsigned int i; |
| @@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic) | |||
| 369 | /* | 388 | /* |
| 370 | * Set all global interrupts to this CPU only. | 389 | * Set all global interrupts to this CPU only. |
| 371 | */ | 390 | */ |
| 372 | cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0); | 391 | cpumask = gic_get_cpumask(gic); |
| 392 | cpumask |= cpumask << 8; | ||
| 393 | cpumask |= cpumask << 16; | ||
| 373 | for (i = 32; i < gic_irqs; i += 4) | 394 | for (i = 32; i < gic_irqs; i += 4) |
| 374 | writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); | 395 | writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); |
| 375 | 396 | ||
| @@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) | |||
| 400 | * Get what the GIC says our CPU mask is. | 421 | * Get what the GIC says our CPU mask is. |
| 401 | */ | 422 | */ |
| 402 | BUG_ON(cpu >= NR_GIC_CPU_IF); | 423 | BUG_ON(cpu >= NR_GIC_CPU_IF); |
| 403 | cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0); | 424 | cpu_mask = gic_get_cpumask(gic); |
| 404 | gic_cpu_map[cpu] = cpu_mask; | 425 | gic_cpu_map[cpu] = cpu_mask; |
| 405 | 426 | ||
| 406 | /* | 427 | /* |
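The new gic_get_cpumask() helper replaces the bare read of GIC_DIST_TARGET + 0: it walks the first 32 interrupt target registers until one reports a non-zero byte, folds the four byte lanes of that word together, and returns the 8-bit CPU interface mask, warning loudly if every register reads as zero. gic_dist_init() then replicates that byte into all four lanes before programming the shared target registers, which hold one target byte per interrupt. The two bit manipulations are sketched below as a standalone illustration; the register value is a made-up example, not something read from real hardware.

    /*
     * Illustration only, not kernel code: fold one 32-bit GIC_DIST_TARGET
     * word down to the 8-bit CPU interface mask, then replicate that byte
     * into all four lanes so one store covers four interrupts at a time.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t target = 0x00000001;      /* hypothetical readback: CPU0 in byte 0 */
        uint32_t mask = target;

        mask |= mask >> 16;                /* fold the high half into the low half */
        mask |= mask >> 8;                 /* fold the remaining two bytes */
        uint8_t cpu_if = mask & 0xff;      /* 0x01: this CPU's interface bit */

        uint32_t cpumask = cpu_if;
        cpumask |= cpumask << 8;
        cpumask |= cpumask << 16;          /* 0x01010101: one target byte per IRQ */

        printf("cpu_if=0x%02x cpumask=0x%08x\n", cpu_if, cpumask);
        return 0;
    }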
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 73cf03aa981e..1c4df27f9332 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
| @@ -37,7 +37,7 @@ | |||
| 37 | */ | 37 | */ |
| 38 | #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) | 38 | #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) |
| 39 | #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) | 39 | #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) |
| 40 | #define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) | 40 | #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) |
| 41 | 41 | ||
| 42 | /* | 42 | /* |
| 43 | * The maximum size of a 26-bit user space task. | 43 | * The maximum size of a 26-bit user space task. |
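Replacing PAGE_OFFSET / 3 with ALIGN(TASK_SIZE / 3, SZ_16M) keeps TASK_UNMAPPED_BASE 16MB aligned for every choice of CONFIG_PAGE_OFFSET, rather than only for the splits where the division happens to come out aligned. A worked example, with the split values assumed for illustration:

    /*
     * Assumed values, for illustration only.
     *
     * 3G/1G split, CONFIG_PAGE_OFFSET = 0xC0000000:
     *   old: 0xC0000000 / 3                        = 0x40000000  (aligned by luck)
     *   new: TASK_SIZE  = 0xC0000000 - 0x01000000  = 0xBF000000
     *        TASK_SIZE / 3                         = 0x3FAAAAAA
     *        ALIGN(0x3FAAAAAA, SZ_16M)             = 0x40000000
     *
     * 2G/2G split, CONFIG_PAGE_OFFSET = 0x80000000:
     *   old: 0x80000000 / 3                        = 0x2AAAAAAA  (not 16MB aligned)
     *   new: ALIGN(0x7F000000 / 3, SZ_16M)
     *      = ALIGN(0x2A555555, SZ_16M)             = 0x2B000000
     */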
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 4eb6d005ffaa..86dff32a0737 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h | |||
| @@ -7,8 +7,14 @@ | |||
| 7 | 7 | ||
| 8 | #ifndef __ASSEMBLER__ | 8 | #ifndef __ASSEMBLER__ |
| 9 | unsigned int scu_get_core_count(void __iomem *); | 9 | unsigned int scu_get_core_count(void __iomem *); |
| 10 | void scu_enable(void __iomem *); | ||
| 11 | int scu_power_mode(void __iomem *, unsigned int); | 10 | int scu_power_mode(void __iomem *, unsigned int); |
| 11 | |||
| 12 | #ifdef CONFIG_SMP | ||
| 13 | void scu_enable(void __iomem *scu_base); | ||
| 14 | #else | ||
| 15 | static inline void scu_enable(void __iomem *scu_base) {} | ||
| 16 | #endif | ||
| 17 | |||
| 12 | #endif | 18 | #endif |
| 13 | 19 | ||
| 14 | #endif | 20 | #endif |
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index b9f015e843d8..45eac87ed66a 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c | |||
| @@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base) | |||
| 75 | int scu_power_mode(void __iomem *scu_base, unsigned int mode) | 75 | int scu_power_mode(void __iomem *scu_base, unsigned int mode) |
| 76 | { | 76 | { |
| 77 | unsigned int val; | 77 | unsigned int val; |
| 78 | int cpu = cpu_logical_map(smp_processor_id()); | 78 | int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0); |
| 79 | 79 | ||
| 80 | if (mode > 3 || mode == 1 || cpu > 3) | 80 | if (mode > 3 || mode == 1 || cpu > 3) |
| 81 | return -EINVAL; | 81 | return -EINVAL; |
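This hunk, together with the highbank changes below, stops feeding the full cpu_logical_map() value (the raw MPIDR) into code that expects a small per-cluster CPU number: on parts whose MPIDR carries a cluster id in the higher affinity fields, the raw value would fail the "cpu > 3" check or index past the SCU's four interfaces. MPIDR_AFFINITY_LEVEL(mpidr, 0) extracts only the low affinity field. A paraphrased sketch of that extraction follows; the EXAMPLE_ names and the sample MPIDR are illustrative, not copied from asm/cputype.h.

    /* Each MPIDR affinity level is an 8-bit field; level 0 is the low byte. */
    #define EXAMPLE_MPIDR_LEVEL_BITS  8
    #define EXAMPLE_MPIDR_LEVEL_MASK  ((1 << EXAMPLE_MPIDR_LEVEL_BITS) - 1)
    #define EXAMPLE_MPIDR_AFFINITY_LEVEL(mpidr, level) \
            (((mpidr) >> (EXAMPLE_MPIDR_LEVEL_BITS * (level))) & EXAMPLE_MPIDR_LEVEL_MASK)

    /* With a hypothetical MPIDR of 0x00000102 (cluster 1, CPU 2):
     *   raw cpu_logical_map() value           -> 0x102 (rejected by "cpu > 3")
     *   EXAMPLE_MPIDR_AFFINITY_LEVEL(mpidr,0) -> 2
     */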
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index e103c290bc9e..85afb031b676 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig | |||
| @@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT | |||
| 414 | select CPU_EXYNOS4210 | 414 | select CPU_EXYNOS4210 |
| 415 | select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD | 415 | select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD |
| 416 | select PINCTRL | 416 | select PINCTRL |
| 417 | select PINCTRL_EXYNOS4 | 417 | select PINCTRL_EXYNOS |
| 418 | select USE_OF | 418 | select USE_OF |
| 419 | help | 419 | help |
| 420 | Machine support for Samsung Exynos4 machine with device tree enabled. | 420 | Machine support for Samsung Exynos4 machine with device tree enabled. |
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 981dc1e1da51..e6c061282939 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | 28 | ||
| 29 | #include <asm/arch_timer.h> | 29 | #include <asm/arch_timer.h> |
| 30 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
| 31 | #include <asm/cputype.h> | ||
| 31 | #include <asm/smp_plat.h> | 32 | #include <asm/smp_plat.h> |
| 32 | #include <asm/smp_twd.h> | 33 | #include <asm/smp_twd.h> |
| 33 | #include <asm/hardware/arm_timer.h> | 34 | #include <asm/hardware/arm_timer.h> |
| @@ -59,7 +60,7 @@ static void __init highbank_scu_map_io(void) | |||
| 59 | 60 | ||
| 60 | void highbank_set_cpu_jump(int cpu, void *jump_addr) | 61 | void highbank_set_cpu_jump(int cpu, void *jump_addr) |
| 61 | { | 62 | { |
| 62 | cpu = cpu_logical_map(cpu); | 63 | cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0); |
| 63 | writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu)); | 64 | writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu)); |
| 64 | __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16); | 65 | __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16); |
| 65 | outer_clean_range(HB_JUMP_TABLE_PHYS(cpu), | 66 | outer_clean_range(HB_JUMP_TABLE_PHYS(cpu), |
diff --git a/arch/arm/mach-highbank/sysregs.h b/arch/arm/mach-highbank/sysregs.h index 70af9d13fcef..5995df7f2622 100644 --- a/arch/arm/mach-highbank/sysregs.h +++ b/arch/arm/mach-highbank/sysregs.h | |||
| @@ -37,7 +37,7 @@ extern void __iomem *sregs_base; | |||
| 37 | 37 | ||
| 38 | static inline void highbank_set_core_pwr(void) | 38 | static inline void highbank_set_core_pwr(void) |
| 39 | { | 39 | { |
| 40 | int cpu = cpu_logical_map(smp_processor_id()); | 40 | int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0); |
| 41 | if (scu_base_addr) | 41 | if (scu_base_addr) |
| 42 | scu_power_mode(scu_base_addr, SCU_PM_POWEROFF); | 42 | scu_power_mode(scu_base_addr, SCU_PM_POWEROFF); |
| 43 | else | 43 | else |
| @@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void) | |||
| 46 | 46 | ||
| 47 | static inline void highbank_clear_core_pwr(void) | 47 | static inline void highbank_clear_core_pwr(void) |
| 48 | { | 48 | { |
| 49 | int cpu = cpu_logical_map(smp_processor_id()); | 49 | int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0); |
| 50 | if (scu_base_addr) | 50 | if (scu_base_addr) |
| 51 | scu_power_mode(scu_base_addr, SCU_PM_NORMAL); | 51 | scu_power_mode(scu_base_addr, SCU_PM_NORMAL); |
| 52 | else | 52 | else |
diff --git a/arch/arm/mach-realview/include/mach/irqs-eb.h b/arch/arm/mach-realview/include/mach/irqs-eb.h index d6b5073692d2..44754230fdcc 100644 --- a/arch/arm/mach-realview/include/mach/irqs-eb.h +++ b/arch/arm/mach-realview/include/mach/irqs-eb.h | |||
| @@ -115,7 +115,7 @@ | |||
| 115 | /* | 115 | /* |
| 116 | * Only define NR_IRQS if less than NR_IRQS_EB | 116 | * Only define NR_IRQS if less than NR_IRQS_EB |
| 117 | */ | 117 | */ |
| 118 | #define NR_IRQS_EB (IRQ_EB_GIC_START + 96) | 118 | #define NR_IRQS_EB (IRQ_EB_GIC_START + 128) |
| 119 | 119 | ||
| 120 | #if defined(CONFIG_MACH_REALVIEW_EB) \ | 120 | #if defined(CONFIG_MACH_REALVIEW_EB) \ |
| 121 | && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB)) | 121 | && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB)) |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 076c26d43864..dda3904dc64c 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
| @@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
| 640 | 640 | ||
| 641 | if (is_coherent || nommu()) | 641 | if (is_coherent || nommu()) |
| 642 | addr = __alloc_simple_buffer(dev, size, gfp, &page); | 642 | addr = __alloc_simple_buffer(dev, size, gfp, &page); |
| 643 | else if (gfp & GFP_ATOMIC) | 643 | else if (!(gfp & __GFP_WAIT)) |
| 644 | addr = __alloc_from_pool(size, &page); | 644 | addr = __alloc_from_pool(size, &page); |
| 645 | else if (!IS_ENABLED(CONFIG_CMA)) | 645 | else if (!IS_ENABLED(CONFIG_CMA)) |
| 646 | addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); | 646 | addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); |
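The old test relied on gfp & GFP_ATOMIC, but in this era of the kernel GFP_ATOMIC expands to __GFP_HIGH, so the check really asked "is the caller high priority" rather than "may the caller sleep". What actually decides whether the preallocated atomic pool must be used is the absence of __GFP_WAIT, which is what the replacement tests. A self-contained sketch of that predicate follows; the numeric flag values are assumed for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int gfp_t;        /* stand-in for the kernel type */
    #define __GFP_WAIT   0x10u         /* "caller may sleep" (value assumed) */
    #define __GFP_HIGH   0x20u
    #define GFP_ATOMIC   __GFP_HIGH    /* note: no __GFP_WAIT bit in here */
    #define GFP_KERNEL   (__GFP_WAIT | 0x40u | 0x80u)   /* plus IO/FS bits, assumed */

    /* The decision the hunk makes: only callers that may sleep skip the pool. */
    static bool may_block(gfp_t gfp)
    {
        return (gfp & __GFP_WAIT) != 0;
    }

    int main(void)
    {
        printf("GFP_ATOMIC may_block=%d\n", may_block(GFP_ATOMIC));   /* 0: atomic pool */
        printf("GFP_KERNEL may_block=%d\n", may_block(GFP_KERNEL));   /* 1: slow paths  */
        return 0;
    }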
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h index aaf5199d8fcb..b3d18f9f3e8d 100644 --- a/arch/avr32/include/asm/dma-mapping.h +++ b/arch/avr32/include/asm/dma-mapping.h | |||
| @@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
| 336 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 336 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
| 337 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 337 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
| 338 | 338 | ||
| 339 | /* drivers/base/dma-mapping.c */ | ||
| 340 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
| 341 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
| 342 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 343 | void *cpu_addr, dma_addr_t dma_addr, | ||
| 344 | size_t size); | ||
| 345 | |||
| 346 | #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) | ||
| 347 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
| 348 | |||
| 339 | #endif /* __ASM_AVR32_DMA_MAPPING_H */ | 349 | #endif /* __ASM_AVR32_DMA_MAPPING_H */ |
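This avr32 hunk and the matching blackfin, cris and m68k ones below wire the newly cross-architecture dma_mmap_coherent() and dma_get_sgtable() calls to the generic dma_common_mmap()/dma_common_get_sgtable() helpers, while c6x, frv, mn10300 and parisc, which cannot support the operation, gain inline stubs that return -EINVAL. As a hedged sketch of the consumer side (the driver structure, its field names and the earlier dma_alloc_coherent() call are assumptions, not part of this patch), a driver's mmap handler would typically hand its coherent buffer to user space like this:

    /* Sketch only: assumes drv->cpu_addr/drv->dma_handle came from
     * dma_alloc_coherent(drv->dev, drv->size, &drv->dma_handle, GFP_KERNEL). */
    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct example_drvdata *drv = file->private_data;   /* hypothetical */

            return dma_mmap_coherent(drv->dev, vma, drv->cpu_addr,
                                     drv->dma_handle, drv->size);
    }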
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h index bbf461076a0a..054d9ec57d9d 100644 --- a/arch/blackfin/include/asm/dma-mapping.h +++ b/arch/blackfin/include/asm/dma-mapping.h | |||
| @@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
| 154 | _dma_sync((dma_addr_t)vaddr, size, dir); | 154 | _dma_sync((dma_addr_t)vaddr, size, dir); |
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | /* drivers/base/dma-mapping.c */ | ||
| 158 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
| 159 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
| 160 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 161 | void *cpu_addr, dma_addr_t dma_addr, | ||
| 162 | size_t size); | ||
| 163 | |||
| 164 | #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) | ||
| 165 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
| 166 | |||
| 157 | #endif /* _BLACKFIN_DMA_MAPPING_H */ | 167 | #endif /* _BLACKFIN_DMA_MAPPING_H */ |
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index 3c694065030f..88bd0d899bdb 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h | |||
| @@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t); | |||
| 89 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f)) | 89 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f)) |
| 90 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h)) | 90 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h)) |
| 91 | 91 | ||
| 92 | /* Not supported for now */ | ||
| 93 | static inline int dma_mmap_coherent(struct device *dev, | ||
| 94 | struct vm_area_struct *vma, void *cpu_addr, | ||
| 95 | dma_addr_t dma_addr, size_t size) | ||
| 96 | { | ||
| 97 | return -EINVAL; | ||
| 98 | } | ||
| 99 | |||
| 100 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 101 | void *cpu_addr, dma_addr_t dma_addr, | ||
| 102 | size_t size) | ||
| 103 | { | ||
| 104 | return -EINVAL; | ||
| 105 | } | ||
| 106 | |||
| 92 | #endif /* _ASM_C6X_DMA_MAPPING_H */ | 107 | #endif /* _ASM_C6X_DMA_MAPPING_H */ |
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h index 8588b2ccf854..2f0f654f1b44 100644 --- a/arch/cris/include/asm/dma-mapping.h +++ b/arch/cris/include/asm/dma-mapping.h | |||
| @@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
| 158 | { | 158 | { |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | /* drivers/base/dma-mapping.c */ | ||
| 162 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
| 163 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
| 164 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 165 | void *cpu_addr, dma_addr_t dma_addr, | ||
| 166 | size_t size); | ||
| 167 | |||
| 168 | #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) | ||
| 169 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
| 170 | |||
| 161 | 171 | ||
| 162 | #endif | 172 | #endif |
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h index dfb811002c64..1746a2b8e6e7 100644 --- a/arch/frv/include/asm/dma-mapping.h +++ b/arch/frv/include/asm/dma-mapping.h | |||
| @@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
| 132 | flush_write_buffers(); | 132 | flush_write_buffers(); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | /* Not supported for now */ | ||
| 136 | static inline int dma_mmap_coherent(struct device *dev, | ||
| 137 | struct vm_area_struct *vma, void *cpu_addr, | ||
| 138 | dma_addr_t dma_addr, size_t size) | ||
| 139 | { | ||
| 140 | return -EINVAL; | ||
| 141 | } | ||
| 142 | |||
| 143 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 144 | void *cpu_addr, dma_addr_t dma_addr, | ||
| 145 | size_t size) | ||
| 146 | { | ||
| 147 | return -EINVAL; | ||
| 148 | } | ||
| 149 | |||
| 135 | #endif /* _ASM_DMA_MAPPING_H */ | 150 | #endif /* _ASM_DMA_MAPPING_H */ |
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h index 3e6b8445af6a..292805f0762e 100644 --- a/arch/m68k/include/asm/dma-mapping.h +++ b/arch/m68k/include/asm/dma-mapping.h | |||
| @@ -115,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle) | |||
| 115 | #include <asm-generic/dma-mapping-broken.h> | 115 | #include <asm-generic/dma-mapping-broken.h> |
| 116 | #endif | 116 | #endif |
| 117 | 117 | ||
| 118 | /* drivers/base/dma-mapping.c */ | ||
| 119 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
| 120 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
| 121 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 122 | void *cpu_addr, dma_addr_t dma_addr, | ||
| 123 | size_t size); | ||
| 124 | |||
| 125 | #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) | ||
| 126 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
| 127 | |||
| 118 | #endif /* _M68K_DMA_MAPPING_H */ | 128 | #endif /* _M68K_DMA_MAPPING_H */ |
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h index ae700f49e51d..b0768a657920 100644 --- a/arch/m68k/include/asm/processor.h +++ b/arch/m68k/include/asm/processor.h | |||
| @@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs); | |||
| 130 | #define start_thread(_regs, _pc, _usp) \ | 130 | #define start_thread(_regs, _pc, _usp) \ |
| 131 | do { \ | 131 | do { \ |
| 132 | (_regs)->pc = (_pc); \ | 132 | (_regs)->pc = (_pc); \ |
| 133 | ((struct switch_stack *)(_regs))[-1].a6 = 0; \ | ||
| 134 | setframeformat(_regs); \ | 133 | setframeformat(_regs); \ |
| 135 | if (current->mm) \ | 134 | if (current->mm) \ |
| 136 | (_regs)->d5 = current->mm->start_data; \ | 135 | (_regs)->d5 = current->mm->start_data; \ |
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h index c1be4397b1ed..a18abfc558eb 100644 --- a/arch/mn10300/include/asm/dma-mapping.h +++ b/arch/mn10300/include/asm/dma-mapping.h | |||
| @@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size, | |||
| 168 | mn10300_dcache_flush_inv(); | 168 | mn10300_dcache_flush_inv(); |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | /* Not supported for now */ | ||
| 172 | static inline int dma_mmap_coherent(struct device *dev, | ||
| 173 | struct vm_area_struct *vma, void *cpu_addr, | ||
| 174 | dma_addr_t dma_addr, size_t size) | ||
| 175 | { | ||
| 176 | return -EINVAL; | ||
| 177 | } | ||
| 178 | |||
| 179 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 180 | void *cpu_addr, dma_addr_t dma_addr, | ||
| 181 | size_t size) | ||
| 182 | { | ||
| 183 | return -EINVAL; | ||
| 184 | } | ||
| 185 | |||
| 171 | #endif | 186 | #endif |
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 467bbd510eac..106b395688e1 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h | |||
| @@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev); | |||
| 238 | /* At the moment, we panic on error for IOMMU resource exaustion */ | 238 | /* At the moment, we panic on error for IOMMU resource exaustion */ |
| 239 | #define dma_mapping_error(dev, x) 0 | 239 | #define dma_mapping_error(dev, x) 0 |
| 240 | 240 | ||
| 241 | /* This API cannot be supported on PA-RISC */ | ||
| 242 | static inline int dma_mmap_coherent(struct device *dev, | ||
| 243 | struct vm_area_struct *vma, void *cpu_addr, | ||
| 244 | dma_addr_t dma_addr, size_t size) | ||
| 245 | { | ||
| 246 | return -EINVAL; | ||
| 247 | } | ||
| 248 | |||
| 249 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 250 | void *cpu_addr, dma_addr_t dma_addr, | ||
| 251 | size_t size) | ||
| 252 | { | ||
| 253 | return -EINVAL; | ||
| 254 | } | ||
| 255 | |||
| 241 | #endif | 256 | #endif |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index a5f4f5a1d24b..0aa98db8a80d 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
| @@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires, | |||
| 120 | nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); | 120 | nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); |
| 121 | do_div(nsecs, 125); | 121 | do_div(nsecs, 125); |
| 122 | S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); | 122 | S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); |
| 123 | /* Program the maximum value if we have an overflow (== year 2042) */ | ||
| 124 | if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc)) | ||
| 125 | S390_lowcore.clock_comparator = -1ULL; | ||
| 123 | set_clock_comparator(S390_lowcore.clock_comparator); | 126 | set_clock_comparator(S390_lowcore.clock_comparator); |
| 124 | return 0; | 127 | return 0; |
| 125 | } | 128 | } |
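The added check handles 64-bit wraparound of the TOD-format comparator value: once the requested expiry is far enough out (past the 2042 end of the TOD epoch), sched_clock_base_cc + (nsecs << 9) overflows, the programmed comparator would land in the past, and the timer would expire immediately instead of at the requested time. Clamping to -1ULL programs the latest possible expiry instead. The overflow test is the standard unsigned-addition idiom, sketched below with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t base  = 0xfff0000000000000ULL;  /* hypothetical clock base */
        uint64_t delta = 0x0020000000000000ULL;  /* hypothetical expiry offset */
        uint64_t cmp   = base + delta;           /* wraps past 2^64 */

        if (cmp < base)                          /* wrapped sum is below the base */
            cmp = -1ULL;                         /* program the maximum instead */

        printf("comparator = 0x%016llx\n", (unsigned long long)cmp);
        return 0;
    }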
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 875d008828b8..1bb7ad4aeff4 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig | |||
| @@ -140,6 +140,8 @@ config ARCH_DEFCONFIG | |||
| 140 | 140 | ||
| 141 | source "init/Kconfig" | 141 | source "init/Kconfig" |
| 142 | 142 | ||
| 143 | source "kernel/Kconfig.freezer" | ||
| 144 | |||
| 143 | menu "Tilera-specific configuration" | 145 | menu "Tilera-specific configuration" |
| 144 | 146 | ||
| 145 | config NR_CPUS | 147 | config NR_CPUS |
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index 2a9b293fece6..31672918064c 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h | |||
| @@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr) | |||
| 250 | #define iowrite32 writel | 250 | #define iowrite32 writel |
| 251 | #define iowrite64 writeq | 251 | #define iowrite64 writeq |
| 252 | 252 | ||
| 253 | static inline void memset_io(void *dst, int val, size_t len) | 253 | #if CHIP_HAS_MMIO() || defined(CONFIG_PCI) |
| 254 | |||
| 255 | static inline void memset_io(volatile void *dst, int val, size_t len) | ||
| 254 | { | 256 | { |
| 255 | int x; | 257 | int x; |
| 256 | BUG_ON((unsigned long)dst & 0x3); | 258 | BUG_ON((unsigned long)dst & 0x3); |
| @@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, | |||
| 277 | writel(*(u32 *)(src + x), dst + x); | 279 | writel(*(u32 *)(src + x), dst + x); |
| 278 | } | 280 | } |
| 279 | 281 | ||
| 282 | #endif | ||
| 283 | |||
| 280 | /* | 284 | /* |
| 281 | * The Tile architecture does not support IOPORT, even with PCI. | 285 | * The Tile architecture does not support IOPORT, even with PCI. |
| 282 | * Unfortunately we can't yet simply not declare these methods, | 286 | * Unfortunately we can't yet simply not declare these methods, |
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index b4e96fef2cf8..241c0bb60b12 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h | |||
| @@ -18,32 +18,20 @@ | |||
| 18 | #include <arch/interrupts.h> | 18 | #include <arch/interrupts.h> |
| 19 | #include <arch/chip.h> | 19 | #include <arch/chip.h> |
| 20 | 20 | ||
| 21 | #if !defined(__tilegx__) && defined(__ASSEMBLY__) | ||
| 22 | |||
| 23 | /* | 21 | /* |
| 24 | * The set of interrupts we want to allow when interrupts are nominally | 22 | * The set of interrupts we want to allow when interrupts are nominally |
| 25 | * disabled. The remainder are effectively "NMI" interrupts from | 23 | * disabled. The remainder are effectively "NMI" interrupts from |
| 26 | * the point of view of the generic Linux code. Note that synchronous | 24 | * the point of view of the generic Linux code. Note that synchronous |
| 27 | * interrupts (aka "non-queued") are not blocked by the mask in any case. | 25 | * interrupts (aka "non-queued") are not blocked by the mask in any case. |
| 28 | */ | 26 | */ |
| 29 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
| 30 | #define LINUX_MASKABLE_INTERRUPTS_HI \ | ||
| 31 | (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT))) | ||
| 32 | #else | ||
| 33 | #define LINUX_MASKABLE_INTERRUPTS_HI \ | ||
| 34 | (~(INT_MASK_HI(INT_PERF_COUNT))) | ||
| 35 | #endif | ||
| 36 | |||
| 37 | #else | ||
| 38 | |||
| 39 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
| 40 | #define LINUX_MASKABLE_INTERRUPTS \ | ||
| 41 | (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT))) | ||
| 42 | #else | ||
| 43 | #define LINUX_MASKABLE_INTERRUPTS \ | 27 | #define LINUX_MASKABLE_INTERRUPTS \ |
| 44 | (~(INT_MASK(INT_PERF_COUNT))) | 28 | (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT))) |
| 45 | #endif | ||
| 46 | 29 | ||
| 30 | #if CHIP_HAS_SPLIT_INTR_MASK() | ||
| 31 | /* The same macro, but for the two 32-bit SPRs separately. */ | ||
| 32 | #define LINUX_MASKABLE_INTERRUPTS_LO (-1) | ||
| 33 | #define LINUX_MASKABLE_INTERRUPTS_HI \ | ||
| 34 | (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32)))) | ||
| 47 | #endif | 35 | #endif |
| 48 | 36 | ||
| 49 | #ifndef __ASSEMBLY__ | 37 | #ifndef __ASSEMBLY__ |
| @@ -126,7 +114,7 @@ | |||
| 126 | * to know our current state. | 114 | * to know our current state. |
| 127 | */ | 115 | */ |
| 128 | DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | 116 | DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); |
| 129 | #define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) | 117 | #define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR) |
| 130 | 118 | ||
| 131 | /* Disable interrupts. */ | 119 | /* Disable interrupts. */ |
| 132 | #define arch_local_irq_disable() \ | 120 | #define arch_local_irq_disable() \ |
| @@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | |||
| 165 | 153 | ||
| 166 | /* Prevent the given interrupt from being enabled next time we enable irqs. */ | 154 | /* Prevent the given interrupt from being enabled next time we enable irqs. */ |
| 167 | #define arch_local_irq_mask(interrupt) \ | 155 | #define arch_local_irq_mask(interrupt) \ |
| 168 | (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) | 156 | (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt))) |
| 169 | 157 | ||
| 170 | /* Prevent the given interrupt from being enabled immediately. */ | 158 | /* Prevent the given interrupt from being enabled immediately. */ |
| 171 | #define arch_local_irq_mask_now(interrupt) do { \ | 159 | #define arch_local_irq_mask_now(interrupt) do { \ |
| @@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | |||
| 175 | 163 | ||
| 176 | /* Allow the given interrupt to be enabled next time we enable irqs. */ | 164 | /* Allow the given interrupt to be enabled next time we enable irqs. */ |
| 177 | #define arch_local_irq_unmask(interrupt) \ | 165 | #define arch_local_irq_unmask(interrupt) \ |
| 178 | (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) | 166 | (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt))) |
| 179 | 167 | ||
| 180 | /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ | 168 | /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ |
| 181 | #define arch_local_irq_unmask_now(interrupt) do { \ | 169 | #define arch_local_irq_unmask_now(interrupt) do { \ |
| @@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | |||
| 250 | /* Disable interrupts. */ | 238 | /* Disable interrupts. */ |
| 251 | #define IRQ_DISABLE(tmp0, tmp1) \ | 239 | #define IRQ_DISABLE(tmp0, tmp1) \ |
| 252 | { \ | 240 | { \ |
| 253 | movei tmp0, -1; \ | 241 | movei tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \ |
| 254 | moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \ | 242 | moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \ |
| 255 | }; \ | 243 | }; \ |
| 256 | { \ | 244 | { \ |
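The rewritten mask definitions drop the assembly/C special-casing from this header: _AC(1,ULL) yields a plain 1 in assembly and 1ULL in C, so one LINUX_MASKABLE_INTERRUPTS definition serves both, and chips with a split 32-bit interrupt-mask SPR pair get explicit _LO/_HI halves that the IRQ_DISABLE sequence now loads by name instead of hard-coding -1 for the low word. A paraphrased sketch of the _AC() idiom (modelled on the kernel's const.h, not a verbatim copy):

    #ifdef __ASSEMBLY__
    #define EXAMPLE_AC(X, Y)    X                   /* assembler: no integer suffixes */
    #else
    #define EXAMPLE__AC(X, Y)   (X##Y)
    #define EXAMPLE_AC(X, Y)    EXAMPLE__AC(X, Y)   /* C: paste the suffix, e.g. 1ULL */
    #endif

    /* EXAMPLE_AC(1, ULL) << INT_PERF_COUNT therefore builds the same 64-bit
     * mask whether the header is pulled into C or assembly source. */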
diff --git a/arch/tile/include/uapi/arch/interrupts_32.h b/arch/tile/include/uapi/arch/interrupts_32.h index 96b5710505b6..2efe3f68b2d6 100644 --- a/arch/tile/include/uapi/arch/interrupts_32.h +++ b/arch/tile/include/uapi/arch/interrupts_32.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #ifndef __ARCH_INTERRUPTS_H__ | 15 | #ifndef __ARCH_INTERRUPTS_H__ |
| 16 | #define __ARCH_INTERRUPTS_H__ | 16 | #define __ARCH_INTERRUPTS_H__ |
| 17 | 17 | ||
| 18 | #ifndef __KERNEL__ | ||
| 18 | /** Mask for an interrupt. */ | 19 | /** Mask for an interrupt. */ |
| 19 | /* Note: must handle breaking interrupts into high and low words manually. */ | 20 | /* Note: must handle breaking interrupts into high and low words manually. */ |
| 20 | #define INT_MASK_LO(intno) (1 << (intno)) | 21 | #define INT_MASK_LO(intno) (1 << (intno)) |
| @@ -23,6 +24,7 @@ | |||
| 23 | #ifndef __ASSEMBLER__ | 24 | #ifndef __ASSEMBLER__ |
| 24 | #define INT_MASK(intno) (1ULL << (intno)) | 25 | #define INT_MASK(intno) (1ULL << (intno)) |
| 25 | #endif | 26 | #endif |
| 27 | #endif | ||
| 26 | 28 | ||
| 27 | 29 | ||
| 28 | /** Where a given interrupt executes */ | 30 | /** Where a given interrupt executes */ |
| @@ -92,216 +94,216 @@ | |||
| 92 | 94 | ||
| 93 | #ifndef __ASSEMBLER__ | 95 | #ifndef __ASSEMBLER__ |
| 94 | #define QUEUED_INTERRUPTS ( \ | 96 | #define QUEUED_INTERRUPTS ( \ |
| 95 | INT_MASK(INT_MEM_ERROR) | \ | 97 | (1ULL << INT_MEM_ERROR) | \ |
| 96 | INT_MASK(INT_DMATLB_MISS) | \ | 98 | (1ULL << INT_DMATLB_MISS) | \ |
| 97 | INT_MASK(INT_DMATLB_ACCESS) | \ | 99 | (1ULL << INT_DMATLB_ACCESS) | \ |
| 98 | INT_MASK(INT_SNITLB_MISS) | \ | 100 | (1ULL << INT_SNITLB_MISS) | \ |
| 99 | INT_MASK(INT_SN_NOTIFY) | \ | 101 | (1ULL << INT_SN_NOTIFY) | \ |
| 100 | INT_MASK(INT_SN_FIREWALL) | \ | 102 | (1ULL << INT_SN_FIREWALL) | \ |
| 101 | INT_MASK(INT_IDN_FIREWALL) | \ | 103 | (1ULL << INT_IDN_FIREWALL) | \ |
| 102 | INT_MASK(INT_UDN_FIREWALL) | \ | 104 | (1ULL << INT_UDN_FIREWALL) | \ |
| 103 | INT_MASK(INT_TILE_TIMER) | \ | 105 | (1ULL << INT_TILE_TIMER) | \ |
| 104 | INT_MASK(INT_IDN_TIMER) | \ | 106 | (1ULL << INT_IDN_TIMER) | \ |
| 105 | INT_MASK(INT_UDN_TIMER) | \ | 107 | (1ULL << INT_UDN_TIMER) | \ |
| 106 | INT_MASK(INT_DMA_NOTIFY) | \ | 108 | (1ULL << INT_DMA_NOTIFY) | \ |
| 107 | INT_MASK(INT_IDN_CA) | \ | 109 | (1ULL << INT_IDN_CA) | \ |
| 108 | INT_MASK(INT_UDN_CA) | \ | 110 | (1ULL << INT_UDN_CA) | \ |
| 109 | INT_MASK(INT_IDN_AVAIL) | \ | 111 | (1ULL << INT_IDN_AVAIL) | \ |
| 110 | INT_MASK(INT_UDN_AVAIL) | \ | 112 | (1ULL << INT_UDN_AVAIL) | \ |
| 111 | INT_MASK(INT_PERF_COUNT) | \ | 113 | (1ULL << INT_PERF_COUNT) | \ |
| 112 | INT_MASK(INT_INTCTRL_3) | \ | 114 | (1ULL << INT_INTCTRL_3) | \ |
| 113 | INT_MASK(INT_INTCTRL_2) | \ | 115 | (1ULL << INT_INTCTRL_2) | \ |
| 114 | INT_MASK(INT_INTCTRL_1) | \ | 116 | (1ULL << INT_INTCTRL_1) | \ |
| 115 | INT_MASK(INT_INTCTRL_0) | \ | 117 | (1ULL << INT_INTCTRL_0) | \ |
| 116 | INT_MASK(INT_BOOT_ACCESS) | \ | 118 | (1ULL << INT_BOOT_ACCESS) | \ |
| 117 | INT_MASK(INT_WORLD_ACCESS) | \ | 119 | (1ULL << INT_WORLD_ACCESS) | \ |
| 118 | INT_MASK(INT_I_ASID) | \ | 120 | (1ULL << INT_I_ASID) | \ |
| 119 | INT_MASK(INT_D_ASID) | \ | 121 | (1ULL << INT_D_ASID) | \ |
| 120 | INT_MASK(INT_DMA_ASID) | \ | 122 | (1ULL << INT_DMA_ASID) | \ |
| 121 | INT_MASK(INT_SNI_ASID) | \ | 123 | (1ULL << INT_SNI_ASID) | \ |
| 122 | INT_MASK(INT_DMA_CPL) | \ | 124 | (1ULL << INT_DMA_CPL) | \ |
| 123 | INT_MASK(INT_SN_CPL) | \ | 125 | (1ULL << INT_SN_CPL) | \ |
| 124 | INT_MASK(INT_DOUBLE_FAULT) | \ | 126 | (1ULL << INT_DOUBLE_FAULT) | \ |
| 125 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 127 | (1ULL << INT_AUX_PERF_COUNT) | \ |
| 126 | 0) | 128 | 0) |
| 127 | #define NONQUEUED_INTERRUPTS ( \ | 129 | #define NONQUEUED_INTERRUPTS ( \ |
| 128 | INT_MASK(INT_ITLB_MISS) | \ | 130 | (1ULL << INT_ITLB_MISS) | \ |
| 129 | INT_MASK(INT_ILL) | \ | 131 | (1ULL << INT_ILL) | \ |
| 130 | INT_MASK(INT_GPV) | \ | 132 | (1ULL << INT_GPV) | \ |
| 131 | INT_MASK(INT_SN_ACCESS) | \ | 133 | (1ULL << INT_SN_ACCESS) | \ |
| 132 | INT_MASK(INT_IDN_ACCESS) | \ | 134 | (1ULL << INT_IDN_ACCESS) | \ |
| 133 | INT_MASK(INT_UDN_ACCESS) | \ | 135 | (1ULL << INT_UDN_ACCESS) | \ |
| 134 | INT_MASK(INT_IDN_REFILL) | \ | 136 | (1ULL << INT_IDN_REFILL) | \ |
| 135 | INT_MASK(INT_UDN_REFILL) | \ | 137 | (1ULL << INT_UDN_REFILL) | \ |
| 136 | INT_MASK(INT_IDN_COMPLETE) | \ | 138 | (1ULL << INT_IDN_COMPLETE) | \ |
| 137 | INT_MASK(INT_UDN_COMPLETE) | \ | 139 | (1ULL << INT_UDN_COMPLETE) | \ |
| 138 | INT_MASK(INT_SWINT_3) | \ | 140 | (1ULL << INT_SWINT_3) | \ |
| 139 | INT_MASK(INT_SWINT_2) | \ | 141 | (1ULL << INT_SWINT_2) | \ |
| 140 | INT_MASK(INT_SWINT_1) | \ | 142 | (1ULL << INT_SWINT_1) | \ |
| 141 | INT_MASK(INT_SWINT_0) | \ | 143 | (1ULL << INT_SWINT_0) | \ |
| 142 | INT_MASK(INT_UNALIGN_DATA) | \ | 144 | (1ULL << INT_UNALIGN_DATA) | \ |
| 143 | INT_MASK(INT_DTLB_MISS) | \ | 145 | (1ULL << INT_DTLB_MISS) | \ |
| 144 | INT_MASK(INT_DTLB_ACCESS) | \ | 146 | (1ULL << INT_DTLB_ACCESS) | \ |
| 145 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 147 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
| 146 | 0) | 148 | 0) |
| 147 | #define CRITICAL_MASKED_INTERRUPTS ( \ | 149 | #define CRITICAL_MASKED_INTERRUPTS ( \ |
| 148 | INT_MASK(INT_MEM_ERROR) | \ | 150 | (1ULL << INT_MEM_ERROR) | \ |
| 149 | INT_MASK(INT_DMATLB_MISS) | \ | 151 | (1ULL << INT_DMATLB_MISS) | \ |
| 150 | INT_MASK(INT_DMATLB_ACCESS) | \ | 152 | (1ULL << INT_DMATLB_ACCESS) | \ |
| 151 | INT_MASK(INT_SNITLB_MISS) | \ | 153 | (1ULL << INT_SNITLB_MISS) | \ |
| 152 | INT_MASK(INT_SN_NOTIFY) | \ | 154 | (1ULL << INT_SN_NOTIFY) | \ |
| 153 | INT_MASK(INT_SN_FIREWALL) | \ | 155 | (1ULL << INT_SN_FIREWALL) | \ |
| 154 | INT_MASK(INT_IDN_FIREWALL) | \ | 156 | (1ULL << INT_IDN_FIREWALL) | \ |
| 155 | INT_MASK(INT_UDN_FIREWALL) | \ | 157 | (1ULL << INT_UDN_FIREWALL) | \ |
| 156 | INT_MASK(INT_TILE_TIMER) | \ | 158 | (1ULL << INT_TILE_TIMER) | \ |
| 157 | INT_MASK(INT_IDN_TIMER) | \ | 159 | (1ULL << INT_IDN_TIMER) | \ |
| 158 | INT_MASK(INT_UDN_TIMER) | \ | 160 | (1ULL << INT_UDN_TIMER) | \ |
| 159 | INT_MASK(INT_DMA_NOTIFY) | \ | 161 | (1ULL << INT_DMA_NOTIFY) | \ |
| 160 | INT_MASK(INT_IDN_CA) | \ | 162 | (1ULL << INT_IDN_CA) | \ |
| 161 | INT_MASK(INT_UDN_CA) | \ | 163 | (1ULL << INT_UDN_CA) | \ |
| 162 | INT_MASK(INT_IDN_AVAIL) | \ | 164 | (1ULL << INT_IDN_AVAIL) | \ |
| 163 | INT_MASK(INT_UDN_AVAIL) | \ | 165 | (1ULL << INT_UDN_AVAIL) | \ |
| 164 | INT_MASK(INT_PERF_COUNT) | \ | 166 | (1ULL << INT_PERF_COUNT) | \ |
| 165 | INT_MASK(INT_INTCTRL_3) | \ | 167 | (1ULL << INT_INTCTRL_3) | \ |
| 166 | INT_MASK(INT_INTCTRL_2) | \ | 168 | (1ULL << INT_INTCTRL_2) | \ |
| 167 | INT_MASK(INT_INTCTRL_1) | \ | 169 | (1ULL << INT_INTCTRL_1) | \ |
| 168 | INT_MASK(INT_INTCTRL_0) | \ | 170 | (1ULL << INT_INTCTRL_0) | \ |
| 169 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 171 | (1ULL << INT_AUX_PERF_COUNT) | \ |
| 170 | 0) | 172 | 0) |
| 171 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ | 173 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ |
| 172 | INT_MASK(INT_ITLB_MISS) | \ | 174 | (1ULL << INT_ITLB_MISS) | \ |
| 173 | INT_MASK(INT_ILL) | \ | 175 | (1ULL << INT_ILL) | \ |
| 174 | INT_MASK(INT_GPV) | \ | 176 | (1ULL << INT_GPV) | \ |
| 175 | INT_MASK(INT_SN_ACCESS) | \ | 177 | (1ULL << INT_SN_ACCESS) | \ |
| 176 | INT_MASK(INT_IDN_ACCESS) | \ | 178 | (1ULL << INT_IDN_ACCESS) | \ |
| 177 | INT_MASK(INT_UDN_ACCESS) | \ | 179 | (1ULL << INT_UDN_ACCESS) | \ |
| 178 | INT_MASK(INT_IDN_REFILL) | \ | 180 | (1ULL << INT_IDN_REFILL) | \ |
| 179 | INT_MASK(INT_UDN_REFILL) | \ | 181 | (1ULL << INT_UDN_REFILL) | \ |
| 180 | INT_MASK(INT_IDN_COMPLETE) | \ | 182 | (1ULL << INT_IDN_COMPLETE) | \ |
| 181 | INT_MASK(INT_UDN_COMPLETE) | \ | 183 | (1ULL << INT_UDN_COMPLETE) | \ |
| 182 | INT_MASK(INT_SWINT_3) | \ | 184 | (1ULL << INT_SWINT_3) | \ |
| 183 | INT_MASK(INT_SWINT_2) | \ | 185 | (1ULL << INT_SWINT_2) | \ |
| 184 | INT_MASK(INT_SWINT_1) | \ | 186 | (1ULL << INT_SWINT_1) | \ |
| 185 | INT_MASK(INT_SWINT_0) | \ | 187 | (1ULL << INT_SWINT_0) | \ |
| 186 | INT_MASK(INT_UNALIGN_DATA) | \ | 188 | (1ULL << INT_UNALIGN_DATA) | \ |
| 187 | INT_MASK(INT_DTLB_MISS) | \ | 189 | (1ULL << INT_DTLB_MISS) | \ |
| 188 | INT_MASK(INT_DTLB_ACCESS) | \ | 190 | (1ULL << INT_DTLB_ACCESS) | \ |
| 189 | INT_MASK(INT_BOOT_ACCESS) | \ | 191 | (1ULL << INT_BOOT_ACCESS) | \ |
| 190 | INT_MASK(INT_WORLD_ACCESS) | \ | 192 | (1ULL << INT_WORLD_ACCESS) | \ |
| 191 | INT_MASK(INT_I_ASID) | \ | 193 | (1ULL << INT_I_ASID) | \ |
| 192 | INT_MASK(INT_D_ASID) | \ | 194 | (1ULL << INT_D_ASID) | \ |
| 193 | INT_MASK(INT_DMA_ASID) | \ | 195 | (1ULL << INT_DMA_ASID) | \ |
| 194 | INT_MASK(INT_SNI_ASID) | \ | 196 | (1ULL << INT_SNI_ASID) | \ |
| 195 | INT_MASK(INT_DMA_CPL) | \ | 197 | (1ULL << INT_DMA_CPL) | \ |
| 196 | INT_MASK(INT_SN_CPL) | \ | 198 | (1ULL << INT_SN_CPL) | \ |
| 197 | INT_MASK(INT_DOUBLE_FAULT) | \ | 199 | (1ULL << INT_DOUBLE_FAULT) | \ |
| 198 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 200 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
| 199 | 0) | 201 | 0) |
| 200 | #define MASKABLE_INTERRUPTS ( \ | 202 | #define MASKABLE_INTERRUPTS ( \ |
| 201 | INT_MASK(INT_MEM_ERROR) | \ | 203 | (1ULL << INT_MEM_ERROR) | \ |
| 202 | INT_MASK(INT_IDN_REFILL) | \ | 204 | (1ULL << INT_IDN_REFILL) | \ |
| 203 | INT_MASK(INT_UDN_REFILL) | \ | 205 | (1ULL << INT_UDN_REFILL) | \ |
| 204 | INT_MASK(INT_IDN_COMPLETE) | \ | 206 | (1ULL << INT_IDN_COMPLETE) | \ |
| 205 | INT_MASK(INT_UDN_COMPLETE) | \ | 207 | (1ULL << INT_UDN_COMPLETE) | \ |
| 206 | INT_MASK(INT_DMATLB_MISS) | \ | 208 | (1ULL << INT_DMATLB_MISS) | \ |
| 207 | INT_MASK(INT_DMATLB_ACCESS) | \ | 209 | (1ULL << INT_DMATLB_ACCESS) | \ |
| 208 | INT_MASK(INT_SNITLB_MISS) | \ | 210 | (1ULL << INT_SNITLB_MISS) | \ |
| 209 | INT_MASK(INT_SN_NOTIFY) | \ | 211 | (1ULL << INT_SN_NOTIFY) | \ |
| 210 | INT_MASK(INT_SN_FIREWALL) | \ | 212 | (1ULL << INT_SN_FIREWALL) | \ |
| 211 | INT_MASK(INT_IDN_FIREWALL) | \ | 213 | (1ULL << INT_IDN_FIREWALL) | \ |
| 212 | INT_MASK(INT_UDN_FIREWALL) | \ | 214 | (1ULL << INT_UDN_FIREWALL) | \ |
| 213 | INT_MASK(INT_TILE_TIMER) | \ | 215 | (1ULL << INT_TILE_TIMER) | \ |
| 214 | INT_MASK(INT_IDN_TIMER) | \ | 216 | (1ULL << INT_IDN_TIMER) | \ |
| 215 | INT_MASK(INT_UDN_TIMER) | \ | 217 | (1ULL << INT_UDN_TIMER) | \ |
| 216 | INT_MASK(INT_DMA_NOTIFY) | \ | 218 | (1ULL << INT_DMA_NOTIFY) | \ |
| 217 | INT_MASK(INT_IDN_CA) | \ | 219 | (1ULL << INT_IDN_CA) | \ |
| 218 | INT_MASK(INT_UDN_CA) | \ | 220 | (1ULL << INT_UDN_CA) | \ |
| 219 | INT_MASK(INT_IDN_AVAIL) | \ | 221 | (1ULL << INT_IDN_AVAIL) | \ |
| 220 | INT_MASK(INT_UDN_AVAIL) | \ | 222 | (1ULL << INT_UDN_AVAIL) | \ |
| 221 | INT_MASK(INT_PERF_COUNT) | \ | 223 | (1ULL << INT_PERF_COUNT) | \ |
| 222 | INT_MASK(INT_INTCTRL_3) | \ | 224 | (1ULL << INT_INTCTRL_3) | \ |
| 223 | INT_MASK(INT_INTCTRL_2) | \ | 225 | (1ULL << INT_INTCTRL_2) | \ |
| 224 | INT_MASK(INT_INTCTRL_1) | \ | 226 | (1ULL << INT_INTCTRL_1) | \ |
| 225 | INT_MASK(INT_INTCTRL_0) | \ | 227 | (1ULL << INT_INTCTRL_0) | \ |
| 226 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 228 | (1ULL << INT_AUX_PERF_COUNT) | \ |
| 227 | 0) | 229 | 0) |
| 228 | #define UNMASKABLE_INTERRUPTS ( \ | 230 | #define UNMASKABLE_INTERRUPTS ( \ |
| 229 | INT_MASK(INT_ITLB_MISS) | \ | 231 | (1ULL << INT_ITLB_MISS) | \ |
| 230 | INT_MASK(INT_ILL) | \ | 232 | (1ULL << INT_ILL) | \ |
| 231 | INT_MASK(INT_GPV) | \ | 233 | (1ULL << INT_GPV) | \ |
| 232 | INT_MASK(INT_SN_ACCESS) | \ | 234 | (1ULL << INT_SN_ACCESS) | \ |
| 233 | INT_MASK(INT_IDN_ACCESS) | \ | 235 | (1ULL << INT_IDN_ACCESS) | \ |
| 234 | INT_MASK(INT_UDN_ACCESS) | \ | 236 | (1ULL << INT_UDN_ACCESS) | \ |
| 235 | INT_MASK(INT_SWINT_3) | \ | 237 | (1ULL << INT_SWINT_3) | \ |
| 236 | INT_MASK(INT_SWINT_2) | \ | 238 | (1ULL << INT_SWINT_2) | \ |
| 237 | INT_MASK(INT_SWINT_1) | \ | 239 | (1ULL << INT_SWINT_1) | \ |
| 238 | INT_MASK(INT_SWINT_0) | \ | 240 | (1ULL << INT_SWINT_0) | \ |
| 239 | INT_MASK(INT_UNALIGN_DATA) | \ | 241 | (1ULL << INT_UNALIGN_DATA) | \ |
| 240 | INT_MASK(INT_DTLB_MISS) | \ | 242 | (1ULL << INT_DTLB_MISS) | \ |
| 241 | INT_MASK(INT_DTLB_ACCESS) | \ | 243 | (1ULL << INT_DTLB_ACCESS) | \ |
| 242 | INT_MASK(INT_BOOT_ACCESS) | \ | 244 | (1ULL << INT_BOOT_ACCESS) | \ |
| 243 | INT_MASK(INT_WORLD_ACCESS) | \ | 245 | (1ULL << INT_WORLD_ACCESS) | \ |
| 244 | INT_MASK(INT_I_ASID) | \ | 246 | (1ULL << INT_I_ASID) | \ |
| 245 | INT_MASK(INT_D_ASID) | \ | 247 | (1ULL << INT_D_ASID) | \ |
| 246 | INT_MASK(INT_DMA_ASID) | \ | 248 | (1ULL << INT_DMA_ASID) | \ |
| 247 | INT_MASK(INT_SNI_ASID) | \ | 249 | (1ULL << INT_SNI_ASID) | \ |
| 248 | INT_MASK(INT_DMA_CPL) | \ | 250 | (1ULL << INT_DMA_CPL) | \ |
| 249 | INT_MASK(INT_SN_CPL) | \ | 251 | (1ULL << INT_SN_CPL) | \ |
| 250 | INT_MASK(INT_DOUBLE_FAULT) | \ | 252 | (1ULL << INT_DOUBLE_FAULT) | \ |
| 251 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 253 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
| 252 | 0) | 254 | 0) |
| 253 | #define SYNC_INTERRUPTS ( \ | 255 | #define SYNC_INTERRUPTS ( \ |
| 254 | INT_MASK(INT_ITLB_MISS) | \ | 256 | (1ULL << INT_ITLB_MISS) | \ |
| 255 | INT_MASK(INT_ILL) | \ | 257 | (1ULL << INT_ILL) | \ |
| 256 | INT_MASK(INT_GPV) | \ | 258 | (1ULL << INT_GPV) | \ |
| 257 | INT_MASK(INT_SN_ACCESS) | \ | 259 | (1ULL << INT_SN_ACCESS) | \ |
| 258 | INT_MASK(INT_IDN_ACCESS) | \ | 260 | (1ULL << INT_IDN_ACCESS) | \ |
| 259 | INT_MASK(INT_UDN_ACCESS) | \ | 261 | (1ULL << INT_UDN_ACCESS) | \ |
| 260 | INT_MASK(INT_IDN_REFILL) | \ | 262 | (1ULL << INT_IDN_REFILL) | \ |
| 261 | INT_MASK(INT_UDN_REFILL) | \ | 263 | (1ULL << INT_UDN_REFILL) | \ |
| 262 | INT_MASK(INT_IDN_COMPLETE) | \ | 264 | (1ULL << INT_IDN_COMPLETE) | \ |
| 263 | INT_MASK(INT_UDN_COMPLETE) | \ | 265 | (1ULL << INT_UDN_COMPLETE) | \ |
| 264 | INT_MASK(INT_SWINT_3) | \ | 266 | (1ULL << INT_SWINT_3) | \ |
| 265 | INT_MASK(INT_SWINT_2) | \ | 267 | (1ULL << INT_SWINT_2) | \ |
| 266 | INT_MASK(INT_SWINT_1) | \ | 268 | (1ULL << INT_SWINT_1) | \ |
| 267 | INT_MASK(INT_SWINT_0) | \ | 269 | (1ULL << INT_SWINT_0) | \ |
| 268 | INT_MASK(INT_UNALIGN_DATA) | \ | 270 | (1ULL << INT_UNALIGN_DATA) | \ |
| 269 | INT_MASK(INT_DTLB_MISS) | \ | 271 | (1ULL << INT_DTLB_MISS) | \ |
| 270 | INT_MASK(INT_DTLB_ACCESS) | \ | 272 | (1ULL << INT_DTLB_ACCESS) | \ |
| 271 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 273 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
| 272 | 0) | 274 | 0) |
| 273 | #define NON_SYNC_INTERRUPTS ( \ | 275 | #define NON_SYNC_INTERRUPTS ( \ |
| 274 | INT_MASK(INT_MEM_ERROR) | \ | 276 | (1ULL << INT_MEM_ERROR) | \ |
| 275 | INT_MASK(INT_DMATLB_MISS) | \ | 277 | (1ULL << INT_DMATLB_MISS) | \ |
| 276 | INT_MASK(INT_DMATLB_ACCESS) | \ | 278 | (1ULL << INT_DMATLB_ACCESS) | \ |
| 277 | INT_MASK(INT_SNITLB_MISS) | \ | 279 | (1ULL << INT_SNITLB_MISS) | \ |
| 278 | INT_MASK(INT_SN_NOTIFY) | \ | 280 | (1ULL << INT_SN_NOTIFY) | \ |
| 279 | INT_MASK(INT_SN_FIREWALL) | \ | 281 | (1ULL << INT_SN_FIREWALL) | \ |
| 280 | INT_MASK(INT_IDN_FIREWALL) | \ | 282 | (1ULL << INT_IDN_FIREWALL) | \ |
| 281 | INT_MASK(INT_UDN_FIREWALL) | \ | 283 | (1ULL << INT_UDN_FIREWALL) | \ |
| 282 | INT_MASK(INT_TILE_TIMER) | \ | 284 | (1ULL << INT_TILE_TIMER) | \ |
| 283 | INT_MASK(INT_IDN_TIMER) | \ | 285 | (1ULL << INT_IDN_TIMER) | \ |
| 284 | INT_MASK(INT_UDN_TIMER) | \ | 286 | (1ULL << INT_UDN_TIMER) | \ |
| 285 | INT_MASK(INT_DMA_NOTIFY) | \ | 287 | (1ULL << INT_DMA_NOTIFY) | \ |
| 286 | INT_MASK(INT_IDN_CA) | \ | 288 | (1ULL << INT_IDN_CA) | \ |
| 287 | INT_MASK(INT_UDN_CA) | \ | 289 | (1ULL << INT_UDN_CA) | \ |
| 288 | INT_MASK(INT_IDN_AVAIL) | \ | 290 | (1ULL << INT_IDN_AVAIL) | \ |
| 289 | INT_MASK(INT_UDN_AVAIL) | \ | 291 | (1ULL << INT_UDN_AVAIL) | \ |
| 290 | INT_MASK(INT_PERF_COUNT) | \ | 292 | (1ULL << INT_PERF_COUNT) | \ |
| 291 | INT_MASK(INT_INTCTRL_3) | \ | 293 | (1ULL << INT_INTCTRL_3) | \ |
| 292 | INT_MASK(INT_INTCTRL_2) | \ | 294 | (1ULL << INT_INTCTRL_2) | \ |
| 293 | INT_MASK(INT_INTCTRL_1) | \ | 295 | (1ULL << INT_INTCTRL_1) | \ |
| 294 | INT_MASK(INT_INTCTRL_0) | \ | 296 | (1ULL << INT_INTCTRL_0) | \ |
| 295 | INT_MASK(INT_BOOT_ACCESS) | \ | 297 | (1ULL << INT_BOOT_ACCESS) | \ |
| 296 | INT_MASK(INT_WORLD_ACCESS) | \ | 298 | (1ULL << INT_WORLD_ACCESS) | \ |
| 297 | INT_MASK(INT_I_ASID) | \ | 299 | (1ULL << INT_I_ASID) | \ |
| 298 | INT_MASK(INT_D_ASID) | \ | 300 | (1ULL << INT_D_ASID) | \ |
| 299 | INT_MASK(INT_DMA_ASID) | \ | 301 | (1ULL << INT_DMA_ASID) | \ |
| 300 | INT_MASK(INT_SNI_ASID) | \ | 302 | (1ULL << INT_SNI_ASID) | \ |
| 301 | INT_MASK(INT_DMA_CPL) | \ | 303 | (1ULL << INT_DMA_CPL) | \ |
| 302 | INT_MASK(INT_SN_CPL) | \ | 304 | (1ULL << INT_SN_CPL) | \ |
| 303 | INT_MASK(INT_DOUBLE_FAULT) | \ | 305 | (1ULL << INT_DOUBLE_FAULT) | \ |
| 304 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 306 | (1ULL << INT_AUX_PERF_COUNT) | \ |
| 305 | 0) | 307 | 0) |
| 306 | #endif /* !__ASSEMBLER__ */ | 308 | #endif /* !__ASSEMBLER__ */ |
| 307 | #endif /* !__ARCH_INTERRUPTS_H__ */ | 309 | #endif /* !__ARCH_INTERRUPTS_H__ */ |
diff --git a/arch/tile/include/uapi/arch/interrupts_64.h b/arch/tile/include/uapi/arch/interrupts_64.h index 5bb58b2e4e6f..13c9f9182348 100644 --- a/arch/tile/include/uapi/arch/interrupts_64.h +++ b/arch/tile/include/uapi/arch/interrupts_64.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #ifndef __ARCH_INTERRUPTS_H__ | 15 | #ifndef __ARCH_INTERRUPTS_H__ |
| 16 | #define __ARCH_INTERRUPTS_H__ | 16 | #define __ARCH_INTERRUPTS_H__ |
| 17 | 17 | ||
| 18 | #ifndef __KERNEL__ | ||
| 18 | /** Mask for an interrupt. */ | 19 | /** Mask for an interrupt. */ |
| 19 | #ifdef __ASSEMBLER__ | 20 | #ifdef __ASSEMBLER__ |
| 20 | /* Note: must handle breaking interrupts into high and low words manually. */ | 21 | /* Note: must handle breaking interrupts into high and low words manually. */ |
| @@ -22,6 +23,7 @@ | |||
| 22 | #else | 23 | #else |
| 23 | #define INT_MASK(intno) (1ULL << (intno)) | 24 | #define INT_MASK(intno) (1ULL << (intno)) |
| 24 | #endif | 25 | #endif |
| 26 | #endif | ||
| 25 | 27 | ||
| 26 | 28 | ||
| 27 | /** Where a given interrupt executes */ | 29 | /** Where a given interrupt executes */ |
| @@ -85,192 +87,192 @@ | |||
| 85 | 87 | ||
| 86 | #ifndef __ASSEMBLER__ | 88 | #ifndef __ASSEMBLER__ |
| 87 | #define QUEUED_INTERRUPTS ( \ | 89 | #define QUEUED_INTERRUPTS ( \ |
| 88 | INT_MASK(INT_MEM_ERROR) | \ | 90 | (1ULL << INT_MEM_ERROR) | \ |
| 89 | INT_MASK(INT_IDN_COMPLETE) | \ | 91 | (1ULL << INT_IDN_COMPLETE) | \ |
| 90 | INT_MASK(INT_UDN_COMPLETE) | \ | 92 | (1ULL << INT_UDN_COMPLETE) | \ |
| 91 | INT_MASK(INT_IDN_FIREWALL) | \ | 93 | (1ULL << INT_IDN_FIREWALL) | \ |
| 92 | INT_MASK(INT_UDN_FIREWALL) | \ | 94 | (1ULL << INT_UDN_FIREWALL) | \ |
| 93 | INT_MASK(INT_TILE_TIMER) | \ | 95 | (1ULL << INT_TILE_TIMER) | \ |
| 94 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 96 | (1ULL << INT_AUX_TILE_TIMER) | \ |
| 95 | INT_MASK(INT_IDN_TIMER) | \ | 97 | (1ULL << INT_IDN_TIMER) | \ |
| 96 | INT_MASK(INT_UDN_TIMER) | \ | 98 | (1ULL << INT_UDN_TIMER) | \ |
| 97 | INT_MASK(INT_IDN_AVAIL) | \ | 99 | (1ULL << INT_IDN_AVAIL) | \ |
| 98 | INT_MASK(INT_UDN_AVAIL) | \ | 100 | (1ULL << INT_UDN_AVAIL) | \ |
| 99 | INT_MASK(INT_IPI_3) | \ | 101 | (1ULL << INT_IPI_3) | \ |
| 100 | INT_MASK(INT_IPI_2) | \ | 102 | (1ULL << INT_IPI_2) | \ |
| 101 | INT_MASK(INT_IPI_1) | \ | 103 | (1ULL << INT_IPI_1) | \ |
| 102 | INT_MASK(INT_IPI_0) | \ | 104 | (1ULL << INT_IPI_0) | \ |
| 103 | INT_MASK(INT_PERF_COUNT) | \ | 105 | (1ULL << INT_PERF_COUNT) | \ |
| 104 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 106 | (1ULL << INT_AUX_PERF_COUNT) | \ |
| 105 | INT_MASK(INT_INTCTRL_3) | \ | 107 | (1ULL << INT_INTCTRL_3) | \ |
| 106 | INT_MASK(INT_INTCTRL_2) | \ | 108 | (1ULL << INT_INTCTRL_2) | \ |
| 107 | INT_MASK(INT_INTCTRL_1) | \ | 109 | (1ULL << INT_INTCTRL_1) | \ |
| 108 | INT_MASK(INT_INTCTRL_0) | \ | 110 | (1ULL << INT_INTCTRL_0) | \ |
| 109 | INT_MASK(INT_BOOT_ACCESS) | \ | 111 | (1ULL << INT_BOOT_ACCESS) | \ |
| 110 | INT_MASK(INT_WORLD_ACCESS) | \ | 112 | (1ULL << INT_WORLD_ACCESS) | \ |
| 111 | INT_MASK(INT_I_ASID) | \ | 113 | (1ULL << INT_I_ASID) | \ |
| 112 | INT_MASK(INT_D_ASID) | \ | 114 | (1ULL << INT_D_ASID) | \ |
| 113 | INT_MASK(INT_DOUBLE_FAULT) | \ | 115 | (1ULL << INT_DOUBLE_FAULT) | \ |
| 114 | 0) | 116 | 0) |
| 115 | #define NONQUEUED_INTERRUPTS ( \ | 117 | #define NONQUEUED_INTERRUPTS ( \ |
| 116 | INT_MASK(INT_SINGLE_STEP_3) | \ | 118 | (1ULL << INT_SINGLE_STEP_3) | \ |
| 117 | INT_MASK(INT_SINGLE_STEP_2) | \ | 119 | (1ULL << INT_SINGLE_STEP_2) | \ |
| 118 | INT_MASK(INT_SINGLE_STEP_1) | \ | 120 | (1ULL << INT_SINGLE_STEP_1) | \ |
| 119 | INT_MASK(INT_SINGLE_STEP_0) | \ | 121 | (1ULL << INT_SINGLE_STEP_0) | \ |
| 120 | INT_MASK(INT_ITLB_MISS) | \ | 122 | (1ULL << INT_ITLB_MISS) | \ |
| 121 | INT_MASK(INT_ILL) | \ | 123 | (1ULL << INT_ILL) | \ |
| 122 | INT_MASK(INT_GPV) | \ | 124 | (1ULL << INT_GPV) | \ |
| 123 | INT_MASK(INT_IDN_ACCESS) | \ | 125 | (1ULL << INT_IDN_ACCESS) | \ |
| 124 | INT_MASK(INT_UDN_ACCESS) | \ | 126 | (1ULL << INT_UDN_ACCESS) | \ |
| 125 | INT_MASK(INT_SWINT_3) | \ | 127 | (1ULL << INT_SWINT_3) | \ |
| 126 | INT_MASK(INT_SWINT_2) | \ | 128 | (1ULL << INT_SWINT_2) | \ |
| 127 | INT_MASK(INT_SWINT_1) | \ | 129 | (1ULL << INT_SWINT_1) | \ |
| 128 | INT_MASK(INT_SWINT_0) | \ | 130 | (1ULL << INT_SWINT_0) | \ |
| 129 | INT_MASK(INT_ILL_TRANS) | \ | 131 | (1ULL << INT_ILL_TRANS) | \ |
| 130 | INT_MASK(INT_UNALIGN_DATA) | \ | 132 | (1ULL << INT_UNALIGN_DATA) | \ |
| 131 | INT_MASK(INT_DTLB_MISS) | \ | 133 | (1ULL << INT_DTLB_MISS) | \ |
| 132 | INT_MASK(INT_DTLB_ACCESS) | \ | 134 | (1ULL << INT_DTLB_ACCESS) | \ |
| 133 | 0) | 135 | 0) |
| 134 | #define CRITICAL_MASKED_INTERRUPTS ( \ | 136 | #define CRITICAL_MASKED_INTERRUPTS ( \ |
| 135 | INT_MASK(INT_MEM_ERROR) | \ | 137 | (1ULL << INT_MEM_ERROR) | \ |
| 136 | INT_MASK(INT_SINGLE_STEP_3) | \ | 138 | (1ULL << INT_SINGLE_STEP_3) | \ |
| 137 | INT_MASK(INT_SINGLE_STEP_2) | \ | 139 | (1ULL << INT_SINGLE_STEP_2) | \ |
| 138 | INT_MASK(INT_SINGLE_STEP_1) | \ | 140 | (1ULL << INT_SINGLE_STEP_1) | \ |
| 139 | INT_MASK(INT_SINGLE_STEP_0) | \ | 141 | (1ULL << INT_SINGLE_STEP_0) | \ |
| 140 | INT_MASK(INT_IDN_COMPLETE) | \ | 142 | (1ULL << INT_IDN_COMPLETE) | \ |
| 141 | INT_MASK(INT_UDN_COMPLETE) | \ | 143 | (1ULL << INT_UDN_COMPLETE) | \ |
| 142 | INT_MASK(INT_IDN_FIREWALL) | \ | 144 | (1ULL << INT_IDN_FIREWALL) | \ |
| 143 | INT_MASK(INT_UDN_FIREWALL) | \ | 145 | (1ULL << INT_UDN_FIREWALL) | \ |
| 144 | INT_MASK(INT_TILE_TIMER) | \ | 146 | (1ULL << INT_TILE_TIMER) | \ |
| 145 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 147 | (1ULL << INT_AUX_TILE_TIMER) | \ |
| 146 | INT_MASK(INT_IDN_TIMER) | \ | 148 | (1ULL << INT_IDN_TIMER) | \ |
| 147 | INT_MASK(INT_UDN_TIMER) | \ | 149 | (1ULL << INT_UDN_TIMER) | \ |
| 148 | INT_MASK(INT_IDN_AVAIL) | \ | 150 | (1ULL << INT_IDN_AVAIL) | \ |
| 149 | INT_MASK(INT_UDN_AVAIL) | \ | 151 | (1ULL << INT_UDN_AVAIL) | \ |
| 150 | INT_MASK(INT_IPI_3) | \ | 152 | (1ULL << INT_IPI_3) | \ |
| 151 | INT_MASK(INT_IPI_2) | \ | 153 | (1ULL << INT_IPI_2) | \ |
| 152 | INT_MASK(INT_IPI_1) | \ | 154 | (1ULL << INT_IPI_1) | \ |
| 153 | INT_MASK(INT_IPI_0) | \ | 155 | (1ULL << INT_IPI_0) | \ |
| 154 | INT_MASK(INT_PERF_COUNT) | \ | 156 | (1ULL << INT_PERF_COUNT) | \ |
| 155 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 157 | (1ULL << INT_AUX_PERF_COUNT) | \ |
| 156 | INT_MASK(INT_INTCTRL_3) | \ | 158 | (1ULL << INT_INTCTRL_3) | \ |
| 157 | INT_MASK(INT_INTCTRL_2) | \ | 159 | (1ULL << INT_INTCTRL_2) | \ |
| 158 | INT_MASK(INT_INTCTRL_1) | \ | 160 | (1ULL << INT_INTCTRL_1) | \ |
| 159 | INT_MASK(INT_INTCTRL_0) | \ | 161 | (1ULL << INT_INTCTRL_0) | \ |
| 160 | 0) | 162 | 0) |
| 161 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ | 163 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ |
| 162 | INT_MASK(INT_ITLB_MISS) | \ | 164 | (1ULL << INT_ITLB_MISS) | \ |
| 163 | INT_MASK(INT_ILL) | \ | 165 | (1ULL << INT_ILL) | \ |
| 164 | INT_MASK(INT_GPV) | \ | 166 | (1ULL << INT_GPV) | \ |
| 165 | INT_MASK(INT_IDN_ACCESS) | \ | 167 | (1ULL << INT_IDN_ACCESS) | \ |
| 166 | INT_MASK(INT_UDN_ACCESS) | \ | 168 | (1ULL << INT_UDN_ACCESS) | \ |
| 167 | INT_MASK(INT_SWINT_3) | \ | 169 | (1ULL << INT_SWINT_3) | \ |
| 168 | INT_MASK(INT_SWINT_2) | \ | 170 | (1ULL << INT_SWINT_2) | \ |
| 169 | INT_MASK(INT_SWINT_1) | \ | 171 | (1ULL << INT_SWINT_1) | \ |
| 170 | INT_MASK(INT_SWINT_0) | \ | 172 | (1ULL << INT_SWINT_0) | \ |
| 171 | INT_MASK(INT_ILL_TRANS) | \ | 173 | (1ULL << INT_ILL_TRANS) | \ |
| 172 | INT_MASK(INT_UNALIGN_DATA) | \ | 174 | (1ULL << INT_UNALIGN_DATA) | \ |
| 173 | INT_MASK(INT_DTLB_MISS) | \ | 175 | (1ULL << INT_DTLB_MISS) | \ |
| 174 | INT_MASK(INT_DTLB_ACCESS) | \ | 176 | (1ULL << INT_DTLB_ACCESS) | \ |
| 175 | INT_MASK(INT_BOOT_ACCESS) | \ | 177 | (1ULL << INT_BOOT_ACCESS) | \ |
| 176 | INT_MASK(INT_WORLD_ACCESS) | \ | 178 | (1ULL << INT_WORLD_ACCESS) | \ |
| 177 | INT_MASK(INT_I_ASID) | \ | 179 | (1ULL << INT_I_ASID) | \ |
| 178 | INT_MASK(INT_D_ASID) | \ | 180 | (1ULL << INT_D_ASID) | \ |
| 179 | INT_MASK(INT_DOUBLE_FAULT) | \ | 181 | (1ULL << INT_DOUBLE_FAULT) | \ |
| 180 | 0) | 182 | 0) |
| 181 | #define MASKABLE_INTERRUPTS ( \ | 183 | #define MASKABLE_INTERRUPTS ( \ |
| 182 | INT_MASK(INT_MEM_ERROR) | \ | 184 | (1ULL << INT_MEM_ERROR) | \ |
| 183 | INT_MASK(INT_SINGLE_STEP_3) | \ | 185 | (1ULL << INT_SINGLE_STEP_3) | \ |
| 184 | INT_MASK(INT_SINGLE_STEP_2) | \ | 186 | (1ULL << INT_SINGLE_STEP_2) | \ |
| 185 | INT_MASK(INT_SINGLE_STEP_1) | \ | 187 | (1ULL << INT_SINGLE_STEP_1) | \ |
| 186 | INT_MASK(INT_SINGLE_STEP_0) | \ | 188 | (1ULL << INT_SINGLE_STEP_0) | \ |
| 187 | INT_MASK(INT_IDN_COMPLETE) | \ | 189 | (1ULL << INT_IDN_COMPLETE) | \ |
| 188 | INT_MASK(INT_UDN_COMPLETE) | \ | 190 | (1ULL << INT_UDN_COMPLETE) | \ |
| 189 | INT_MASK(INT_IDN_FIREWALL) | \ | 191 | (1ULL << INT_IDN_FIREWALL) | \ |
| 190 | INT_MASK(INT_UDN_FIREWALL) | \ | 192 | (1ULL << INT_UDN_FIREWALL) | \ |
| 191 | INT_MASK(INT_TILE_TIMER) | \ | 193 | (1ULL << INT_TILE_TIMER) | \ |
| 192 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 194 | (1ULL << INT_AUX_TILE_TIMER) | \ |
| 193 | INT_MASK(INT_IDN_TIMER) | \ | 195 | (1ULL << INT_IDN_TIMER) | \ |
| 194 | INT_MASK(INT_UDN_TIMER) | \ | 196 | (1ULL << INT_UDN_TIMER) | \ |
| 195 | INT_MASK(INT_IDN_AVAIL) | \ | 197 | (1ULL << INT_IDN_AVAIL) | \ |
| 196 | INT_MASK(INT_UDN_AVAIL) | \ | 198 | (1ULL << INT_UDN_AVAIL) | \ |
| 197 | INT_MASK(INT_IPI_3) | \ | 199 | (1ULL << INT_IPI_3) | \ |
| 198 | INT_MASK(INT_IPI_2) | \ | 200 | (1ULL << INT_IPI_2) | \ |
| 199 | INT_MASK(INT_IPI_1) | \ | 201 | (1ULL << INT_IPI_1) | \ |
| 200 | INT_MASK(INT_IPI_0) | \ | 202 | (1ULL << INT_IPI_0) | \ |
| 201 | INT_MASK(INT_PERF_COUNT) | \ | 203 | (1ULL << INT_PERF_COUNT) | \ |
| 202 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 204 | (1ULL << INT_AUX_PERF_COUNT) | \ |
| 203 | INT_MASK(INT_INTCTRL_3) | \ | 205 | (1ULL << INT_INTCTRL_3) | \ |
| 204 | INT_MASK(INT_INTCTRL_2) | \ | 206 | (1ULL << INT_INTCTRL_2) | \ |
| 205 | INT_MASK(INT_INTCTRL_1) | \ | 207 | (1ULL << INT_INTCTRL_1) | \ |
| 206 | INT_MASK(INT_INTCTRL_0) | \ | 208 | (1ULL << INT_INTCTRL_0) | \ |
| 207 | 0) | 209 | 0) |
| 208 | #define UNMASKABLE_INTERRUPTS ( \ | 210 | #define UNMASKABLE_INTERRUPTS ( \ |
| 209 | INT_MASK(INT_ITLB_MISS) | \ | 211 | (1ULL << INT_ITLB_MISS) | \ |
| 210 | INT_MASK(INT_ILL) | \ | 212 | (1ULL << INT_ILL) | \ |
| 211 | INT_MASK(INT_GPV) | \ | 213 | (1ULL << INT_GPV) | \ |
| 212 | INT_MASK(INT_IDN_ACCESS) | \ | 214 | (1ULL << INT_IDN_ACCESS) | \ |
| 213 | INT_MASK(INT_UDN_ACCESS) | \ | 215 | (1ULL << INT_UDN_ACCESS) | \ |
| 214 | INT_MASK(INT_SWINT_3) | \ | 216 | (1ULL << INT_SWINT_3) | \ |
| 215 | INT_MASK(INT_SWINT_2) | \ | 217 | (1ULL << INT_SWINT_2) | \ |
| 216 | INT_MASK(INT_SWINT_1) | \ | 218 | (1ULL << INT_SWINT_1) | \ |
| 217 | INT_MASK(INT_SWINT_0) | \ | 219 | (1ULL << INT_SWINT_0) | \ |
| 218 | INT_MASK(INT_ILL_TRANS) | \ | 220 | (1ULL << INT_ILL_TRANS) | \ |
| 219 | INT_MASK(INT_UNALIGN_DATA) | \ | 221 | (1ULL << INT_UNALIGN_DATA) | \ |
| 220 | INT_MASK(INT_DTLB_MISS) | \ | 222 | (1ULL << INT_DTLB_MISS) | \ |
| 221 | INT_MASK(INT_DTLB_ACCESS) | \ | 223 | (1ULL << INT_DTLB_ACCESS) | \ |
| 222 | INT_MASK(INT_BOOT_ACCESS) | \ | 224 | (1ULL << INT_BOOT_ACCESS) | \ |
| 223 | INT_MASK(INT_WORLD_ACCESS) | \ | 225 | (1ULL << INT_WORLD_ACCESS) | \ |
| 224 | INT_MASK(INT_I_ASID) | \ | 226 | (1ULL << INT_I_ASID) | \ |
| 225 | INT_MASK(INT_D_ASID) | \ | 227 | (1ULL << INT_D_ASID) | \ |
| 226 | INT_MASK(INT_DOUBLE_FAULT) | \ | 228 | (1ULL << INT_DOUBLE_FAULT) | \ |
| 227 | 0) | 229 | 0) |
| 228 | #define SYNC_INTERRUPTS ( \ | 230 | #define SYNC_INTERRUPTS ( \ |
| 229 | INT_MASK(INT_SINGLE_STEP_3) | \ | 231 | (1ULL << INT_SINGLE_STEP_3) | \ |
| 230 | INT_MASK(INT_SINGLE_STEP_2) | \ | 232 | (1ULL << INT_SINGLE_STEP_2) | \ |
| 231 | INT_MASK(INT_SINGLE_STEP_1) | \ | 233 | (1ULL << INT_SINGLE_STEP_1) | \ |
| 232 | INT_MASK(INT_SINGLE_STEP_0) | \ | 234 | (1ULL << INT_SINGLE_STEP_0) | \ |
| 233 | INT_MASK(INT_IDN_COMPLETE) | \ | 235 | (1ULL << INT_IDN_COMPLETE) | \ |
| 234 | INT_MASK(INT_UDN_COMPLETE) | \ | 236 | (1ULL << INT_UDN_COMPLETE) | \ |
| 235 | INT_MASK(INT_ITLB_MISS) | \ | 237 | (1ULL << INT_ITLB_MISS) | \ |
| 236 | INT_MASK(INT_ILL) | \ | 238 | (1ULL << INT_ILL) | \ |
| 237 | INT_MASK(INT_GPV) | \ | 239 | (1ULL << INT_GPV) | \ |
| 238 | INT_MASK(INT_IDN_ACCESS) | \ | 240 | (1ULL << INT_IDN_ACCESS) | \ |
| 239 | INT_MASK(INT_UDN_ACCESS) | \ | 241 | (1ULL << INT_UDN_ACCESS) | \ |
| 240 | INT_MASK(INT_SWINT_3) | \ | 242 | (1ULL << INT_SWINT_3) | \ |
| 241 | INT_MASK(INT_SWINT_2) | \ | 243 | (1ULL << INT_SWINT_2) | \ |
| 242 | INT_MASK(INT_SWINT_1) | \ | 244 | (1ULL << INT_SWINT_1) | \ |
| 243 | INT_MASK(INT_SWINT_0) | \ | 245 | (1ULL << INT_SWINT_0) | \ |
| 244 | INT_MASK(INT_ILL_TRANS) | \ | 246 | (1ULL << INT_ILL_TRANS) | \ |
| 245 | INT_MASK(INT_UNALIGN_DATA) | \ | 247 | (1ULL << INT_UNALIGN_DATA) | \ |
| 246 | INT_MASK(INT_DTLB_MISS) | \ | 248 | (1ULL << INT_DTLB_MISS) | \ |
| 247 | INT_MASK(INT_DTLB_ACCESS) | \ | 249 | (1ULL << INT_DTLB_ACCESS) | \ |
| 248 | 0) | 250 | 0) |
| 249 | #define NON_SYNC_INTERRUPTS ( \ | 251 | #define NON_SYNC_INTERRUPTS ( \ |
| 250 | INT_MASK(INT_MEM_ERROR) | \ | 252 | (1ULL << INT_MEM_ERROR) | \ |
| 251 | INT_MASK(INT_IDN_FIREWALL) | \ | 253 | (1ULL << INT_IDN_FIREWALL) | \ |
| 252 | INT_MASK(INT_UDN_FIREWALL) | \ | 254 | (1ULL << INT_UDN_FIREWALL) | \ |
| 253 | INT_MASK(INT_TILE_TIMER) | \ | 255 | (1ULL << INT_TILE_TIMER) | \ |
| 254 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 256 | (1ULL << INT_AUX_TILE_TIMER) | \ |
| 255 | INT_MASK(INT_IDN_TIMER) | \ | 257 | (1ULL << INT_IDN_TIMER) | \ |
| 256 | INT_MASK(INT_UDN_TIMER) | \ | 258 | (1ULL << INT_UDN_TIMER) | \ |
| 257 | INT_MASK(INT_IDN_AVAIL) | \ | 259 | (1ULL << INT_IDN_AVAIL) | \ |
| 258 | INT_MASK(INT_UDN_AVAIL) | \ | 260 | (1ULL << INT_UDN_AVAIL) | \ |
| 259 | INT_MASK(INT_IPI_3) | \ | 261 | (1ULL << INT_IPI_3) | \ |
| 260 | INT_MASK(INT_IPI_2) | \ | 262 | (1ULL << INT_IPI_2) | \ |
| 261 | INT_MASK(INT_IPI_1) | \ | 263 | (1ULL << INT_IPI_1) | \ |
| 262 | INT_MASK(INT_IPI_0) | \ | 264 | (1ULL << INT_IPI_0) | \ |
| 263 | INT_MASK(INT_PERF_COUNT) | \ | 265 | (1ULL << INT_PERF_COUNT) | \ |
| 264 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 266 | (1ULL << INT_AUX_PERF_COUNT) | \ |
| 265 | INT_MASK(INT_INTCTRL_3) | \ | 267 | (1ULL << INT_INTCTRL_3) | \ |
| 266 | INT_MASK(INT_INTCTRL_2) | \ | 268 | (1ULL << INT_INTCTRL_2) | \ |
| 267 | INT_MASK(INT_INTCTRL_1) | \ | 269 | (1ULL << INT_INTCTRL_1) | \ |
| 268 | INT_MASK(INT_INTCTRL_0) | \ | 270 | (1ULL << INT_INTCTRL_0) | \ |
| 269 | INT_MASK(INT_BOOT_ACCESS) | \ | 271 | (1ULL << INT_BOOT_ACCESS) | \ |
| 270 | INT_MASK(INT_WORLD_ACCESS) | \ | 272 | (1ULL << INT_WORLD_ACCESS) | \ |
| 271 | INT_MASK(INT_I_ASID) | \ | 273 | (1ULL << INT_I_ASID) | \ |
| 272 | INT_MASK(INT_D_ASID) | \ | 274 | (1ULL << INT_D_ASID) | \ |
| 273 | INT_MASK(INT_DOUBLE_FAULT) | \ | 275 | (1ULL << INT_DOUBLE_FAULT) | \ |
| 274 | 0) | 276 | 0) |
| 275 | #endif /* !__ASSEMBLER__ */ | 277 | #endif /* !__ASSEMBLER__ */ |
| 276 | #endif /* !__ARCH_INTERRUPTS_H__ */ | 278 | #endif /* !__ARCH_INTERRUPTS_H__ */ |
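Note on the interrupt-mask rewrite above: INT_MASK() is open-coded as (1ULL << n), presumably so the generically named macro no longer has to be defined by this exported header and so the 64-bit width of each shift is explicit. A minimal sketch of the pitfall the 1ULL form avoids; the interrupt numbers here are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical interrupt numbers, for illustration only. */
#define INT_LOW   5
#define INT_HIGH 40   /* >= 32, so the shift must be done in 64 bits */

int main(void)
{
        /* (1 << INT_HIGH) would be undefined behaviour: the literal 1 is a
         * 32-bit int, so a shift by 40 overflows the type. */
        uint64_t mask = (1ULL << INT_LOW) | (1ULL << INT_HIGH);

        printf("mask = %#llx\n", (unsigned long long)mask);
        return 0;
}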
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S index 54bc9a6678e8..4ea080902654 100644 --- a/arch/tile/kernel/intvec_64.S +++ b/arch/tile/kernel/intvec_64.S | |||
| @@ -1035,7 +1035,9 @@ handle_syscall: | |||
| 1035 | /* Ensure that the syscall number is within the legal range. */ | 1035 | /* Ensure that the syscall number is within the legal range. */ |
| 1036 | { | 1036 | { |
| 1037 | moveli r20, hw2(sys_call_table) | 1037 | moveli r20, hw2(sys_call_table) |
| 1038 | #ifdef CONFIG_COMPAT | ||
| 1038 | blbs r30, .Lcompat_syscall | 1039 | blbs r30, .Lcompat_syscall |
| 1040 | #endif | ||
| 1039 | } | 1041 | } |
| 1040 | { | 1042 | { |
| 1041 | cmpltu r21, TREG_SYSCALL_NR_NAME, r21 | 1043 | cmpltu r21, TREG_SYSCALL_NR_NAME, r21 |
| @@ -1093,6 +1095,7 @@ handle_syscall: | |||
| 1093 | j .Lresume_userspace /* jump into middle of interrupt_return */ | 1095 | j .Lresume_userspace /* jump into middle of interrupt_return */ |
| 1094 | } | 1096 | } |
| 1095 | 1097 | ||
| 1098 | #ifdef CONFIG_COMPAT | ||
| 1096 | .Lcompat_syscall: | 1099 | .Lcompat_syscall: |
| 1097 | /* | 1100 | /* |
| 1098 | * Load the base of the compat syscall table in r20, and | 1101 | * Load the base of the compat syscall table in r20, and |
| @@ -1117,6 +1120,7 @@ handle_syscall: | |||
| 1117 | { move r15, r4; addxi r4, r4, 0 } | 1120 | { move r15, r4; addxi r4, r4, 0 } |
| 1118 | { move r16, r5; addxi r5, r5, 0 } | 1121 | { move r16, r5; addxi r5, r5, 0 } |
| 1119 | j .Lload_syscall_pointer | 1122 | j .Lload_syscall_pointer |
| 1123 | #endif | ||
| 1120 | 1124 | ||
| 1121 | .Linvalid_syscall: | 1125 | .Linvalid_syscall: |
| 1122 | /* Report an invalid syscall back to the user program */ | 1126 | /* Report an invalid syscall back to the user program */ |
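The intvec_64.S hunks wrap the compat-syscall branch and the .Lcompat_syscall body in #ifdef CONFIG_COMPAT, so a no-compat configuration does not pull in the compat dispatch path or its references to compat-only symbols. A rough C-level sketch of the control flow being gated; the real code is hand-written TILE-Gx assembly, and the range check and register plumbing are omitted here:

/* Illustration only: simplified signatures, not the kernel's real types. */
typedef long (*syscall_fn)(void);

extern syscall_fn sys_call_table[];
#ifdef CONFIG_COMPAT
extern syscall_fn compat_sys_call_table[];
#endif

long dispatch_syscall(unsigned long nr, int is_compat_task)
{
#ifdef CONFIG_COMPAT
        /* Take the compat path only when compat support is built in;
         * without the guard this reference would not exist at link time. */
        if (is_compat_task)
                return compat_sys_call_table[nr]();
#endif
        return sys_call_table[nr]();
}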
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 0e5661e7d00d..caf93ae11793 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
| @@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t); | |||
| 159 | int copy_thread(unsigned long clone_flags, unsigned long sp, | 159 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
| 160 | unsigned long arg, struct task_struct *p) | 160 | unsigned long arg, struct task_struct *p) |
| 161 | { | 161 | { |
| 162 | struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs(); | 162 | struct pt_regs *childregs = task_pt_regs(p); |
| 163 | unsigned long ksp; | 163 | unsigned long ksp; |
| 164 | unsigned long *callee_regs; | 164 | unsigned long *callee_regs; |
| 165 | 165 | ||
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c index baa3d905fee2..d1b5c913ae72 100644 --- a/arch/tile/kernel/reboot.c +++ b/arch/tile/kernel/reboot.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/reboot.h> | 16 | #include <linux/reboot.h> |
| 17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
| 18 | #include <linux/pm.h> | 18 | #include <linux/pm.h> |
| 19 | #include <linux/export.h> | ||
| 19 | #include <asm/page.h> | 20 | #include <asm/page.h> |
| 20 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
| 21 | #include <hv/hypervisor.h> | 22 | #include <hv/hypervisor.h> |
| @@ -49,3 +50,4 @@ void machine_restart(char *cmd) | |||
| 49 | 50 | ||
| 50 | /* No interesting distinction to be made here. */ | 51 | /* No interesting distinction to be made here. */ |
| 51 | void (*pm_power_off)(void) = NULL; | 52 | void (*pm_power_off)(void) = NULL; |
| 53 | EXPORT_SYMBOL(pm_power_off); | ||
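Exporting pm_power_off lets modular code, for example a platform power-controller driver, install its own power-off handler; without the EXPORT_SYMBOL the assignment would not resolve when such a driver is built as a module. A minimal hedged sketch with a made-up driver name:

#include <linux/module.h>
#include <linux/pm.h>

static void myboard_power_off(void)
{
        /* ask the hypervisor / PMIC to cut power; details omitted */
}

static int __init myboard_init(void)
{
        pm_power_off = myboard_power_off;       /* relies on the export above */
        return 0;
}

static void __exit myboard_exit(void)
{
        if (pm_power_off == myboard_power_off)
                pm_power_off = NULL;
}

module_init(myboard_init);
module_exit(myboard_exit);
MODULE_LICENSE("GPL");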
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6a649a4462d3..d1e15f7b59c6 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <linux/timex.h> | 31 | #include <linux/timex.h> |
| 32 | #include <linux/hugetlb.h> | 32 | #include <linux/hugetlb.h> |
| 33 | #include <linux/start_kernel.h> | 33 | #include <linux/start_kernel.h> |
| 34 | #include <linux/screen_info.h> | ||
| 34 | #include <asm/setup.h> | 35 | #include <asm/setup.h> |
| 35 | #include <asm/sections.h> | 36 | #include <asm/sections.h> |
| 36 | #include <asm/cacheflush.h> | 37 | #include <asm/cacheflush.h> |
| @@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; } | |||
| 49 | /* Chip information */ | 50 | /* Chip information */ |
| 50 | char chip_model[64] __write_once; | 51 | char chip_model[64] __write_once; |
| 51 | 52 | ||
| 53 | #ifdef CONFIG_VT | ||
| 54 | struct screen_info screen_info; | ||
| 55 | #endif | ||
| 56 | |||
| 52 | struct pglist_data node_data[MAX_NUMNODES] __read_mostly; | 57 | struct pglist_data node_data[MAX_NUMNODES] __read_mostly; |
| 53 | EXPORT_SYMBOL(node_data); | 58 | EXPORT_SYMBOL(node_data); |
| 54 | 59 | ||
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index b2f44c28dda6..ed258b8ae320 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c | |||
| @@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) | |||
| 112 | p->pc, p->sp, p->ex1); | 112 | p->pc, p->sp, p->ex1); |
| 113 | p = NULL; | 113 | p = NULL; |
| 114 | } | 114 | } |
| 115 | if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) | 115 | if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0) |
| 116 | return p; | 116 | return p; |
| 117 | return NULL; | 117 | return NULL; |
| 118 | } | 118 | } |
| @@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace) | |||
| 484 | { | 484 | { |
| 485 | save_stack_trace_tsk(NULL, trace); | 485 | save_stack_trace_tsk(NULL, trace); |
| 486 | } | 486 | } |
| 487 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
| 487 | 488 | ||
| 488 | #endif | 489 | #endif |
| 489 | 490 | ||
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c index db4fb89e12d8..8f8ad814b139 100644 --- a/arch/tile/lib/cacheflush.c +++ b/arch/tile/lib/cacheflush.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | * more details. | 12 | * more details. |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <linux/export.h> | ||
| 15 | #include <asm/page.h> | 16 | #include <asm/page.h> |
| 16 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
| 17 | #include <arch/icache.h> | 18 | #include <arch/icache.h> |
| @@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh) | |||
| 165 | __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf); | 166 | __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf); |
| 166 | #endif | 167 | #endif |
| 167 | } | 168 | } |
| 169 | EXPORT_SYMBOL_GPL(finv_buffer_remote); | ||
diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c index fdc403614d12..75947edccb26 100644 --- a/arch/tile/lib/cpumask.c +++ b/arch/tile/lib/cpumask.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
| 17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
| 18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
| 19 | #include <linux/export.h> | ||
| 19 | 20 | ||
| 20 | /* | 21 | /* |
| 21 | * Allow cropping out bits beyond the end of the array. | 22 | * Allow cropping out bits beyond the end of the array. |
| @@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits) | |||
| 50 | } while (*bp != '\0' && *bp != '\n'); | 51 | } while (*bp != '\0' && *bp != '\n'); |
| 51 | return 0; | 52 | return 0; |
| 52 | } | 53 | } |
| 54 | EXPORT_SYMBOL(bitmap_parselist_crop); | ||
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index dd5f0a33fdaf..4385cb6fa00a 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c | |||
| @@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel); | |||
| 55 | EXPORT_SYMBOL(hv_dev_close); | 55 | EXPORT_SYMBOL(hv_dev_close); |
| 56 | EXPORT_SYMBOL(hv_sysconf); | 56 | EXPORT_SYMBOL(hv_sysconf); |
| 57 | EXPORT_SYMBOL(hv_confstr); | 57 | EXPORT_SYMBOL(hv_confstr); |
| 58 | EXPORT_SYMBOL(hv_get_rtc); | ||
| 59 | EXPORT_SYMBOL(hv_set_rtc); | ||
| 58 | 60 | ||
| 59 | /* libgcc.a */ | 61 | /* libgcc.a */ |
| 60 | uint32_t __udivsi3(uint32_t dividend, uint32_t divisor); | 62 | uint32_t __udivsi3(uint32_t dividend, uint32_t divisor); |
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 5f7868dcd6d4..1ae911939a18 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c | |||
| @@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home) | |||
| 408 | __set_pte(ptep, pte_set_home(pteval, home)); | 408 | __set_pte(ptep, pte_set_home(pteval, home)); |
| 409 | } | 409 | } |
| 410 | } | 410 | } |
| 411 | EXPORT_SYMBOL(homecache_change_page_home); | ||
| 411 | 412 | ||
| 412 | struct page *homecache_alloc_pages(gfp_t gfp_mask, | 413 | struct page *homecache_alloc_pages(gfp_t gfp_mask, |
| 413 | unsigned int order, int home) | 414 | unsigned int order, int home) |
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 4acb5feba1fb..172a02a6ad14 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h | |||
| @@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
| 170 | consistent_sync(vaddr, size, direction); | 170 | consistent_sync(vaddr, size, direction); |
| 171 | } | 171 | } |
| 172 | 172 | ||
| 173 | /* Not supported for now */ | ||
| 174 | static inline int dma_mmap_coherent(struct device *dev, | ||
| 175 | struct vm_area_struct *vma, void *cpu_addr, | ||
| 176 | dma_addr_t dma_addr, size_t size) | ||
| 177 | { | ||
| 178 | return -EINVAL; | ||
| 179 | } | ||
| 180 | |||
| 181 | static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 182 | void *cpu_addr, dma_addr_t dma_addr, | ||
| 183 | size_t size) | ||
| 184 | { | ||
| 185 | return -EINVAL; | ||
| 186 | } | ||
| 187 | |||
| 173 | #endif /* _XTENSA_DMA_MAPPING_H */ | 188 | #endif /* _XTENSA_DMA_MAPPING_H */ |
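The two xtensa stubs appear to exist so that callers of the generic DMA mmap/sgtable helpers keep building; returning -EINVAL reports the operation as unsupported rather than breaking the link. A hedged sketch of how a caller might react; remap_into_vma_by_hand() is a made-up fallback, not a real kernel function:

#include <linux/errno.h>
#include <linux/dma-mapping.h>

/* Hypothetical fallback, declared here for illustration only. */
extern int remap_into_vma_by_hand(struct vm_area_struct *vma,
                                  void *cpu_addr, size_t size);

static int map_dma_buffer(struct device *dev, struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        int ret = dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);

        if (ret == -EINVAL)                     /* arch says "unsupported" */
                ret = remap_into_vma_by_hand(vma, cpu_addr, size);
        return ret;
}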
diff --git a/block/genhd.c b/block/genhd.c index 9a289d7c84bb..3993ebf4135f 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
| @@ -35,6 +35,8 @@ static DEFINE_IDR(ext_devt_idr); | |||
| 35 | 35 | ||
| 36 | static struct device_type disk_type; | 36 | static struct device_type disk_type; |
| 37 | 37 | ||
| 38 | static void disk_check_events(struct disk_events *ev, | ||
| 39 | unsigned int *clearing_ptr); | ||
| 38 | static void disk_alloc_events(struct gendisk *disk); | 40 | static void disk_alloc_events(struct gendisk *disk); |
| 39 | static void disk_add_events(struct gendisk *disk); | 41 | static void disk_add_events(struct gendisk *disk); |
| 40 | static void disk_del_events(struct gendisk *disk); | 42 | static void disk_del_events(struct gendisk *disk); |
| @@ -1549,6 +1551,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) | |||
| 1549 | const struct block_device_operations *bdops = disk->fops; | 1551 | const struct block_device_operations *bdops = disk->fops; |
| 1550 | struct disk_events *ev = disk->ev; | 1552 | struct disk_events *ev = disk->ev; |
| 1551 | unsigned int pending; | 1553 | unsigned int pending; |
| 1554 | unsigned int clearing = mask; | ||
| 1552 | 1555 | ||
| 1553 | if (!ev) { | 1556 | if (!ev) { |
| 1554 | /* for drivers still using the old ->media_changed method */ | 1557 | /* for drivers still using the old ->media_changed method */ |
| @@ -1558,34 +1561,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) | |||
| 1558 | return 0; | 1561 | return 0; |
| 1559 | } | 1562 | } |
| 1560 | 1563 | ||
| 1561 | /* tell the workfn about the events being cleared */ | 1564 | disk_block_events(disk); |
| 1565 | |||
| 1566 | /* | ||
| 1567 | * store the union of mask and ev->clearing on the stack so that the | ||
| 1568 | * race with disk_flush_events does not cause ambiguity (ev->clearing | ||
| 1569 | * can still be modified even if events are blocked). | ||
| 1570 | */ | ||
| 1562 | spin_lock_irq(&ev->lock); | 1571 | spin_lock_irq(&ev->lock); |
| 1563 | ev->clearing |= mask; | 1572 | clearing |= ev->clearing; |
| 1573 | ev->clearing = 0; | ||
| 1564 | spin_unlock_irq(&ev->lock); | 1574 | spin_unlock_irq(&ev->lock); |
| 1565 | 1575 | ||
| 1566 | /* unconditionally schedule event check and wait for it to finish */ | 1576 | disk_check_events(ev, &clearing); |
| 1567 | disk_block_events(disk); | 1577 | /* |
| 1568 | queue_delayed_work(system_freezable_wq, &ev->dwork, 0); | 1578 | * if ev->clearing is not 0, disk_flush_events was called in the |
| 1569 | flush_delayed_work(&ev->dwork); | 1579 | * middle of this function, so we want to run the workfn without delay. |
| 1570 | __disk_unblock_events(disk, false); | 1580 | */ |
| 1581 | __disk_unblock_events(disk, ev->clearing ? true : false); | ||
| 1571 | 1582 | ||
| 1572 | /* then, fetch and clear pending events */ | 1583 | /* then, fetch and clear pending events */ |
| 1573 | spin_lock_irq(&ev->lock); | 1584 | spin_lock_irq(&ev->lock); |
| 1574 | WARN_ON_ONCE(ev->clearing & mask); /* cleared by workfn */ | ||
| 1575 | pending = ev->pending & mask; | 1585 | pending = ev->pending & mask; |
| 1576 | ev->pending &= ~mask; | 1586 | ev->pending &= ~mask; |
| 1577 | spin_unlock_irq(&ev->lock); | 1587 | spin_unlock_irq(&ev->lock); |
| 1588 | WARN_ON_ONCE(clearing & mask); | ||
| 1578 | 1589 | ||
| 1579 | return pending; | 1590 | return pending; |
| 1580 | } | 1591 | } |
| 1581 | 1592 | ||
| 1593 | /* | ||
| 1594 | * Separate this part out so that a different pointer for clearing_ptr can be | ||
| 1595 | * passed in for disk_clear_events. | ||
| 1596 | */ | ||
| 1582 | static void disk_events_workfn(struct work_struct *work) | 1597 | static void disk_events_workfn(struct work_struct *work) |
| 1583 | { | 1598 | { |
| 1584 | struct delayed_work *dwork = to_delayed_work(work); | 1599 | struct delayed_work *dwork = to_delayed_work(work); |
| 1585 | struct disk_events *ev = container_of(dwork, struct disk_events, dwork); | 1600 | struct disk_events *ev = container_of(dwork, struct disk_events, dwork); |
| 1601 | |||
| 1602 | disk_check_events(ev, &ev->clearing); | ||
| 1603 | } | ||
| 1604 | |||
| 1605 | static void disk_check_events(struct disk_events *ev, | ||
| 1606 | unsigned int *clearing_ptr) | ||
| 1607 | { | ||
| 1586 | struct gendisk *disk = ev->disk; | 1608 | struct gendisk *disk = ev->disk; |
| 1587 | char *envp[ARRAY_SIZE(disk_uevents) + 1] = { }; | 1609 | char *envp[ARRAY_SIZE(disk_uevents) + 1] = { }; |
| 1588 | unsigned int clearing = ev->clearing; | 1610 | unsigned int clearing = *clearing_ptr; |
| 1589 | unsigned int events; | 1611 | unsigned int events; |
| 1590 | unsigned long intv; | 1612 | unsigned long intv; |
| 1591 | int nr_events = 0, i; | 1613 | int nr_events = 0, i; |
| @@ -1598,7 +1620,7 @@ static void disk_events_workfn(struct work_struct *work) | |||
| 1598 | 1620 | ||
| 1599 | events &= ~ev->pending; | 1621 | events &= ~ev->pending; |
| 1600 | ev->pending |= events; | 1622 | ev->pending |= events; |
| 1601 | ev->clearing &= ~clearing; | 1623 | *clearing_ptr &= ~clearing; |
| 1602 | 1624 | ||
| 1603 | intv = disk_events_poll_jiffies(disk); | 1625 | intv = disk_events_poll_jiffies(disk); |
| 1604 | if (!ev->block && intv) | 1626 | if (!ev->block && intv) |
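The disk_clear_events() rework above stops bouncing the clear request through ev->clearing and a queued work item: the function merges mask with whatever a concurrent disk_flush_events() queued into a stack variable under ev->lock, zeroes ev->clearing, and runs the check synchronously, so a racing flush can no longer make the result ambiguous. A condensed, hedged restatement of that snapshot-under-lock pattern; events_state and check_events() stand in for the real struct disk_events and disk_check_events():

#include <linux/spinlock.h>

struct events_state {
        spinlock_t lock;
        unsigned int clearing;  /* events a flusher asked to clear */
        unsigned int pending;   /* events detected but not yet reported */
};

static void check_events(struct events_state *ev, unsigned int *clearing_ptr);

static unsigned int clear_events(struct events_state *ev, unsigned int mask)
{
        unsigned int clearing = mask;
        unsigned int pending;

        spin_lock_irq(&ev->lock);
        clearing |= ev->clearing;       /* take over queued clear requests */
        ev->clearing = 0;               /* so nobody else consumes them    */
        spin_unlock_irq(&ev->lock);

        check_events(ev, &clearing);    /* run the check with our snapshot */

        spin_lock_irq(&ev->lock);
        pending = ev->pending & mask;   /* report and clear what we own    */
        ev->pending &= ~mask;
        spin_unlock_irq(&ev->lock);

        return pending;
}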
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h index 6a0955e6d4fc..53ecac5a2161 100644 --- a/drivers/atm/iphase.h +++ b/drivers/atm/iphase.h | |||
| @@ -636,82 +636,82 @@ struct rx_buf_desc { | |||
| 636 | #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE | 636 | #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE |
| 637 | #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE | 637 | #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE |
| 638 | 638 | ||
| 639 | typedef volatile u_int freg_t; | 639 | typedef volatile u_int ffreg_t; |
| 640 | typedef u_int rreg_t; | 640 | typedef u_int rreg_t; |
| 641 | 641 | ||
| 642 | typedef struct _ffredn_t { | 642 | typedef struct _ffredn_t { |
| 643 | freg_t idlehead_high; /* Idle cell header (high) */ | 643 | ffreg_t idlehead_high; /* Idle cell header (high) */ |
| 644 | freg_t idlehead_low; /* Idle cell header (low) */ | 644 | ffreg_t idlehead_low; /* Idle cell header (low) */ |
| 645 | freg_t maxrate; /* Maximum rate */ | 645 | ffreg_t maxrate; /* Maximum rate */ |
| 646 | freg_t stparms; /* Traffic Management Parameters */ | 646 | ffreg_t stparms; /* Traffic Management Parameters */ |
| 647 | freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ | 647 | ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ |
| 648 | freg_t rm_type; /* */ | 648 | ffreg_t rm_type; /* */ |
| 649 | u_int filler5[0x17 - 0x06]; | 649 | u_int filler5[0x17 - 0x06]; |
| 650 | freg_t cmd_reg; /* Command register */ | 650 | ffreg_t cmd_reg; /* Command register */ |
| 651 | u_int filler18[0x20 - 0x18]; | 651 | u_int filler18[0x20 - 0x18]; |
| 652 | freg_t cbr_base; /* CBR Pointer Base */ | 652 | ffreg_t cbr_base; /* CBR Pointer Base */ |
| 653 | freg_t vbr_base; /* VBR Pointer Base */ | 653 | ffreg_t vbr_base; /* VBR Pointer Base */ |
| 654 | freg_t abr_base; /* ABR Pointer Base */ | 654 | ffreg_t abr_base; /* ABR Pointer Base */ |
| 655 | freg_t ubr_base; /* UBR Pointer Base */ | 655 | ffreg_t ubr_base; /* UBR Pointer Base */ |
| 656 | u_int filler24; | 656 | u_int filler24; |
| 657 | freg_t vbrwq_base; /* VBR Wait Queue Base */ | 657 | ffreg_t vbrwq_base; /* VBR Wait Queue Base */ |
| 658 | freg_t abrwq_base; /* ABR Wait Queue Base */ | 658 | ffreg_t abrwq_base; /* ABR Wait Queue Base */ |
| 659 | freg_t ubrwq_base; /* UBR Wait Queue Base */ | 659 | ffreg_t ubrwq_base; /* UBR Wait Queue Base */ |
| 660 | freg_t vct_base; /* Main VC Table Base */ | 660 | ffreg_t vct_base; /* Main VC Table Base */ |
| 661 | freg_t vcte_base; /* Extended Main VC Table Base */ | 661 | ffreg_t vcte_base; /* Extended Main VC Table Base */ |
| 662 | u_int filler2a[0x2C - 0x2A]; | 662 | u_int filler2a[0x2C - 0x2A]; |
| 663 | freg_t cbr_tab_beg; /* CBR Table Begin */ | 663 | ffreg_t cbr_tab_beg; /* CBR Table Begin */ |
| 664 | freg_t cbr_tab_end; /* CBR Table End */ | 664 | ffreg_t cbr_tab_end; /* CBR Table End */ |
| 665 | freg_t cbr_pointer; /* CBR Pointer */ | 665 | ffreg_t cbr_pointer; /* CBR Pointer */ |
| 666 | u_int filler2f[0x30 - 0x2F]; | 666 | u_int filler2f[0x30 - 0x2F]; |
| 667 | freg_t prq_st_adr; /* Packet Ready Queue Start Address */ | 667 | ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */ |
| 668 | freg_t prq_ed_adr; /* Packet Ready Queue End Address */ | 668 | ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */ |
| 669 | freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ | 669 | ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ |
| 670 | freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ | 670 | ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ |
| 671 | freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ | 671 | ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ |
| 672 | freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ | 672 | ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ |
| 673 | freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ | 673 | ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ |
| 674 | freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ | 674 | ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ |
| 675 | u_int filler38[0x40 - 0x38]; | 675 | u_int filler38[0x40 - 0x38]; |
| 676 | freg_t queue_base; /* Base address for PRQ and TCQ */ | 676 | ffreg_t queue_base; /* Base address for PRQ and TCQ */ |
| 677 | freg_t desc_base; /* Base address of descriptor table */ | 677 | ffreg_t desc_base; /* Base address of descriptor table */ |
| 678 | u_int filler42[0x45 - 0x42]; | 678 | u_int filler42[0x45 - 0x42]; |
| 679 | freg_t mode_reg_0; /* Mode register 0 */ | 679 | ffreg_t mode_reg_0; /* Mode register 0 */ |
| 680 | freg_t mode_reg_1; /* Mode register 1 */ | 680 | ffreg_t mode_reg_1; /* Mode register 1 */ |
| 681 | freg_t intr_status_reg;/* Interrupt Status register */ | 681 | ffreg_t intr_status_reg;/* Interrupt Status register */ |
| 682 | freg_t mask_reg; /* Mask Register */ | 682 | ffreg_t mask_reg; /* Mask Register */ |
| 683 | freg_t cell_ctr_high1; /* Total cell transfer count (high) */ | 683 | ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */ |
| 684 | freg_t cell_ctr_lo1; /* Total cell transfer count (low) */ | 684 | ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */ |
| 685 | freg_t state_reg; /* Status register */ | 685 | ffreg_t state_reg; /* Status register */ |
| 686 | u_int filler4c[0x58 - 0x4c]; | 686 | u_int filler4c[0x58 - 0x4c]; |
| 687 | freg_t curr_desc_num; /* Contains the current descriptor num */ | 687 | ffreg_t curr_desc_num; /* Contains the current descriptor num */ |
| 688 | freg_t next_desc; /* Next descriptor */ | 688 | ffreg_t next_desc; /* Next descriptor */ |
| 689 | freg_t next_vc; /* Next VC */ | 689 | ffreg_t next_vc; /* Next VC */ |
| 690 | u_int filler5b[0x5d - 0x5b]; | 690 | u_int filler5b[0x5d - 0x5b]; |
| 691 | freg_t present_slot_cnt;/* Present slot count */ | 691 | ffreg_t present_slot_cnt;/* Present slot count */ |
| 692 | u_int filler5e[0x6a - 0x5e]; | 692 | u_int filler5e[0x6a - 0x5e]; |
| 693 | freg_t new_desc_num; /* New descriptor number */ | 693 | ffreg_t new_desc_num; /* New descriptor number */ |
| 694 | freg_t new_vc; /* New VC */ | 694 | ffreg_t new_vc; /* New VC */ |
| 695 | freg_t sched_tbl_ptr; /* Schedule table pointer */ | 695 | ffreg_t sched_tbl_ptr; /* Schedule table pointer */ |
| 696 | freg_t vbrwq_wptr; /* VBR wait queue write pointer */ | 696 | ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */ |
| 697 | freg_t vbrwq_rptr; /* VBR wait queue read pointer */ | 697 | ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */ |
| 698 | freg_t abrwq_wptr; /* ABR wait queue write pointer */ | 698 | ffreg_t abrwq_wptr; /* ABR wait queue write pointer */ |
| 699 | freg_t abrwq_rptr; /* ABR wait queue read pointer */ | 699 | ffreg_t abrwq_rptr; /* ABR wait queue read pointer */ |
| 700 | freg_t ubrwq_wptr; /* UBR wait queue write pointer */ | 700 | ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */ |
| 701 | freg_t ubrwq_rptr; /* UBR wait queue read pointer */ | 701 | ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */ |
| 702 | freg_t cbr_vc; /* CBR VC */ | 702 | ffreg_t cbr_vc; /* CBR VC */ |
| 703 | freg_t vbr_sb_vc; /* VBR SB VC */ | 703 | ffreg_t vbr_sb_vc; /* VBR SB VC */ |
| 704 | freg_t abr_sb_vc; /* ABR SB VC */ | 704 | ffreg_t abr_sb_vc; /* ABR SB VC */ |
| 705 | freg_t ubr_sb_vc; /* UBR SB VC */ | 705 | ffreg_t ubr_sb_vc; /* UBR SB VC */ |
| 706 | freg_t vbr_next_link; /* VBR next link */ | 706 | ffreg_t vbr_next_link; /* VBR next link */ |
| 707 | freg_t abr_next_link; /* ABR next link */ | 707 | ffreg_t abr_next_link; /* ABR next link */ |
| 708 | freg_t ubr_next_link; /* UBR next link */ | 708 | ffreg_t ubr_next_link; /* UBR next link */ |
| 709 | u_int filler7a[0x7c-0x7a]; | 709 | u_int filler7a[0x7c-0x7a]; |
| 710 | freg_t out_rate_head; /* Out of rate head */ | 710 | ffreg_t out_rate_head; /* Out of rate head */ |
| 711 | u_int filler7d[0xca-0x7d]; /* pad out to full address space */ | 711 | u_int filler7d[0xca-0x7d]; /* pad out to full address space */ |
| 712 | freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ | 712 | ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ |
| 713 | freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ | 713 | ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ |
| 714 | u_int fillercc[0x100-0xcc]; /* pad out to full address space */ | 714 | u_int fillercc[0x100-0xcc]; /* pad out to full address space */ |
| 715 | } ffredn_t; | 715 | } ffredn_t; |
| 716 | 716 | ||
| 717 | typedef struct _rfredn_t { | 717 | typedef struct _rfredn_t { |
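The blanket freg_t to ffreg_t rename in iphase.h is mechanical; the likely motivation is that the driver-local, volatile-qualified freg_t can collide with an identically named typedef provided by some architecture headers, producing conflicting-type errors in allmodconfig-style builds. A tiny hedged illustration of that class of clash; the colliding definitions are hypothetical:

/* If an arch header already provides something like
 *         typedef unsigned int freg_t;
 * then a driver-local
 *         typedef volatile u_int freg_t;
 * is a redefinition with conflicting qualifiers and breaks the build.
 * Giving the driver type its own name side-steps the collision: */
typedef volatile unsigned int ffreg_t;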
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 19e3fbfd5757..cb0c45488572 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h | |||
| @@ -94,11 +94,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc); | |||
| 94 | #ifdef CONFIG_BCMA_DRIVER_GPIO | 94 | #ifdef CONFIG_BCMA_DRIVER_GPIO |
| 95 | /* driver_gpio.c */ | 95 | /* driver_gpio.c */ |
| 96 | int bcma_gpio_init(struct bcma_drv_cc *cc); | 96 | int bcma_gpio_init(struct bcma_drv_cc *cc); |
| 97 | int bcma_gpio_unregister(struct bcma_drv_cc *cc); | ||
| 97 | #else | 98 | #else |
| 98 | static inline int bcma_gpio_init(struct bcma_drv_cc *cc) | 99 | static inline int bcma_gpio_init(struct bcma_drv_cc *cc) |
| 99 | { | 100 | { |
| 100 | return -ENOTSUPP; | 101 | return -ENOTSUPP; |
| 101 | } | 102 | } |
| 103 | static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc) | ||
| 104 | { | ||
| 105 | return 0; | ||
| 106 | } | ||
| 102 | #endif /* CONFIG_BCMA_DRIVER_GPIO */ | 107 | #endif /* CONFIG_BCMA_DRIVER_GPIO */ |
| 103 | 108 | ||
| 104 | #endif | 109 | #endif |
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c index dbda91e4dff5..1f0b83e18f68 100644 --- a/drivers/bcma/driver_chipcommon_nflash.c +++ b/drivers/bcma/driver_chipcommon_nflash.c | |||
| @@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc) | |||
| 21 | struct bcma_bus *bus = cc->core->bus; | 21 | struct bcma_bus *bus = cc->core->bus; |
| 22 | 22 | ||
| 23 | if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 && | 23 | if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 && |
| 24 | cc->core->id.rev != 0x38) { | 24 | cc->core->id.rev != 38) { |
| 25 | bcma_err(bus, "NAND flash on unsupported board!\n"); | 25 | bcma_err(bus, "NAND flash on unsupported board!\n"); |
| 26 | return -ENOTSUPP; | 26 | return -ENOTSUPP; |
| 27 | } | 27 | } |
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 9a6f585da2d9..71f755c06fc6 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c | |||
| @@ -96,3 +96,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) | |||
| 96 | 96 | ||
| 97 | return gpiochip_add(chip); | 97 | return gpiochip_add(chip); |
| 98 | } | 98 | } |
| 99 | |||
| 100 | int bcma_gpio_unregister(struct bcma_drv_cc *cc) | ||
| 101 | { | ||
| 102 | return gpiochip_remove(&cc->gpio); | ||
| 103 | } | ||
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 4a92f647b58b..324f9debda88 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c | |||
| @@ -268,6 +268,13 @@ int bcma_bus_register(struct bcma_bus *bus) | |||
| 268 | void bcma_bus_unregister(struct bcma_bus *bus) | 268 | void bcma_bus_unregister(struct bcma_bus *bus) |
| 269 | { | 269 | { |
| 270 | struct bcma_device *cores[3]; | 270 | struct bcma_device *cores[3]; |
| 271 | int err; | ||
| 272 | |||
| 273 | err = bcma_gpio_unregister(&bus->drv_cc); | ||
| 274 | if (err == -EBUSY) | ||
| 275 | bcma_err(bus, "Some GPIOs are still in use.\n"); | ||
| 276 | else if (err) | ||
| 277 | bcma_err(bus, "Can not unregister GPIO driver: %i\n", err); | ||
| 271 | 278 | ||
| 272 | cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K); | 279 | cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K); |
| 273 | cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE); | 280 | cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE); |
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index f58a4a4b4dfb..2b8303ad63c9 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
| @@ -168,7 +168,7 @@ static void wake_all_senders(struct drbd_tconn *tconn) { | |||
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | /* must hold resource->req_lock */ | 170 | /* must hold resource->req_lock */ |
| 171 | static void start_new_tl_epoch(struct drbd_tconn *tconn) | 171 | void start_new_tl_epoch(struct drbd_tconn *tconn) |
| 172 | { | 172 | { |
| 173 | /* no point closing an epoch, if it is empty, anyways. */ | 173 | /* no point closing an epoch, if it is empty, anyways. */ |
| 174 | if (tconn->current_tle_writes == 0) | 174 | if (tconn->current_tle_writes == 0) |
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 016de6b8bb57..c08d22964d06 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h | |||
| @@ -267,6 +267,7 @@ struct bio_and_error { | |||
| 267 | int error; | 267 | int error; |
| 268 | }; | 268 | }; |
| 269 | 269 | ||
| 270 | extern void start_new_tl_epoch(struct drbd_tconn *tconn); | ||
| 270 | extern void drbd_req_destroy(struct kref *kref); | 271 | extern void drbd_req_destroy(struct kref *kref); |
| 271 | extern void _req_may_be_done(struct drbd_request *req, | 272 | extern void _req_may_be_done(struct drbd_request *req, |
| 272 | struct bio_and_error *m); | 273 | struct bio_and_error *m); |
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 53bf6182bac4..0fe220cfb9e9 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c | |||
| @@ -931,6 +931,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, | |||
| 931 | enum drbd_state_rv rv = SS_SUCCESS; | 931 | enum drbd_state_rv rv = SS_SUCCESS; |
| 932 | enum sanitize_state_warnings ssw; | 932 | enum sanitize_state_warnings ssw; |
| 933 | struct after_state_chg_work *ascw; | 933 | struct after_state_chg_work *ascw; |
| 934 | bool did_remote, should_do_remote; | ||
| 934 | 935 | ||
| 935 | os = drbd_read_state(mdev); | 936 | os = drbd_read_state(mdev); |
| 936 | 937 | ||
| @@ -981,11 +982,17 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, | |||
| 981 | (os.disk != D_DISKLESS && ns.disk == D_DISKLESS)) | 982 | (os.disk != D_DISKLESS && ns.disk == D_DISKLESS)) |
| 982 | atomic_inc(&mdev->local_cnt); | 983 | atomic_inc(&mdev->local_cnt); |
| 983 | 984 | ||
| 985 | did_remote = drbd_should_do_remote(mdev->state); | ||
| 984 | mdev->state.i = ns.i; | 986 | mdev->state.i = ns.i; |
| 987 | should_do_remote = drbd_should_do_remote(mdev->state); | ||
| 985 | mdev->tconn->susp = ns.susp; | 988 | mdev->tconn->susp = ns.susp; |
| 986 | mdev->tconn->susp_nod = ns.susp_nod; | 989 | mdev->tconn->susp_nod = ns.susp_nod; |
| 987 | mdev->tconn->susp_fen = ns.susp_fen; | 990 | mdev->tconn->susp_fen = ns.susp_fen; |
| 988 | 991 | ||
| 992 | /* put replicated vs not-replicated requests in separate epochs */ | ||
| 993 | if (did_remote != should_do_remote) | ||
| 994 | start_new_tl_epoch(mdev->tconn); | ||
| 995 | |||
| 989 | if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) | 996 | if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) |
| 990 | drbd_print_uuids(mdev, "attached to UUIDs"); | 997 | drbd_print_uuids(mdev, "attached to UUIDs"); |
| 991 | 998 | ||
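Taken together, the three drbd hunks export start_new_tl_epoch() and call it from __drbd_set_state() whenever drbd_should_do_remote() changes across the state update, so requests issued while the peer was reachable never share a transfer-log epoch with purely local ones. A generic, hedged sketch of that "predicate flipped across a state commit" shape; the names below are illustrative, not drbd's:

#include <stdbool.h>

struct dev_state { int flags; };

static bool replicates(const struct dev_state *s) { return s->flags & 1; }
static void start_new_epoch(void) { /* epoch boundary marker */ }

void commit_state(struct dev_state *cur, struct dev_state next)
{
        bool did = replicates(cur);     /* before the commit */
        *cur = next;
        bool should = replicates(cur);  /* after the commit  */

        if (did != should)
                start_new_epoch();      /* keep replicated and local-only
                                         * requests in separate epochs */
}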
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 9694dd99bbbc..3fd100990453 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
| @@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data) | |||
| 626 | } | 626 | } |
| 627 | } | 627 | } |
| 628 | 628 | ||
| 629 | if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { | 629 | if (cmdto_cnt) { |
| 630 | print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); | 630 | print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); |
| 631 | 631 | if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { | |
| 632 | mtip_restart_port(port); | 632 | mtip_restart_port(port); |
| 633 | wake_up_interruptible(&port->svc_wait); | ||
| 634 | } | ||
| 633 | clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); | 635 | clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); |
| 634 | wake_up_interruptible(&port->svc_wait); | ||
| 635 | } | 636 | } |
| 636 | 637 | ||
| 637 | if (port->ic_pause_timer) { | 638 | if (port->ic_pause_timer) { |
| @@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd) | |||
| 3887 | * Delete our gendisk structure. This also removes the device | 3888 | * Delete our gendisk structure. This also removes the device |
| 3888 | * from /dev | 3889 | * from /dev |
| 3889 | */ | 3890 | */ |
| 3890 | del_gendisk(dd->disk); | 3891 | if (dd->disk) { |
| 3892 | if (dd->disk->queue) | ||
| 3893 | del_gendisk(dd->disk); | ||
| 3894 | else | ||
| 3895 | put_disk(dd->disk); | ||
| 3896 | } | ||
| 3891 | 3897 | ||
| 3892 | spin_lock(&rssd_index_lock); | 3898 | spin_lock(&rssd_index_lock); |
| 3893 | ida_remove(&rssd_index_ida, dd->index); | 3899 | ida_remove(&rssd_index_ida, dd->index); |
| @@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd) | |||
| 3921 | "Shutting down %s ...\n", dd->disk->disk_name); | 3927 | "Shutting down %s ...\n", dd->disk->disk_name); |
| 3922 | 3928 | ||
| 3923 | /* Delete our gendisk structure, and cleanup the blk queue. */ | 3929 | /* Delete our gendisk structure, and cleanup the blk queue. */ |
| 3924 | del_gendisk(dd->disk); | 3930 | if (dd->disk) { |
| 3931 | if (dd->disk->queue) | ||
| 3932 | del_gendisk(dd->disk); | ||
| 3933 | else | ||
| 3934 | put_disk(dd->disk); | ||
| 3935 | } | ||
| 3936 | |||
| 3925 | 3937 | ||
| 3926 | spin_lock(&rssd_index_lock); | 3938 | spin_lock(&rssd_index_lock); |
| 3927 | ida_remove(&rssd_index_ida, dd->index); | 3939 | ida_remove(&rssd_index_ida, dd->index); |
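Both mtip32xx teardown paths now delete the gendisk only when it has a request queue attached, which indicates add_disk() actually ran; a disk that was allocated but never added is released with put_disk() instead, presumably because removing a never-added disk is not a valid operation and only the reference needs dropping. A compact hedged sketch of the same guard; the helper name is made up:

#include <linux/genhd.h>

/* Teardown that copes with both fully added and half-constructed disks. */
static void remove_disk_safely(struct gendisk *disk)
{
        if (!disk)
                return;
        if (disk->queue)
                del_gendisk(disk);      /* disk was added: normal removal */
        else
                put_disk(disk);         /* never added: just drop the ref */
}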
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 74374fb762aa..5ac841ff6cc7 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
| @@ -161,10 +161,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
| 161 | static void make_response(struct xen_blkif *blkif, u64 id, | 161 | static void make_response(struct xen_blkif *blkif, u64 id, |
| 162 | unsigned short op, int st); | 162 | unsigned short op, int st); |
| 163 | 163 | ||
| 164 | #define foreach_grant(pos, rbtree, node) \ | 164 | #define foreach_grant_safe(pos, n, rbtree, node) \ |
| 165 | for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \ | 165 | for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ |
| 166 | (n) = rb_next(&(pos)->node); \ | ||
| 166 | &(pos)->node != NULL; \ | 167 | &(pos)->node != NULL; \ |
| 167 | (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node)) | 168 | (pos) = container_of(n, typeof(*(pos)), node), \ |
| 169 | (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) | ||
| 168 | 170 | ||
| 169 | 171 | ||
| 170 | static void add_persistent_gnt(struct rb_root *root, | 172 | static void add_persistent_gnt(struct rb_root *root, |
| @@ -217,10 +219,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num) | |||
| 217 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 219 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
| 218 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 220 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
| 219 | struct persistent_gnt *persistent_gnt; | 221 | struct persistent_gnt *persistent_gnt; |
| 222 | struct rb_node *n; | ||
| 220 | int ret = 0; | 223 | int ret = 0; |
| 221 | int segs_to_unmap = 0; | 224 | int segs_to_unmap = 0; |
| 222 | 225 | ||
| 223 | foreach_grant(persistent_gnt, root, node) { | 226 | foreach_grant_safe(persistent_gnt, n, root, node) { |
| 224 | BUG_ON(persistent_gnt->handle == | 227 | BUG_ON(persistent_gnt->handle == |
| 225 | BLKBACK_INVALID_HANDLE); | 228 | BLKBACK_INVALID_HANDLE); |
| 226 | gnttab_set_unmap_op(&unmap[segs_to_unmap], | 229 | gnttab_set_unmap_op(&unmap[segs_to_unmap], |
| @@ -230,9 +233,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num) | |||
| 230 | persistent_gnt->handle); | 233 | persistent_gnt->handle); |
| 231 | 234 | ||
| 232 | pages[segs_to_unmap] = persistent_gnt->page; | 235 | pages[segs_to_unmap] = persistent_gnt->page; |
| 233 | rb_erase(&persistent_gnt->node, root); | ||
| 234 | kfree(persistent_gnt); | ||
| 235 | num--; | ||
| 236 | 236 | ||
| 237 | if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || | 237 | if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || |
| 238 | !rb_next(&persistent_gnt->node)) { | 238 | !rb_next(&persistent_gnt->node)) { |
| @@ -241,6 +241,10 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num) | |||
| 241 | BUG_ON(ret); | 241 | BUG_ON(ret); |
| 242 | segs_to_unmap = 0; | 242 | segs_to_unmap = 0; |
| 243 | } | 243 | } |
| 244 | |||
| 245 | rb_erase(&persistent_gnt->node, root); | ||
| 246 | kfree(persistent_gnt); | ||
| 247 | num--; | ||
| 244 | } | 248 | } |
| 245 | BUG_ON(num != 0); | 249 | BUG_ON(num != 0); |
| 246 | } | 250 | } |
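foreach_grant_safe() is the rbtree analogue of list_for_each_entry_safe(): the successor is fetched before the loop body runs, so the body may rb_erase() and kfree() the current entry, which is exactly what free_persistent_gnts() now does after batching the unmap. A hedged, generic version of the same idea written against the plain rbtree API; struct pgnt is a minimal stand-in for the persistent-grant entry:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct pgnt {
        struct rb_node node;
        /* grant ref, page, handle ... omitted */
};

static void destroy_tree(struct rb_root *root)
{
        struct rb_node *pos, *next;

        for (pos = rb_first(root); pos; pos = next) {
                next = rb_next(pos);            /* grab the successor first */
                rb_erase(pos, root);            /* now safe to unlink ...   */
                kfree(rb_entry(pos, struct pgnt, node));  /* ... and free   */
        }
}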
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 96e9b00db081..11043c18ac5a 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
| @@ -792,6 +792,7 @@ static void blkif_free(struct blkfront_info *info, int suspend) | |||
| 792 | { | 792 | { |
| 793 | struct llist_node *all_gnts; | 793 | struct llist_node *all_gnts; |
| 794 | struct grant *persistent_gnt; | 794 | struct grant *persistent_gnt; |
| 795 | struct llist_node *n; | ||
| 795 | 796 | ||
| 796 | /* Prevent new requests being issued until we fix things up. */ | 797 | /* Prevent new requests being issued until we fix things up. */ |
| 797 | spin_lock_irq(&info->io_lock); | 798 | spin_lock_irq(&info->io_lock); |
| @@ -804,7 +805,7 @@ static void blkif_free(struct blkfront_info *info, int suspend) | |||
| 804 | /* Remove all persistent grants */ | 805 | /* Remove all persistent grants */ |
| 805 | if (info->persistent_gnts_c) { | 806 | if (info->persistent_gnts_c) { |
| 806 | all_gnts = llist_del_all(&info->persistent_gnts); | 807 | all_gnts = llist_del_all(&info->persistent_gnts); |
| 807 | llist_for_each_entry(persistent_gnt, all_gnts, node) { | 808 | llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) { |
| 808 | gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); | 809 | gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); |
| 809 | __free_page(pfn_to_page(persistent_gnt->pfn)); | 810 | __free_page(pfn_to_page(persistent_gnt->pfn)); |
| 810 | kfree(persistent_gnt); | 811 | kfree(persistent_gnt); |
| @@ -835,7 +836,7 @@ static void blkif_free(struct blkfront_info *info, int suspend) | |||
| 835 | static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, | 836 | static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, |
| 836 | struct blkif_response *bret) | 837 | struct blkif_response *bret) |
| 837 | { | 838 | { |
| 838 | int i; | 839 | int i = 0; |
| 839 | struct bio_vec *bvec; | 840 | struct bio_vec *bvec; |
| 840 | struct req_iterator iter; | 841 | struct req_iterator iter; |
| 841 | unsigned long flags; | 842 | unsigned long flags; |
| @@ -852,7 +853,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, | |||
| 852 | */ | 853 | */ |
| 853 | rq_for_each_segment(bvec, s->request, iter) { | 854 | rq_for_each_segment(bvec, s->request, iter) { |
| 854 | BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); | 855 | BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); |
| 855 | i = offset >> PAGE_SHIFT; | 856 | if (bvec->bv_offset < offset) |
| 857 | i++; | ||
| 856 | BUG_ON(i >= s->req.u.rw.nr_segments); | 858 | BUG_ON(i >= s->req.u.rw.nr_segments); |
| 857 | shared_data = kmap_atomic( | 859 | shared_data = kmap_atomic( |
| 858 | pfn_to_page(s->grants_used[i]->pfn)); | 860 | pfn_to_page(s->grants_used[i]->pfn)); |
| @@ -861,7 +863,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, | |||
| 861 | bvec->bv_len); | 863 | bvec->bv_len); |
| 862 | bvec_kunmap_irq(bvec_data, &flags); | 864 | bvec_kunmap_irq(bvec_data, &flags); |
| 863 | kunmap_atomic(shared_data); | 865 | kunmap_atomic(shared_data); |
| 864 | offset += bvec->bv_len; | 866 | offset = bvec->bv_offset + bvec->bv_len; |
| 865 | } | 867 | } |
| 866 | } | 868 | } |
| 867 | /* Add the persistent grant into the list of free grants */ | 869 | /* Add the persistent grant into the list of free grants */ |
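In blkif_completion() the grant index is no longer derived from offset >> PAGE_SHIFT; instead i starts at 0 and is bumped whenever a bio_vec begins at a lower in-page offset than where the previous one ended, the sign that the copy has crossed into the next granted page. A small hedged sketch of just that index-advancing rule, in plain C with simplified types:

#include <stddef.h>

struct seg { size_t off, len; };        /* stand-in for bv_offset / bv_len */

/* Record which shared page backs each segment: advance the index whenever
 * the in-page offset wraps back below the previous end-of-data mark. */
static void assign_page_indices(const struct seg *v, int n, int *idx)
{
        size_t end = 0;
        int i = 0;

        for (int k = 0; k < n; k++) {
                if (v[k].off < end)
                        i++;                    /* next shared page */
                idx[k] = i;
                end = v[k].off + v[k].len;
        }
}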
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 684b0d53764f..ee4dbeafb377 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
| @@ -2062,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev) | |||
| 2062 | /* Disable interrupts for vqs */ | 2062 | /* Disable interrupts for vqs */ |
| 2063 | vdev->config->reset(vdev); | 2063 | vdev->config->reset(vdev); |
| 2064 | /* Finish up work that's lined up */ | 2064 | /* Finish up work that's lined up */ |
| 2065 | cancel_work_sync(&portdev->control_work); | 2065 | if (use_multiport(portdev)) |
| 2066 | cancel_work_sync(&portdev->control_work); | ||
| 2066 | 2067 | ||
| 2067 | list_for_each_entry_safe(port, port2, &portdev->ports, list) | 2068 | list_for_each_entry_safe(port, port2, &portdev->ports, list) |
| 2068 | unplug_port(port); | 2069 | unplug_port(port); |
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c index 6b0843c33877..e05c15777588 100644 --- a/drivers/gpu/drm/nouveau/core/core/falcon.c +++ b/drivers/gpu/drm/nouveau/core/core/falcon.c | |||
| @@ -73,8 +73,11 @@ _nouveau_falcon_init(struct nouveau_object *object) | |||
| 73 | nv_debug(falcon, "data limit: %d\n", falcon->data.limit); | 73 | nv_debug(falcon, "data limit: %d\n", falcon->data.limit); |
| 74 | 74 | ||
| 75 | /* wait for 'uc halted' to be signalled before continuing */ | 75 | /* wait for 'uc halted' to be signalled before continuing */ |
| 76 | if (falcon->secret) { | 76 | if (falcon->secret && falcon->version < 4) { |
| 77 | nv_wait(falcon, 0x008, 0x00000010, 0x00000010); | 77 | if (!falcon->version) |
| 78 | nv_wait(falcon, 0x008, 0x00000010, 0x00000010); | ||
| 79 | else | ||
| 80 | nv_wait(falcon, 0x180, 0x80000000, 0); | ||
| 78 | nv_wo32(falcon, 0x004, 0x00000010); | 81 | nv_wo32(falcon, 0x004, 0x00000010); |
| 79 | } | 82 | } |
| 80 | 83 | ||
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c index f74c30aa33a0..48f06378d3f9 100644 --- a/drivers/gpu/drm/nouveau/core/core/subdev.c +++ b/drivers/gpu/drm/nouveau/core/core/subdev.c | |||
| @@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent, | |||
| 99 | if (ret) | 99 | if (ret) |
| 100 | return ret; | 100 | return ret; |
| 101 | 101 | ||
| 102 | mutex_init(&subdev->mutex); | 102 | __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key); |
| 103 | subdev->name = subname; | 103 | subdev->name = subname; |
| 104 | 104 | ||
| 105 | if (parent) { | 105 | if (parent) { |
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h index 5982935ee23a..106bb19fdd9a 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/object.h +++ b/drivers/gpu/drm/nouveau/core/include/core/object.h | |||
| @@ -50,10 +50,13 @@ int nouveau_object_fini(struct nouveau_object *, bool suspend); | |||
| 50 | 50 | ||
| 51 | extern struct nouveau_ofuncs nouveau_object_ofuncs; | 51 | extern struct nouveau_ofuncs nouveau_object_ofuncs; |
| 52 | 52 | ||
| 53 | /* Don't allocate dynamically, because lockdep needs lock_class_keys to be in | ||
| 54 | * ".data". */ | ||
| 53 | struct nouveau_oclass { | 55 | struct nouveau_oclass { |
| 54 | u32 handle; | 56 | u32 handle; |
| 55 | struct nouveau_ofuncs *ofuncs; | 57 | struct nouveau_ofuncs * const ofuncs; |
| 56 | struct nouveau_omthds *omthds; | 58 | struct nouveau_omthds * const omthds; |
| 59 | struct lock_class_key lock_class_key; | ||
| 57 | }; | 60 | }; |
| 58 | 61 | ||
| 59 | #define nv_oclass(o) nv_object(o)->oclass | 62 | #define nv_oclass(o) nv_object(o)->oclass |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c index d6d16007ec1a..d62045f454b2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c | |||
| @@ -86,8 +86,8 @@ nouveau_fb_preinit(struct nouveau_fb *pfb) | |||
| 86 | return ret; | 86 | return ret; |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | if (!nouveau_mm_initialised(&pfb->tags) && tags) { | 89 | if (!nouveau_mm_initialised(&pfb->tags)) { |
| 90 | ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1); | 90 | ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1); |
| 91 | if (ret) | 91 | if (ret) |
| 92 | return ret; | 92 | return ret; |
| 93 | } | 93 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c index 487cb8c6c204..eac236ed19b2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c | |||
| @@ -99,7 +99,7 @@ nv50_fb_vram_init(struct nouveau_fb *pfb) | |||
| 99 | struct nouveau_bios *bios = nouveau_bios(device); | 99 | struct nouveau_bios *bios = nouveau_bios(device); |
| 100 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ | 100 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ |
| 101 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ | 101 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ |
| 102 | u32 size; | 102 | u32 size, tags = 0; |
| 103 | int ret; | 103 | int ret; |
| 104 | 104 | ||
| 105 | pfb->ram.size = nv_rd32(pfb, 0x10020c); | 105 | pfb->ram.size = nv_rd32(pfb, 0x10020c); |
| @@ -140,10 +140,11 @@ nv50_fb_vram_init(struct nouveau_fb *pfb) | |||
| 140 | return ret; | 140 | return ret; |
| 141 | 141 | ||
| 142 | pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; | 142 | pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; |
| 143 | tags = nv_rd32(pfb, 0x100320); | ||
| 143 | break; | 144 | break; |
| 144 | } | 145 | } |
| 145 | 146 | ||
| 146 | return nv_rd32(pfb, 0x100320); | 147 | return tags; |
| 147 | } | 148 | } |
| 148 | 149 | ||
| 149 | static int | 150 | static int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 69d7b1d0b9d6..1699a9083a2f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | */ | 28 | */ |
| 29 | 29 | ||
| 30 | #include <core/engine.h> | 30 | #include <core/engine.h> |
| 31 | #include <linux/swiotlb.h> | ||
| 31 | 32 | ||
| 32 | #include <subdev/fb.h> | 33 | #include <subdev/fb.h> |
| 33 | #include <subdev/vm.h> | 34 | #include <subdev/vm.h> |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 8b090f1eb51d..5e7aef23825a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
| @@ -245,6 +245,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev, | |||
| 245 | return 0; | 245 | return 0; |
| 246 | } | 246 | } |
| 247 | 247 | ||
| 248 | static struct lock_class_key drm_client_lock_class_key; | ||
| 249 | |||
| 248 | static int | 250 | static int |
| 249 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) | 251 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) |
| 250 | { | 252 | { |
| @@ -256,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
| 256 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); | 258 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); |
| 257 | if (ret) | 259 | if (ret) |
| 258 | return ret; | 260 | return ret; |
| 261 | lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key); | ||
| 259 | 262 | ||
| 260 | dev->dev_private = drm; | 263 | dev->dev_private = drm; |
| 261 | drm->dev = dev; | 264 | drm->dev = dev; |
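The nouveau changes give each subdev mutex a lockdep class taken from its statically allocated nouveau_oclass, and the DRM client mutex a dedicated class of its own, so lockdep can tell nested subdev locks apart instead of folding them into one class; this is also why the new comment insists that oclass structures stay in .data. A hedged sketch of the general pattern; the names are illustrative:

#include <linux/mutex.h>
#include <linux/lockdep.h>

/* One static key per logical lock class: lockdep requires the key to live
 * in static storage, never in kmalloc'd memory. */
static struct lock_class_key my_client_lock_key;

struct my_client {
        struct mutex mutex;
};

static void my_client_init(struct my_client *cli)
{
        mutex_init(&cli->mutex);
        /* Re-classify this instance so nesting it against other mutexes of
         * the same C type does not raise false lockdep reports. */
        lockdep_set_class(&cli->mutex, &my_client_lock_key);
}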
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 7a445666e71f..ee4cff534f10 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
| @@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 2909 | return -EINVAL; | 2909 | return -EINVAL; |
| 2910 | } | 2910 | } |
| 2911 | if (tiled) { | 2911 | if (tiled) { |
| 2912 | dst_offset = ib[idx+1]; | 2912 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 2913 | dst_offset <<= 8; | 2913 | dst_offset <<= 8; |
| 2914 | 2914 | ||
| 2915 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2915 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
| 2916 | p->idx += count + 7; | 2916 | p->idx += count + 7; |
| 2917 | } else { | 2917 | } else { |
| 2918 | dst_offset = ib[idx+1]; | 2918 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 2919 | dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; | 2919 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; |
| 2920 | 2920 | ||
| 2921 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2921 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
| 2922 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2922 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
| @@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 2954 | DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); | 2954 | DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); |
| 2955 | return -EINVAL; | 2955 | return -EINVAL; |
| 2956 | } | 2956 | } |
| 2957 | dst_offset = ib[idx+1]; | 2957 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 2958 | dst_offset <<= 8; | 2958 | dst_offset <<= 8; |
| 2959 | dst2_offset = ib[idx+2]; | 2959 | dst2_offset = radeon_get_ib_value(p, idx+2); |
| 2960 | dst2_offset <<= 8; | 2960 | dst2_offset <<= 8; |
| 2961 | src_offset = ib[idx+8]; | 2961 | src_offset = radeon_get_ib_value(p, idx+8); |
| 2962 | src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; | 2962 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; |
| 2963 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 2963 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
| 2964 | dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", | 2964 | dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", |
| 2965 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 2965 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
| @@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 3014 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); | 3014 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); |
| 3015 | return -EINVAL; | 3015 | return -EINVAL; |
| 3016 | } | 3016 | } |
| 3017 | dst_offset = ib[idx+1]; | 3017 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 3018 | dst_offset <<= 8; | 3018 | dst_offset <<= 8; |
| 3019 | dst2_offset = ib[idx+2]; | 3019 | dst2_offset = radeon_get_ib_value(p, idx+2); |
| 3020 | dst2_offset <<= 8; | 3020 | dst2_offset <<= 8; |
| 3021 | src_offset = ib[idx+8]; | 3021 | src_offset = radeon_get_ib_value(p, idx+8); |
| 3022 | src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; | 3022 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; |
| 3023 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3023 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
| 3024 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", | 3024 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", |
| 3025 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3025 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
| @@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 3046 | /* detile bit */ | 3046 | /* detile bit */ |
| 3047 | if (idx_value & (1 << 31)) { | 3047 | if (idx_value & (1 << 31)) { |
| 3048 | /* tiled src, linear dst */ | 3048 | /* tiled src, linear dst */ |
| 3049 | src_offset = ib[idx+1]; | 3049 | src_offset = radeon_get_ib_value(p, idx+1); |
| 3050 | src_offset <<= 8; | 3050 | src_offset <<= 8; |
| 3051 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 3051 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); |
| 3052 | 3052 | ||
| 3053 | dst_offset = ib[idx+7]; | 3053 | dst_offset = radeon_get_ib_value(p, idx+7); |
| 3054 | dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3054 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
| 3055 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 3055 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
| 3056 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 3056 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
| 3057 | } else { | 3057 | } else { |
| 3058 | /* linear src, tiled dst */ | 3058 | /* linear src, tiled dst */ |
| 3059 | src_offset = ib[idx+7]; | 3059 | src_offset = radeon_get_ib_value(p, idx+7); |
| 3060 | src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3060 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
| 3061 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 3061 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
| 3062 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 3062 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
| 3063 | 3063 | ||
| 3064 | dst_offset = ib[idx+1]; | 3064 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 3065 | dst_offset <<= 8; | 3065 | dst_offset <<= 8; |
| 3066 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 3066 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
| 3067 | } | 3067 | } |
| @@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 3098 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); | 3098 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); |
| 3099 | return -EINVAL; | 3099 | return -EINVAL; |
| 3100 | } | 3100 | } |
| 3101 | dst_offset = ib[idx+1]; | 3101 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 3102 | dst_offset <<= 8; | 3102 | dst_offset <<= 8; |
| 3103 | dst2_offset = ib[idx+2]; | 3103 | dst2_offset = radeon_get_ib_value(p, idx+2); |
| 3104 | dst2_offset <<= 8; | 3104 | dst2_offset <<= 8; |
| 3105 | src_offset = ib[idx+8]; | 3105 | src_offset = radeon_get_ib_value(p, idx+8); |
| 3106 | src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; | 3106 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; |
| 3107 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3107 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
| 3108 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", | 3108 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", |
| 3109 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3109 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
| @@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 3135 | /* detile bit */ | 3135 | /* detile bit */ |
| 3136 | if (idx_value & (1 << 31)) { | 3136 | if (idx_value & (1 << 31)) { |
| 3137 | /* tiled src, linear dst */ | 3137 | /* tiled src, linear dst */ |
| 3138 | src_offset = ib[idx+1]; | 3138 | src_offset = radeon_get_ib_value(p, idx+1); |
| 3139 | src_offset <<= 8; | 3139 | src_offset <<= 8; |
| 3140 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 3140 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); |
| 3141 | 3141 | ||
| 3142 | dst_offset = ib[idx+7]; | 3142 | dst_offset = radeon_get_ib_value(p, idx+7); |
| 3143 | dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3143 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
| 3144 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 3144 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
| 3145 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 3145 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
| 3146 | } else { | 3146 | } else { |
| 3147 | /* linear src, tiled dst */ | 3147 | /* linear src, tiled dst */ |
| 3148 | src_offset = ib[idx+7]; | 3148 | src_offset = radeon_get_ib_value(p, idx+7); |
| 3149 | src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3149 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
| 3150 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 3150 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
| 3151 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 3151 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
| 3152 | 3152 | ||
| 3153 | dst_offset = ib[idx+1]; | 3153 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 3154 | dst_offset <<= 8; | 3154 | dst_offset <<= 8; |
| 3155 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 3155 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
| 3156 | } | 3156 | } |
| @@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 3176 | switch (misc) { | 3176 | switch (misc) { |
| 3177 | case 0: | 3177 | case 0: |
| 3178 | /* L2L, byte */ | 3178 | /* L2L, byte */ |
| 3179 | src_offset = ib[idx+2]; | 3179 | src_offset = radeon_get_ib_value(p, idx+2); |
| 3180 | src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 3180 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
| 3181 | dst_offset = ib[idx+1]; | 3181 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 3182 | dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 3182 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
| 3183 | if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { | 3183 | if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { |
| 3184 | dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", | 3184 | dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", |
| 3185 | src_offset + count, radeon_bo_size(src_reloc->robj)); | 3185 | src_offset + count, radeon_bo_size(src_reloc->robj)); |
| @@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 3216 | DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); | 3216 | DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); |
| 3217 | return -EINVAL; | 3217 | return -EINVAL; |
| 3218 | } | 3218 | } |
| 3219 | dst_offset = ib[idx+1]; | 3219 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 3220 | dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 3220 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
| 3221 | dst2_offset = ib[idx+2]; | 3221 | dst2_offset = radeon_get_ib_value(p, idx+2); |
| 3222 | dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32; | 3222 | dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32; |
| 3223 | src_offset = ib[idx+3]; | 3223 | src_offset = radeon_get_ib_value(p, idx+3); |
| 3224 | src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; | 3224 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; |
| 3225 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3225 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
| 3226 | dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", | 3226 | dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", |
| 3227 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3227 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
| @@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 3251 | } | 3251 | } |
| 3252 | } else { | 3252 | } else { |
| 3253 | /* L2L, dw */ | 3253 | /* L2L, dw */ |
| 3254 | src_offset = ib[idx+2]; | 3254 | src_offset = radeon_get_ib_value(p, idx+2); |
| 3255 | src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 3255 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
| 3256 | dst_offset = ib[idx+1]; | 3256 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 3257 | dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 3257 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
| 3258 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3258 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
| 3259 | dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", | 3259 | dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", |
| 3260 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3260 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
| @@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 3279 | DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); | 3279 | DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); |
| 3280 | return -EINVAL; | 3280 | return -EINVAL; |
| 3281 | } | 3281 | } |
| 3282 | dst_offset = ib[idx+1]; | 3282 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 3283 | dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; | 3283 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; |
| 3284 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { | 3284 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { |
| 3285 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", | 3285 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", |
| 3286 | dst_offset, radeon_bo_size(dst_reloc->robj)); | 3286 | dst_offset, radeon_bo_size(dst_reloc->robj)); |
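The evergreen DMA parser hunks above consistently replace direct `ib[idx+n]` reads with radeon_get_ib_value(p, idx+n) when a command-stream dword is only being inspected, while the relocation writes still go through `ib[...] += ...`. A recurring pattern in these hunks is assembling a GPU address from two dwords: the low 32 bits in one slot and bits 39:32 in the low byte of another. The standalone demo below reproduces only that arithmetic; the function name and values are invented for illustration, and nothing is taken from the driver beyond the bit layout visible in the hunks.

#include <stdint.h>
#include <stdio.h>

/* Assemble a 40-bit offset from a low dword and the low byte of a
 * second dword, the way the src/dst offsets above are built. */
static uint64_t assemble_offset(uint32_t lo, uint32_t hi)
{
        return (uint64_t)lo | ((uint64_t)(hi & 0xff) << 32);
}

int main(void)
{
        uint32_t lo = 0x89abcdef, hi = 0x00000012;      /* made-up dwords */

        printf("offset = 0x%llx\n", (unsigned long long)assemble_offset(lo, hi));
        return 0;
}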
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 69ec24ab8d63..9b2512bf1a46 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
| @@ -2623,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 2623 | return -EINVAL; | 2623 | return -EINVAL; |
| 2624 | } | 2624 | } |
| 2625 | if (tiled) { | 2625 | if (tiled) { |
| 2626 | dst_offset = ib[idx+1]; | 2626 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 2627 | dst_offset <<= 8; | 2627 | dst_offset <<= 8; |
| 2628 | 2628 | ||
| 2629 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2629 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
| 2630 | p->idx += count + 5; | 2630 | p->idx += count + 5; |
| 2631 | } else { | 2631 | } else { |
| 2632 | dst_offset = ib[idx+1]; | 2632 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 2633 | dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; | 2633 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; |
| 2634 | 2634 | ||
| 2635 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2635 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
| 2636 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2636 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
| @@ -2658,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 2658 | /* detile bit */ | 2658 | /* detile bit */ |
| 2659 | if (idx_value & (1 << 31)) { | 2659 | if (idx_value & (1 << 31)) { |
| 2660 | /* tiled src, linear dst */ | 2660 | /* tiled src, linear dst */ |
| 2661 | src_offset = ib[idx+1]; | 2661 | src_offset = radeon_get_ib_value(p, idx+1); |
| 2662 | src_offset <<= 8; | 2662 | src_offset <<= 8; |
| 2663 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 2663 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); |
| 2664 | 2664 | ||
| 2665 | dst_offset = ib[idx+5]; | 2665 | dst_offset = radeon_get_ib_value(p, idx+5); |
| 2666 | dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; | 2666 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; |
| 2667 | ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2667 | ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
| 2668 | ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2668 | ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
| 2669 | } else { | 2669 | } else { |
| 2670 | /* linear src, tiled dst */ | 2670 | /* linear src, tiled dst */ |
| 2671 | src_offset = ib[idx+5]; | 2671 | src_offset = radeon_get_ib_value(p, idx+5); |
| 2672 | src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; | 2672 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; |
| 2673 | ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2673 | ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
| 2674 | ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2674 | ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
| 2675 | 2675 | ||
| 2676 | dst_offset = ib[idx+1]; | 2676 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 2677 | dst_offset <<= 8; | 2677 | dst_offset <<= 8; |
| 2678 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2678 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
| 2679 | } | 2679 | } |
| 2680 | p->idx += 7; | 2680 | p->idx += 7; |
| 2681 | } else { | 2681 | } else { |
| 2682 | if (p->family >= CHIP_RV770) { | 2682 | if (p->family >= CHIP_RV770) { |
| 2683 | src_offset = ib[idx+2]; | 2683 | src_offset = radeon_get_ib_value(p, idx+2); |
| 2684 | src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 2684 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
| 2685 | dst_offset = ib[idx+1]; | 2685 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 2686 | dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 2686 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
| 2687 | 2687 | ||
| 2688 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2688 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
| 2689 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2689 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
| @@ -2691,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 2691 | ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2691 | ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
| 2692 | p->idx += 5; | 2692 | p->idx += 5; |
| 2693 | } else { | 2693 | } else { |
| 2694 | src_offset = ib[idx+2]; | 2694 | src_offset = radeon_get_ib_value(p, idx+2); |
| 2695 | src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 2695 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
| 2696 | dst_offset = ib[idx+1]; | 2696 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 2697 | dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16; | 2697 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16; |
| 2698 | 2698 | ||
| 2699 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2699 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
| 2700 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2700 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
| @@ -2724,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
| 2724 | DRM_ERROR("bad DMA_PACKET_WRITE\n"); | 2724 | DRM_ERROR("bad DMA_PACKET_WRITE\n"); |
| 2725 | return -EINVAL; | 2725 | return -EINVAL; |
| 2726 | } | 2726 | } |
| 2727 | dst_offset = ib[idx+1]; | 2727 | dst_offset = radeon_get_ib_value(p, idx+1); |
| 2728 | dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; | 2728 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; |
| 2729 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { | 2729 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { |
| 2730 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", | 2730 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", |
| 2731 | dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); | 2731 | dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); |
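The r600 DMA parser receives the same treatment: dwords that are only read now come through radeon_get_ib_value(). Two address layouts appear in these hunks: a tiled surface stores its base in 256-byte units (hence the `<<= 8` before the bounds check), while a linear one splits the address across a low dword and a high byte. Below is a small runnable sketch of the tiled-destination bounds check; the function and its values are stand-ins, and only the shift and the `count * 4` byte conversion mirror the code above.

#include <stdint.h>
#include <stdio.h>

/* Check a tiled destination against the backing buffer size.
 * 'ib_dword' is the raw command-stream value, 'count_dw' the dword
 * count of the copy, 'bo_size' the buffer object size in bytes. */
static int check_tiled_dst(uint32_t ib_dword, uint32_t count_dw, uint64_t bo_size)
{
        uint64_t dst_offset = (uint64_t)ib_dword << 8;  /* 256-byte units */
        uint64_t end = dst_offset + (uint64_t)count_dw * 4;

        if (end > bo_size) {
                fprintf(stderr, "dst buffer too small (%llu %llu)\n",
                        (unsigned long long)end, (unsigned long long)bo_size);
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", check_tiled_dst(0x10, 64, 1 << 20));     /* 0: fits */
        return 0;
}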
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1d8ff2f850ba..93f760e27a92 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <drm/radeon_drm.h> | 38 | #include <drm/radeon_drm.h> |
| 39 | #include <linux/seq_file.h> | 39 | #include <linux/seq_file.h> |
| 40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
| 41 | #include <linux/swiotlb.h> | ||
| 41 | #include "radeon_reg.h" | 42 | #include "radeon_reg.h" |
| 42 | #include "radeon.h" | 43 | #include "radeon.h" |
| 43 | 44 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 44420fca7dfa..8be35c809c7b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
| @@ -429,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, | |||
| 429 | struct ttm_bo_device *bdev = bo->bdev; | 429 | struct ttm_bo_device *bdev = bo->bdev; |
| 430 | struct ttm_bo_driver *driver = bdev->driver; | 430 | struct ttm_bo_driver *driver = bdev->driver; |
| 431 | 431 | ||
| 432 | fbo = kzalloc(sizeof(*fbo), GFP_KERNEL); | 432 | fbo = kmalloc(sizeof(*fbo), GFP_KERNEL); |
| 433 | if (!fbo) | 433 | if (!fbo) |
| 434 | return -ENOMEM; | 434 | return -ENOMEM; |
| 435 | 435 | ||
| @@ -448,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, | |||
| 448 | fbo->vm_node = NULL; | 448 | fbo->vm_node = NULL; |
| 449 | atomic_set(&fbo->cpu_writers, 0); | 449 | atomic_set(&fbo->cpu_writers, 0); |
| 450 | 450 | ||
| 451 | fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); | 451 | spin_lock(&bdev->fence_lock); |
| 452 | if (bo->sync_obj) | ||
| 453 | fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); | ||
| 454 | else | ||
| 455 | fbo->sync_obj = NULL; | ||
| 456 | spin_unlock(&bdev->fence_lock); | ||
| 452 | kref_init(&fbo->list_kref); | 457 | kref_init(&fbo->list_kref); |
| 453 | kref_init(&fbo->kref); | 458 | kref_init(&fbo->kref); |
| 454 | fbo->destroy = &ttm_transfered_destroy; | 459 | fbo->destroy = &ttm_transfered_destroy; |
| @@ -661,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
| 661 | */ | 666 | */ |
| 662 | 667 | ||
| 663 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); | 668 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); |
| 664 | |||
| 665 | /* ttm_buffer_object_transfer accesses bo->sync_obj */ | ||
| 666 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); | ||
| 667 | spin_unlock(&bdev->fence_lock); | 669 | spin_unlock(&bdev->fence_lock); |
| 668 | if (tmp_obj) | 670 | if (tmp_obj) |
| 669 | driver->sync_obj_unref(&tmp_obj); | 671 | driver->sync_obj_unref(&tmp_obj); |
| 670 | 672 | ||
| 673 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); | ||
| 671 | if (ret) | 674 | if (ret) |
| 672 | return ret; | 675 | return ret; |
| 673 | 676 | ||
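Two separate fixes land in ttm_bo_util.c: the ghost object can be kmalloc()ed rather than kzalloc()ed because the structure is filled in immediately afterwards, so pre-zeroing is redundant, and the sync_obj reference is now taken under bdev->fence_lock inside ttm_buffer_object_transfer() instead of relying on the caller holding that lock across the call. A reduced sketch of the locking pattern follows; the types are stand-ins for the real TTM structures and only illustrate "snapshot a fence reference under its lock".

#include <linux/spinlock.h>

struct fake_bo {
        void *sync_obj;           /* fence-like object, may be NULL */
        spinlock_t *fence_lock;   /* lock guarding sync_obj */
};

/* Take a reference to bo->sync_obj without assuming the caller holds
 * the fence lock; 'ref' stands in for driver->sync_obj_ref(). */
static void *grab_sync_obj(struct fake_bo *bo, void *(*ref)(void *))
{
        void *snapshot;

        spin_lock(bo->fence_lock);
        snapshot = bo->sync_obj ? ref(bo->sync_obj) : NULL;
        spin_unlock(bo->fence_lock);

        return snapshot;
}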
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 4850d03870c2..35275099cafd 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
| @@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) | |||
| 263 | struct qib_qp __rcu **qpp; | 263 | struct qib_qp __rcu **qpp; |
| 264 | 264 | ||
| 265 | qpp = &dev->qp_table[n]; | 265 | qpp = &dev->qp_table[n]; |
| 266 | q = rcu_dereference_protected(*qpp, | 266 | for (; (q = rcu_dereference_protected(*qpp, |
| 267 | lockdep_is_held(&dev->qpt_lock)); | 267 | lockdep_is_held(&dev->qpt_lock))) != NULL; |
| 268 | for (; q; qpp = &q->next) { | 268 | qpp = &q->next) |
| 269 | if (q == qp) { | 269 | if (q == qp) { |
| 270 | atomic_dec(&qp->refcount); | 270 | atomic_dec(&qp->refcount); |
| 271 | *qpp = qp->next; | 271 | *qpp = qp->next; |
| 272 | rcu_assign_pointer(qp->next, NULL); | 272 | rcu_assign_pointer(qp->next, NULL); |
| 273 | q = rcu_dereference_protected(*qpp, | ||
| 274 | lockdep_is_held(&dev->qpt_lock)); | ||
| 275 | break; | 273 | break; |
| 276 | } | 274 | } |
| 277 | q = rcu_dereference_protected(*qpp, | ||
| 278 | lockdep_is_held(&dev->qpt_lock)); | ||
| 279 | } | ||
| 280 | } | 275 | } |
| 281 | 276 | ||
| 282 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | 277 | spin_unlock_irqrestore(&dev->qpt_lock, flags); |
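The qib change collapses a duplicated rcu_dereference_protected() walk into a single for loop: the loop variable is a pointer to the 'next' slot, so unlinking is one store into *qpp. The userspace demo below shows the same pointer-to-pointer removal shape, minus the RCU primitives, locking and reference counting that the driver needs; names and values are invented.

#include <stddef.h>
#include <stdio.h>

struct node { int val; struct node *next; };

/* Unlink 'target' from a singly linked list by walking the address of
 * each 'next' field, the same shape as remove_qp() above. */
static void remove_node(struct node **head, struct node *target)
{
        struct node **pp;
        struct node *n;

        for (pp = head; (n = *pp) != NULL; pp = &n->next)
                if (n == target) {
                        *pp = n->next;
                        n->next = NULL;
                        break;
                }
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *head = &a;
        struct node *n;

        remove_node(&head, &b);
        for (n = head; n; n = n->next)
                printf("%d ", n->val);          /* prints: 1 3 */
        printf("\n");
        return 0;
}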
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 03103d2bd641..67b0c1d23678 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
| @@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ | |||
| 741 | 741 | ||
| 742 | tx_req->mapping = addr; | 742 | tx_req->mapping = addr; |
| 743 | 743 | ||
| 744 | skb_orphan(skb); | ||
| 745 | skb_dst_drop(skb); | ||
| 746 | |||
| 744 | rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), | 747 | rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), |
| 745 | addr, skb->len); | 748 | addr, skb->len); |
| 746 | if (unlikely(rc)) { | 749 | if (unlikely(rc)) { |
| @@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ | |||
| 752 | dev->trans_start = jiffies; | 755 | dev->trans_start = jiffies; |
| 753 | ++tx->tx_head; | 756 | ++tx->tx_head; |
| 754 | 757 | ||
| 755 | skb_orphan(skb); | ||
| 756 | skb_dst_drop(skb); | ||
| 757 | |||
| 758 | if (++priv->tx_outstanding == ipoib_sendq_size) { | 758 | if (++priv->tx_outstanding == ipoib_sendq_size) { |
| 759 | ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", | 759 | ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", |
| 760 | tx->qp->qp_num); | 760 | tx->qp->qp_num); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index a1bca70e20aa..2cfa76f5d99e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, | |||
| 600 | netif_stop_queue(dev); | 600 | netif_stop_queue(dev); |
| 601 | } | 601 | } |
| 602 | 602 | ||
| 603 | skb_orphan(skb); | ||
| 604 | skb_dst_drop(skb); | ||
| 605 | |||
| 603 | rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), | 606 | rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), |
| 604 | address->ah, qpn, tx_req, phead, hlen); | 607 | address->ah, qpn, tx_req, phead, hlen); |
| 605 | if (unlikely(rc)) { | 608 | if (unlikely(rc)) { |
| @@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, | |||
| 615 | 618 | ||
| 616 | address->last_send = priv->tx_head; | 619 | address->last_send = priv->tx_head; |
| 617 | ++priv->tx_head; | 620 | ++priv->tx_head; |
| 618 | |||
| 619 | skb_orphan(skb); | ||
| 620 | skb_dst_drop(skb); | ||
| 621 | } | 621 | } |
| 622 | 622 | ||
| 623 | if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) | 623 | if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) |
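Both IPoIB transmit paths (connected mode above, datagram mode here) now call skb_orphan() and skb_dst_drop() before post_send() rather than after it. Once the work request is posted, the completion handler may free the skb at any time, so the socket accounting and route-cache reference have to be dropped while the skb is still guaranteed to be valid. A minimal kernel-style sketch of the ordering; post_to_hw() is a stand-in for the driver's post_send() and all surrounding bookkeeping is omitted.

#include <linux/skbuff.h>

/* Release socket and dst state first, then hand the skb to hardware;
 * after post_to_hw() succeeds the skb must not be touched again here. */
static int xmit_one(struct sk_buff *skb, int (*post_to_hw)(struct sk_buff *))
{
        skb_orphan(skb);        /* detach from the owning socket */
        skb_dst_drop(skb);      /* drop the cached route reference */

        return post_to_hw(skb);
}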
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 358cd7ee905b..7cd74e29cbc8 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c | |||
| @@ -162,7 +162,7 @@ static unsigned int get_time_pit(void) | |||
| 162 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | 162 | #define GET_TIME(x) do { x = get_cycles(); } while (0) |
| 163 | #define DELTA(x,y) ((y)-(x)) | 163 | #define DELTA(x,y) ((y)-(x)) |
| 164 | #define TIME_NAME "PCC" | 164 | #define TIME_NAME "PCC" |
| 165 | #elif defined(CONFIG_MN10300) | 165 | #elif defined(CONFIG_MN10300) || defined(CONFIG_TILE) |
| 166 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | 166 | #define GET_TIME(x) do { x = get_cycles(); } while (0) |
| 167 | #define DELTA(x, y) ((x) - (y)) | 167 | #define DELTA(x, y) ((x) - (y)) |
| 168 | #define TIME_NAME "TSC" | 168 | #define TIME_NAME "TSC" |
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c index e10e525f33e5..296941a9ae25 100644 --- a/drivers/media/radio/radio-keene.c +++ b/drivers/media/radio/radio-keene.c | |||
| @@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf, | |||
| 374 | radio->vdev.ioctl_ops = &usb_keene_ioctl_ops; | 374 | radio->vdev.ioctl_ops = &usb_keene_ioctl_ops; |
| 375 | radio->vdev.lock = &radio->lock; | 375 | radio->vdev.lock = &radio->lock; |
| 376 | radio->vdev.release = video_device_release_empty; | 376 | radio->vdev.release = video_device_release_empty; |
| 377 | radio->vdev.vfl_dir = VFL_DIR_TX; | ||
| 377 | 378 | ||
| 378 | radio->usbdev = interface_to_usbdev(intf); | 379 | radio->usbdev = interface_to_usbdev(intf); |
| 379 | radio->intf = intf; | 380 | radio->intf = intf; |
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c index a082e400ed0f..1507c9d508d7 100644 --- a/drivers/media/radio/radio-si4713.c +++ b/drivers/media/radio/radio-si4713.c | |||
| @@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = { | |||
| 250 | .name = "radio-si4713", | 250 | .name = "radio-si4713", |
| 251 | .release = video_device_release, | 251 | .release = video_device_release, |
| 252 | .ioctl_ops = &radio_si4713_ioctl_ops, | 252 | .ioctl_ops = &radio_si4713_ioctl_ops, |
| 253 | .vfl_dir = VFL_DIR_TX, | ||
| 253 | }; | 254 | }; |
| 254 | 255 | ||
| 255 | /* Platform driver interface */ | 256 | /* Platform driver interface */ |
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c index c48be195bbad..cabbe3adf435 100644 --- a/drivers/media/radio/radio-wl1273.c +++ b/drivers/media/radio/radio-wl1273.c | |||
| @@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = { | |||
| 1971 | .ioctl_ops = &wl1273_ioctl_ops, | 1971 | .ioctl_ops = &wl1273_ioctl_ops, |
| 1972 | .name = WL1273_FM_DRIVER_NAME, | 1972 | .name = WL1273_FM_DRIVER_NAME, |
| 1973 | .release = wl1273_vdev_release, | 1973 | .release = wl1273_vdev_release, |
| 1974 | .vfl_dir = VFL_DIR_TX, | ||
| 1974 | }; | 1975 | }; |
| 1975 | 1976 | ||
| 1976 | static int wl1273_fm_radio_remove(struct platform_device *pdev) | 1977 | static int wl1273_fm_radio_remove(struct platform_device *pdev) |
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c index 048de4536036..0a8ee8fab924 100644 --- a/drivers/media/radio/wl128x/fmdrv_v4l2.c +++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c | |||
| @@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = { | |||
| 518 | .ioctl_ops = &fm_drv_ioctl_ops, | 518 | .ioctl_ops = &fm_drv_ioctl_ops, |
| 519 | .name = FM_DRV_NAME, | 519 | .name = FM_DRV_NAME, |
| 520 | .release = video_device_release, | 520 | .release = video_device_release, |
| 521 | /* | ||
| 522 | * To ensure both the tuner and modulator ioctls are accessible we | ||
| 523 | * set the vfl_dir to M2M to indicate this. | ||
| 524 | * | ||
| 525 | * It is not really a mem2mem device of course, but it can both receive | ||
| 526 | * and transmit using the same radio device. It's the only radio driver | ||
| 527 | * that does this and it should really be split in two radio devices, | ||
| 528 | * but that would affect applications using this driver. | ||
| 529 | */ | ||
| 530 | .vfl_dir = VFL_DIR_M2M, | ||
| 521 | }; | 531 | }; |
| 522 | 532 | ||
| 523 | int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr) | 533 | int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr) |
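The radio hunks above all set vfl_dir on the video_device: keene, si4713 and wl1273 are transmitters and advertise VFL_DIR_TX, while the TI wl128x driver keeps both tuner and modulator ioctls reachable by declaring VFL_DIR_M2M, as its new comment explains. A minimal template for a transmit-only radio node is sketched below; the name is made up and the remaining fields (fops, ioctl_ops, lock) are assumed to be filled in elsewhere in a real driver.

#include <media/v4l2-dev.h>

/* Transmit-only (modulator) radio device node. */
static struct video_device my_radio_vdev_template = {
        .name    = "my-radio-tx",
        .release = video_device_release_empty,
        .vfl_dir = VFL_DIR_TX,
};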
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 1877ed7ca086..1c9e09fbdff8 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
| @@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d, | |||
| 1053 | pr_info("%s: Setting primary slave to None.\n", | 1053 | pr_info("%s: Setting primary slave to None.\n", |
| 1054 | bond->dev->name); | 1054 | bond->dev->name); |
| 1055 | bond->primary_slave = NULL; | 1055 | bond->primary_slave = NULL; |
| 1056 | memset(bond->params.primary, 0, sizeof(bond->params.primary)); | ||
| 1056 | bond_select_active_slave(bond); | 1057 | bond_select_active_slave(bond); |
| 1057 | goto out; | 1058 | goto out; |
| 1058 | } | 1059 | } |
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 58607f196c9e..2282b1ae9765 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
| @@ -488,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface, | |||
| 488 | 488 | ||
| 489 | priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), | 489 | priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), |
| 490 | IFX_WRITE_LOW_16BIT(mask)); | 490 | IFX_WRITE_LOW_16BIT(mask)); |
| 491 | |||
| 492 | /* According to C_CAN documentation, the reserved bit | ||
| 493 | * in IFx_MASK2 register is fixed 1 | ||
| 494 | */ | ||
| 491 | priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), | 495 | priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), |
| 492 | IFX_WRITE_HIGH_16BIT(mask)); | 496 | IFX_WRITE_HIGH_16BIT(mask) | BIT(13)); |
| 493 | 497 | ||
| 494 | priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), | 498 | priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), |
| 495 | IFX_WRITE_LOW_16BIT(id)); | 499 | IFX_WRITE_LOW_16BIT(id)); |
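The c_can comment added above notes that the reserved bit in IFx_MASK2 is fixed to 1, so the driver now ORs BIT(13) into the high half of the acceptance mask it writes. The standalone demo below shows how a 29-bit mask splits into the two 16-bit register halves with that bit forced on; the IFX_* macros are re-declared locally for the demo and are assumed to match the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define IFX_WRITE_LOW_16BIT(x)   ((x) & 0xffff)
#define IFX_WRITE_HIGH_16BIT(x)  (((x) >> 16) & 0xffff)
#define BIT(n)                   (1u << (n))

int main(void)
{
        uint32_t mask  = 0x1fffffff;                            /* example 29-bit mask */
        uint16_t mask1 = IFX_WRITE_LOW_16BIT(mask);
        uint16_t mask2 = IFX_WRITE_HIGH_16BIT(mask) | BIT(13);  /* reserved bit kept 1 */

        printf("MASK1=0x%04x MASK2=0x%04x\n", mask1, mask2);    /* 0xffff 0x3fff */
        return 0;
}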
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 4eba17b83ba8..f1b3df167ff2 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
| @@ -36,13 +36,13 @@ | |||
| 36 | 36 | ||
| 37 | #define DRV_VER "4.4.161.0u" | 37 | #define DRV_VER "4.4.161.0u" |
| 38 | #define DRV_NAME "be2net" | 38 | #define DRV_NAME "be2net" |
| 39 | #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" | 39 | #define BE_NAME "Emulex BladeEngine2" |
| 40 | #define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" | 40 | #define BE3_NAME "Emulex BladeEngine3" |
| 41 | #define OC_NAME "Emulex OneConnect 10Gbps NIC" | 41 | #define OC_NAME "Emulex OneConnect" |
| 42 | #define OC_NAME_BE OC_NAME "(be3)" | 42 | #define OC_NAME_BE OC_NAME "(be3)" |
| 43 | #define OC_NAME_LANCER OC_NAME "(Lancer)" | 43 | #define OC_NAME_LANCER OC_NAME "(Lancer)" |
| 44 | #define OC_NAME_SH OC_NAME "(Skyhawk)" | 44 | #define OC_NAME_SH OC_NAME "(Skyhawk)" |
| 45 | #define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver" | 45 | #define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver" |
| 46 | 46 | ||
| 47 | #define BE_VENDOR_ID 0x19a2 | 47 | #define BE_VENDOR_ID 0x19a2 |
| 48 | #define EMULEX_VENDOR_ID 0x10df | 48 | #define EMULEX_VENDOR_ID 0x10df |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 5c995700e534..4d6f3c54427a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | MODULE_VERSION(DRV_VER); | 25 | MODULE_VERSION(DRV_VER); |
| 26 | MODULE_DEVICE_TABLE(pci, be_dev_ids); | 26 | MODULE_DEVICE_TABLE(pci, be_dev_ids); |
| 27 | MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); | 27 | MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); |
| 28 | MODULE_AUTHOR("ServerEngines Corporation"); | 28 | MODULE_AUTHOR("Emulex Corporation"); |
| 29 | MODULE_LICENSE("GPL"); | 29 | MODULE_LICENSE("GPL"); |
| 30 | 30 | ||
| 31 | static unsigned int num_vfs; | 31 | static unsigned int num_vfs; |
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 02a12b69555f..4dab6fc265a2 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h | |||
| @@ -232,6 +232,7 @@ | |||
| 232 | #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ | 232 | #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ |
| 233 | #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ | 233 | #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ |
| 234 | #define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ | 234 | #define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ |
| 235 | #define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ | ||
| 235 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ | 236 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ |
| 236 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ | 237 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ |
| 237 | #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ | 238 | #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ |
| @@ -389,6 +390,12 @@ | |||
| 389 | 390 | ||
| 390 | #define E1000_PBS_16K E1000_PBA_16K | 391 | #define E1000_PBS_16K E1000_PBA_16K |
| 391 | 392 | ||
| 393 | /* Uncorrectable/correctable ECC Error counts and enable bits */ | ||
| 394 | #define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF | ||
| 395 | #define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 | ||
| 396 | #define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 | ||
| 397 | #define E1000_PBECCSTS_ECC_ENABLE 0x00010000 | ||
| 398 | |||
| 392 | #define IFS_MAX 80 | 399 | #define IFS_MAX 80 |
| 393 | #define IFS_MIN 40 | 400 | #define IFS_MIN 40 |
| 394 | #define IFS_RATIO 4 | 401 | #define IFS_RATIO 4 |
| @@ -408,6 +415,7 @@ | |||
| 408 | #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ | 415 | #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ |
| 409 | #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ | 416 | #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ |
| 410 | #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ | 417 | #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ |
| 418 | #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ | ||
| 411 | #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ | 419 | #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ |
| 412 | #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ | 420 | #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ |
| 413 | #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ | 421 | #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ |
| @@ -443,6 +451,7 @@ | |||
| 443 | #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ | 451 | #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ |
| 444 | #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ | 452 | #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ |
| 445 | #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ | 453 | #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ |
| 454 | #define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ | ||
| 446 | #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ | 455 | #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ |
| 447 | #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ | 456 | #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ |
| 448 | #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ | 457 | #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ |
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 6782a2eea1bc..7e95f221d60b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h | |||
| @@ -309,6 +309,8 @@ struct e1000_adapter { | |||
| 309 | 309 | ||
| 310 | struct napi_struct napi; | 310 | struct napi_struct napi; |
| 311 | 311 | ||
| 312 | unsigned int uncorr_errors; /* uncorrectable ECC errors */ | ||
| 313 | unsigned int corr_errors; /* correctable ECC errors */ | ||
| 312 | unsigned int restart_queue; | 314 | unsigned int restart_queue; |
| 313 | u32 txd_cmd; | 315 | u32 txd_cmd; |
| 314 | 316 | ||
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index f95bc6ee1c22..fd4772a2691c 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c | |||
| @@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = { | |||
| 108 | E1000_STAT("dropped_smbus", stats.mgpdc), | 108 | E1000_STAT("dropped_smbus", stats.mgpdc), |
| 109 | E1000_STAT("rx_dma_failed", rx_dma_failed), | 109 | E1000_STAT("rx_dma_failed", rx_dma_failed), |
| 110 | E1000_STAT("tx_dma_failed", tx_dma_failed), | 110 | E1000_STAT("tx_dma_failed", tx_dma_failed), |
| 111 | E1000_STAT("uncorr_ecc_errors", uncorr_errors), | ||
| 112 | E1000_STAT("corr_ecc_errors", corr_errors), | ||
| 111 | }; | 113 | }; |
| 112 | 114 | ||
| 113 | #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) | 115 | #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) |
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index cf217777586c..b88676ff3d86 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h | |||
| @@ -77,6 +77,7 @@ enum e1e_registers { | |||
| 77 | #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ | 77 | #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ |
| 78 | E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ | 78 | E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ |
| 79 | E1000_PBS = 0x01008, /* Packet Buffer Size */ | 79 | E1000_PBS = 0x01008, /* Packet Buffer Size */ |
| 80 | E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */ | ||
| 80 | E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ | 81 | E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ |
| 81 | E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ | 82 | E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ |
| 82 | E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ | 83 | E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ |
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 976336547607..24d9f61956f0 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c | |||
| @@ -3624,6 +3624,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) | |||
| 3624 | if (hw->mac.type == e1000_ich8lan) | 3624 | if (hw->mac.type == e1000_ich8lan) |
| 3625 | reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); | 3625 | reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); |
| 3626 | ew32(RFCTL, reg); | 3626 | ew32(RFCTL, reg); |
| 3627 | |||
| 3628 | /* Enable ECC on Lynxpoint */ | ||
| 3629 | if (hw->mac.type == e1000_pch_lpt) { | ||
| 3630 | reg = er32(PBECCSTS); | ||
| 3631 | reg |= E1000_PBECCSTS_ECC_ENABLE; | ||
| 3632 | ew32(PBECCSTS, reg); | ||
| 3633 | |||
| 3634 | reg = er32(CTRL); | ||
| 3635 | reg |= E1000_CTRL_MEHE; | ||
| 3636 | ew32(CTRL, reg); | ||
| 3637 | } | ||
| 3627 | } | 3638 | } |
| 3628 | 3639 | ||
| 3629 | /** | 3640 | /** |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fbf75fdca994..643c883dd795 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
| @@ -1678,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) | |||
| 1678 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 1678 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
| 1679 | } | 1679 | } |
| 1680 | 1680 | ||
| 1681 | /* Reset on uncorrectable ECC error */ | ||
| 1682 | if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { | ||
| 1683 | u32 pbeccsts = er32(PBECCSTS); | ||
| 1684 | |||
| 1685 | adapter->corr_errors += | ||
| 1686 | pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; | ||
| 1687 | adapter->uncorr_errors += | ||
| 1688 | (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> | ||
| 1689 | E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; | ||
| 1690 | |||
| 1691 | /* Do the reset outside of interrupt context */ | ||
| 1692 | schedule_work(&adapter->reset_task); | ||
| 1693 | |||
| 1694 | /* return immediately since reset is imminent */ | ||
| 1695 | return IRQ_HANDLED; | ||
| 1696 | } | ||
| 1697 | |||
| 1681 | if (napi_schedule_prep(&adapter->napi)) { | 1698 | if (napi_schedule_prep(&adapter->napi)) { |
| 1682 | adapter->total_tx_bytes = 0; | 1699 | adapter->total_tx_bytes = 0; |
| 1683 | adapter->total_tx_packets = 0; | 1700 | adapter->total_tx_packets = 0; |
| @@ -1741,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
| 1741 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 1758 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
| 1742 | } | 1759 | } |
| 1743 | 1760 | ||
| 1761 | /* Reset on uncorrectable ECC error */ | ||
| 1762 | if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { | ||
| 1763 | u32 pbeccsts = er32(PBECCSTS); | ||
| 1764 | |||
| 1765 | adapter->corr_errors += | ||
| 1766 | pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; | ||
| 1767 | adapter->uncorr_errors += | ||
| 1768 | (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> | ||
| 1769 | E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; | ||
| 1770 | |||
| 1771 | /* Do the reset outside of interrupt context */ | ||
| 1772 | schedule_work(&adapter->reset_task); | ||
| 1773 | |||
| 1774 | /* return immediately since reset is imminent */ | ||
| 1775 | return IRQ_HANDLED; | ||
| 1776 | } | ||
| 1777 | |||
| 1744 | if (napi_schedule_prep(&adapter->napi)) { | 1778 | if (napi_schedule_prep(&adapter->napi)) { |
| 1745 | adapter->total_tx_bytes = 0; | 1779 | adapter->total_tx_bytes = 0; |
| 1746 | adapter->total_tx_packets = 0; | 1780 | adapter->total_tx_packets = 0; |
| @@ -2104,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) | |||
| 2104 | if (adapter->msix_entries) { | 2138 | if (adapter->msix_entries) { |
| 2105 | ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); | 2139 | ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); |
| 2106 | ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); | 2140 | ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); |
| 2141 | } else if (hw->mac.type == e1000_pch_lpt) { | ||
| 2142 | ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); | ||
| 2107 | } else { | 2143 | } else { |
| 2108 | ew32(IMS, IMS_ENABLE_MASK); | 2144 | ew32(IMS, IMS_ENABLE_MASK); |
| 2109 | } | 2145 | } |
| @@ -4251,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) | |||
| 4251 | adapter->stats.mgptc += er32(MGTPTC); | 4287 | adapter->stats.mgptc += er32(MGTPTC); |
| 4252 | adapter->stats.mgprc += er32(MGTPRC); | 4288 | adapter->stats.mgprc += er32(MGTPRC); |
| 4253 | adapter->stats.mgpdc += er32(MGTPDC); | 4289 | adapter->stats.mgpdc += er32(MGTPDC); |
| 4290 | |||
| 4291 | /* Correctable ECC Errors */ | ||
| 4292 | if (hw->mac.type == e1000_pch_lpt) { | ||
| 4293 | u32 pbeccsts = er32(PBECCSTS); | ||
| 4294 | adapter->corr_errors += | ||
| 4295 | pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; | ||
| 4296 | adapter->uncorr_errors += | ||
| 4297 | (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> | ||
| 4298 | E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; | ||
| 4299 | } | ||
| 4254 | } | 4300 | } |
| 4255 | 4301 | ||
| 4256 | /** | 4302 | /** |
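The e1000e changes wire up packet-buffer ECC reporting on Lynxpoint (pch_lpt): ECC is enabled in e1000_initialize_hw_bits_ich8lan(), both interrupt handlers schedule a reset when an uncorrectable error is signalled, and the stats path accumulates the PBECCSTS counters exposed through ethtool as corr_ecc_errors/uncorr_ecc_errors. The runnable demo below only exercises the field extraction, using the mask and shift values added in defines.h; the register value is invented.

#include <stdint.h>
#include <stdio.h>

#define E1000_PBECCSTS_CORR_ERR_CNT_MASK     0x000000FF
#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK   0x0000FF00
#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT  8

int main(void)
{
        uint32_t pbeccsts = 0x00000305;         /* example register value */
        unsigned int corr, uncorr;

        corr   = pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
        uncorr = (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
                 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

        printf("correctable=%u uncorrectable=%u\n", corr, uncorr);      /* 5 and 3 */
        return 0;
}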
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a6542d75374c..5163af314990 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 380 | } | 380 | } |
| 381 | } | 381 | } |
| 382 | 382 | ||
| 383 | if ((dev_cap->flags & | 383 | if ((dev->caps.flags & |
| 384 | (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && | 384 | (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && |
| 385 | mlx4_is_master(dev)) | 385 | mlx4_is_master(dev)) |
| 386 | dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; | 386 | dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; |
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 7992b3e05d3d..78ace59efd29 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c | |||
| @@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev) | |||
| 1801 | rp->tx_skbuff[entry]->len, | 1801 | rp->tx_skbuff[entry]->len, |
| 1802 | PCI_DMA_TODEVICE); | 1802 | PCI_DMA_TODEVICE); |
| 1803 | } | 1803 | } |
| 1804 | dev_kfree_skb_irq(rp->tx_skbuff[entry]); | 1804 | dev_kfree_skb(rp->tx_skbuff[entry]); |
| 1805 | rp->tx_skbuff[entry] = NULL; | 1805 | rp->tx_skbuff[entry] = NULL; |
| 1806 | entry = (++rp->dirty_tx) % TX_RING_SIZE; | 1806 | entry = (++rp->dirty_tx) % TX_RING_SIZE; |
| 1807 | } | 1807 | } |
| @@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work) | |||
| 2010 | if (intr_status & IntrPCIErr) | 2010 | if (intr_status & IntrPCIErr) |
| 2011 | netif_warn(rp, hw, dev, "PCI error\n"); | 2011 | netif_warn(rp, hw, dev, "PCI error\n"); |
| 2012 | 2012 | ||
| 2013 | napi_disable(&rp->napi); | 2013 | iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); |
| 2014 | rhine_irq_disable(rp); | ||
| 2015 | /* Slow and safe. Consider __napi_schedule as a replacement ? */ | ||
| 2016 | napi_enable(&rp->napi); | ||
| 2017 | napi_schedule(&rp->napi); | ||
| 2018 | 2014 | ||
| 2019 | out_unlock: | 2015 | out_unlock: |
| 2020 | mutex_unlock(&rp->task_lock); | 2016 | mutex_unlock(&rp->task_lock); |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index cc09b67c23bc..2917a86f4c43 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data) | |||
| 298 | } | 298 | } |
| 299 | 299 | ||
| 300 | static void tun_flow_update(struct tun_struct *tun, u32 rxhash, | 300 | static void tun_flow_update(struct tun_struct *tun, u32 rxhash, |
| 301 | u16 queue_index) | 301 | struct tun_file *tfile) |
| 302 | { | 302 | { |
| 303 | struct hlist_head *head; | 303 | struct hlist_head *head; |
| 304 | struct tun_flow_entry *e; | 304 | struct tun_flow_entry *e; |
| 305 | unsigned long delay = tun->ageing_time; | 305 | unsigned long delay = tun->ageing_time; |
| 306 | u16 queue_index = tfile->queue_index; | ||
| 306 | 307 | ||
| 307 | if (!rxhash) | 308 | if (!rxhash) |
| 308 | return; | 309 | return; |
| @@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash, | |||
| 311 | 312 | ||
| 312 | rcu_read_lock(); | 313 | rcu_read_lock(); |
| 313 | 314 | ||
| 314 | if (tun->numqueues == 1) | 315 | /* We may get a very small possibility of OOO during switching, not |
| 316 | * worth to optimize.*/ | ||
| 317 | if (tun->numqueues == 1 || tfile->detached) | ||
| 315 | goto unlock; | 318 | goto unlock; |
| 316 | 319 | ||
| 317 | e = tun_flow_find(head, rxhash); | 320 | e = tun_flow_find(head, rxhash); |
| @@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean) | |||
| 411 | 414 | ||
| 412 | tun = rtnl_dereference(tfile->tun); | 415 | tun = rtnl_dereference(tfile->tun); |
| 413 | 416 | ||
| 414 | if (tun) { | 417 | if (tun && !tfile->detached) { |
| 415 | u16 index = tfile->queue_index; | 418 | u16 index = tfile->queue_index; |
| 416 | BUG_ON(index >= tun->numqueues); | 419 | BUG_ON(index >= tun->numqueues); |
| 417 | dev = tun->dev; | 420 | dev = tun->dev; |
| 418 | 421 | ||
| 419 | rcu_assign_pointer(tun->tfiles[index], | 422 | rcu_assign_pointer(tun->tfiles[index], |
| 420 | tun->tfiles[tun->numqueues - 1]); | 423 | tun->tfiles[tun->numqueues - 1]); |
| 421 | rcu_assign_pointer(tfile->tun, NULL); | ||
| 422 | ntfile = rtnl_dereference(tun->tfiles[index]); | 424 | ntfile = rtnl_dereference(tun->tfiles[index]); |
| 423 | ntfile->queue_index = index; | 425 | ntfile->queue_index = index; |
| 424 | 426 | ||
| 425 | --tun->numqueues; | 427 | --tun->numqueues; |
| 426 | if (clean) | 428 | if (clean) { |
| 429 | rcu_assign_pointer(tfile->tun, NULL); | ||
| 427 | sock_put(&tfile->sk); | 430 | sock_put(&tfile->sk); |
| 428 | else | 431 | } else |
| 429 | tun_disable_queue(tun, tfile); | 432 | tun_disable_queue(tun, tfile); |
| 430 | 433 | ||
| 431 | synchronize_net(); | 434 | synchronize_net(); |
| @@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean) | |||
| 439 | } | 442 | } |
| 440 | 443 | ||
| 441 | if (clean) { | 444 | if (clean) { |
| 442 | if (tun && tun->numqueues == 0 && tun->numdisabled == 0 && | 445 | if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { |
| 443 | !(tun->flags & TUN_PERSIST)) | 446 | netif_carrier_off(tun->dev); |
| 444 | if (tun->dev->reg_state == NETREG_REGISTERED) | 447 | |
| 448 | if (!(tun->flags & TUN_PERSIST) && | ||
| 449 | tun->dev->reg_state == NETREG_REGISTERED) | ||
| 445 | unregister_netdevice(tun->dev); | 450 | unregister_netdevice(tun->dev); |
| 451 | } | ||
| 446 | 452 | ||
| 447 | BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, | 453 | BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, |
| 448 | &tfile->socket.flags)); | 454 | &tfile->socket.flags)); |
| @@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev) | |||
| 470 | rcu_assign_pointer(tfile->tun, NULL); | 476 | rcu_assign_pointer(tfile->tun, NULL); |
| 471 | --tun->numqueues; | 477 | --tun->numqueues; |
| 472 | } | 478 | } |
| 479 | list_for_each_entry(tfile, &tun->disabled, next) { | ||
| 480 | wake_up_all(&tfile->wq.wait); | ||
| 481 | rcu_assign_pointer(tfile->tun, NULL); | ||
| 482 | } | ||
| 473 | BUG_ON(tun->numqueues != 0); | 483 | BUG_ON(tun->numqueues != 0); |
| 474 | 484 | ||
| 475 | synchronize_net(); | 485 | synchronize_net(); |
| @@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file) | |||
| 500 | goto out; | 510 | goto out; |
| 501 | 511 | ||
| 502 | err = -EINVAL; | 512 | err = -EINVAL; |
| 503 | if (rtnl_dereference(tfile->tun)) | 513 | if (rtnl_dereference(tfile->tun) && !tfile->detached) |
| 504 | goto out; | 514 | goto out; |
| 505 | 515 | ||
| 506 | err = -EBUSY; | 516 | err = -EBUSY; |
| @@ -1199,7 +1209,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1199 | tun->dev->stats.rx_packets++; | 1209 | tun->dev->stats.rx_packets++; |
| 1200 | tun->dev->stats.rx_bytes += len; | 1210 | tun->dev->stats.rx_bytes += len; |
| 1201 | 1211 | ||
| 1202 | tun_flow_update(tun, rxhash, tfile->queue_index); | 1212 | tun_flow_update(tun, rxhash, tfile); |
| 1203 | return total_len; | 1213 | return total_len; |
| 1204 | } | 1214 | } |
| 1205 | 1215 | ||
| @@ -1658,10 +1668,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
| 1658 | device_create_file(&tun->dev->dev, &dev_attr_owner) || | 1668 | device_create_file(&tun->dev->dev, &dev_attr_owner) || |
| 1659 | device_create_file(&tun->dev->dev, &dev_attr_group)) | 1669 | device_create_file(&tun->dev->dev, &dev_attr_group)) |
| 1660 | pr_err("Failed to create tun sysfs files\n"); | 1670 | pr_err("Failed to create tun sysfs files\n"); |
| 1661 | |||
| 1662 | netif_carrier_on(tun->dev); | ||
| 1663 | } | 1671 | } |
| 1664 | 1672 | ||
| 1673 | netif_carrier_on(tun->dev); | ||
| 1674 | |||
| 1665 | tun_debug(KERN_INFO, tun, "tun_set_iff\n"); | 1675 | tun_debug(KERN_INFO, tun, "tun_set_iff\n"); |
| 1666 | 1676 | ||
| 1667 | if (ifr->ifr_flags & IFF_NO_PI) | 1677 | if (ifr->ifr_flags & IFF_NO_PI) |
| @@ -1813,7 +1823,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) | |||
| 1813 | ret = tun_attach(tun, file); | 1823 | ret = tun_attach(tun, file); |
| 1814 | } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { | 1824 | } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { |
| 1815 | tun = rtnl_dereference(tfile->tun); | 1825 | tun = rtnl_dereference(tfile->tun); |
| 1816 | if (!tun || !(tun->flags & TUN_TAP_MQ)) | 1826 | if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached) |
| 1817 | ret = -EINVAL; | 1827 | ret = -EINVAL; |
| 1818 | else | 1828 | else |
| 1819 | __tun_detach(tfile, false); | 1829 | __tun_detach(tfile, false); |
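The tun changes distinguish a detached multiqueue file (still bound to the device, parked on tun->disabled) from a fully released one: tfile->detached is now checked in attach, detach, the queue-detach ioctl and the flow-update path, the tun pointer is only cleared on a real close, detached files are woken in tun_detach_all(), and carrier state follows the number of usable queues. Below is a reduced sketch of the state test that keeps recurring in these hunks; the types are stand-ins (the real tfile->detached is driver state, not a plain bool).

#include <stdbool.h>

struct fake_tun;

struct fake_tfile {
        struct fake_tun *tun;   /* device the file is bound to, if any */
        bool detached;          /* parked on the disabled list */
};

/* A file counts as an active queue only when it is bound to a device
 * and not sitting on the disabled list. */
static bool tfile_is_attached(const struct fake_tfile *tfile)
{
        return tfile->tun && !tfile->detached;
}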
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 9197b2c72ca3..00d3b2d37828 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
| @@ -1215,6 +1215,9 @@ static const struct usb_device_id cdc_devs[] = { | |||
| 1215 | { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46), | 1215 | { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46), |
| 1216 | .driver_info = (unsigned long)&wwan_info, | 1216 | .driver_info = (unsigned long)&wwan_info, |
| 1217 | }, | 1217 | }, |
| 1218 | { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), | ||
| 1219 | .driver_info = (unsigned long)&wwan_info, | ||
| 1220 | }, | ||
| 1218 | 1221 | ||
| 1219 | /* Infineon(now Intel) HSPA Modem platform */ | 1222 | /* Infineon(now Intel) HSPA Modem platform */ |
| 1220 | { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443, | 1223 | { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443, |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 575a5839ee34..c8e05e27f38c 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -351,6 +351,10 @@ static const struct usb_device_id products[] = { | |||
| 351 | USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57), | 351 | USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57), |
| 352 | .driver_info = (unsigned long)&qmi_wwan_info, | 352 | .driver_info = (unsigned long)&qmi_wwan_info, |
| 353 | }, | 353 | }, |
| 354 | { /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */ | ||
| 355 | USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), | ||
| 356 | .driver_info = (unsigned long)&qmi_wwan_info, | ||
| 357 | }, | ||
| 354 | 358 | ||
| 355 | /* 2. Combined interface devices matching on class+protocol */ | 359 | /* 2. Combined interface devices matching on class+protocol */ |
| 356 | { /* Huawei E367 and possibly others in "Windows mode" */ | 360 | { /* Huawei E367 and possibly others in "Windows mode" */ |
| @@ -361,6 +365,14 @@ static const struct usb_device_id products[] = { | |||
| 361 | USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), | 365 | USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), |
| 362 | .driver_info = (unsigned long)&qmi_wwan_info, | 366 | .driver_info = (unsigned long)&qmi_wwan_info, |
| 363 | }, | 367 | }, |
| 368 | { /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */ | ||
| 369 | USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37), | ||
| 370 | .driver_info = (unsigned long)&qmi_wwan_info, | ||
| 371 | }, | ||
| 372 | { /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */ | ||
| 373 | USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67), | ||
| 374 | .driver_info = (unsigned long)&qmi_wwan_info, | ||
| 375 | }, | ||
| 364 | { /* Pantech UML290, P4200 and more */ | 376 | { /* Pantech UML290, P4200 and more */ |
| 365 | USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), | 377 | USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), |
| 366 | .driver_info = (unsigned long)&qmi_wwan_info, | 378 | .driver_info = (unsigned long)&qmi_wwan_info, |
| @@ -461,6 +473,7 @@ static const struct usb_device_id products[] = { | |||
| 461 | {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ | 473 | {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ |
| 462 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ | 474 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ |
| 463 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ | 475 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ |
| 476 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ | ||
| 464 | 477 | ||
| 465 | /* 4. Gobi 1000 devices */ | 478 | /* 4. Gobi 1000 devices */ |
| 466 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ | 479 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index f34b2ebee815..5e33606c1366 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
| @@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) | |||
| 380 | unsigned long lockflags; | 380 | unsigned long lockflags; |
| 381 | size_t size = dev->rx_urb_size; | 381 | size_t size = dev->rx_urb_size; |
| 382 | 382 | ||
| 383 | /* prevent rx skb allocation when error ratio is high */ | ||
| 384 | if (test_bit(EVENT_RX_KILL, &dev->flags)) { | ||
| 385 | usb_free_urb(urb); | ||
| 386 | return -ENOLINK; | ||
| 387 | } | ||
| 388 | |||
| 383 | skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); | 389 | skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); |
| 384 | if (!skb) { | 390 | if (!skb) { |
| 385 | netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); | 391 | netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); |
| @@ -539,6 +545,17 @@ block: | |||
| 539 | break; | 545 | break; |
| 540 | } | 546 | } |
| 541 | 547 | ||
| 548 | /* stop rx if packet error rate is high */ | ||
| 549 | if (++dev->pkt_cnt > 30) { | ||
| 550 | dev->pkt_cnt = 0; | ||
| 551 | dev->pkt_err = 0; | ||
| 552 | } else { | ||
| 553 | if (state == rx_cleanup) | ||
| 554 | dev->pkt_err++; | ||
| 555 | if (dev->pkt_err > 20) | ||
| 556 | set_bit(EVENT_RX_KILL, &dev->flags); | ||
| 557 | } | ||
| 558 | |||
| 542 | state = defer_bh(dev, skb, &dev->rxq, state); | 559 | state = defer_bh(dev, skb, &dev->rxq, state); |
| 543 | 560 | ||
| 544 | if (urb) { | 561 | if (urb) { |
| @@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net) | |||
| 791 | (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" : | 808 | (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" : |
| 792 | "simple"); | 809 | "simple"); |
| 793 | 810 | ||
| 811 | /* reset rx error state */ | ||
| 812 | dev->pkt_cnt = 0; | ||
| 813 | dev->pkt_err = 0; | ||
| 814 | clear_bit(EVENT_RX_KILL, &dev->flags); | ||
| 815 | |||
| 794 | // delay posting reads until we're fully open | 816 | // delay posting reads until we're fully open |
| 795 | tasklet_schedule (&dev->bh); | 817 | tasklet_schedule (&dev->bh); |
| 796 | if (info->manage_power) { | 818 | if (info->manage_power) { |
| @@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, | |||
| 1103 | if (info->tx_fixup) { | 1125 | if (info->tx_fixup) { |
| 1104 | skb = info->tx_fixup (dev, skb, GFP_ATOMIC); | 1126 | skb = info->tx_fixup (dev, skb, GFP_ATOMIC); |
| 1105 | if (!skb) { | 1127 | if (!skb) { |
| 1106 | if (netif_msg_tx_err(dev)) { | 1128 | /* packet collected; minidriver waiting for more */ |
| 1107 | netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); | 1129 | if (info->flags & FLAG_MULTI_PACKET) |
| 1108 | goto drop; | ||
| 1109 | } else { | ||
| 1110 | /* cdc_ncm collected packet; waits for more */ | ||
| 1111 | goto not_drop; | 1130 | goto not_drop; |
| 1112 | } | 1131 | netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); |
| 1132 | goto drop; | ||
| 1113 | } | 1133 | } |
| 1114 | } | 1134 | } |
| 1115 | length = skb->len; | 1135 | length = skb->len; |
| @@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param) | |||
| 1254 | } | 1274 | } |
| 1255 | } | 1275 | } |
| 1256 | 1276 | ||
| 1277 | /* restart RX again after disabling due to high error rate */ | ||
| 1278 | clear_bit(EVENT_RX_KILL, &dev->flags); | ||
| 1279 | |||
| 1257 | // waiting for all pending urbs to complete? | 1280 | // waiting for all pending urbs to complete? |
| 1258 | if (dev->wait) { | 1281 | if (dev->wait) { |
| 1259 | if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { | 1282 | if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { |
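The usbnet hunks above add a crude error-rate throttle: completions are counted in windows of 30 packets, and if more than 20 of them end in rx_cleanup the EVENT_RX_KILL bit is set, which makes rx_submit() refuse new URBs with -ENOLINK; the bit is cleared again in usbnet_bh() and when the interface is (re)opened. The standalone sketch below only models that windowed decision — struct rx_stats and rx_account() are illustrative names, not part of the driver:

/* Minimal userspace model of the RX kill-switch window added to usbnet
 * (a sketch; the real driver keeps pkt_cnt/pkt_err in struct usbnet and
 * uses the EVENT_RX_KILL bit in dev->flags). */
#include <stdbool.h>
#include <stdio.h>

struct rx_stats {
    int pkt_cnt;    /* packets seen in the current window */
    int pkt_err;    /* packets that ended in rx_cleanup (errors) */
    bool rx_kill;   /* models test_bit(EVENT_RX_KILL, &dev->flags) */
};

/* Called once per completed RX packet; 'error' mirrors state == rx_cleanup. */
static void rx_account(struct rx_stats *s, bool error)
{
    if (++s->pkt_cnt > 30) {
        /* window full: start a fresh window */
        s->pkt_cnt = 0;
        s->pkt_err = 0;
    } else {
        if (error)
            s->pkt_err++;
        if (s->pkt_err > 20)
            s->rx_kill = true;   /* stop submitting new RX URBs */
    }
}

int main(void)
{
    struct rx_stats s = { 0 };
    for (int i = 0; i < 25; i++)
        rx_account(&s, true);
    printf("rx_kill=%d\n", s.rx_kill);   /* prints 1: >20 errors in one window */
    return 0;
}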
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index dc8913c6238c..12c6440d1649 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
| @@ -154,8 +154,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) | |||
| 154 | if (ret & 1) { /* Link is up. */ | 154 | if (ret & 1) { /* Link is up. */ |
| 155 | printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", | 155 | printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", |
| 156 | adapter->netdev->name, adapter->link_speed); | 156 | adapter->netdev->name, adapter->link_speed); |
| 157 | if (!netif_carrier_ok(adapter->netdev)) | 157 | netif_carrier_on(adapter->netdev); |
| 158 | netif_carrier_on(adapter->netdev); | ||
| 159 | 158 | ||
| 160 | if (affectTxQueue) { | 159 | if (affectTxQueue) { |
| 161 | for (i = 0; i < adapter->num_tx_queues; i++) | 160 | for (i = 0; i < adapter->num_tx_queues; i++) |
| @@ -165,8 +164,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) | |||
| 165 | } else { | 164 | } else { |
| 166 | printk(KERN_INFO "%s: NIC Link is Down\n", | 165 | printk(KERN_INFO "%s: NIC Link is Down\n", |
| 167 | adapter->netdev->name); | 166 | adapter->netdev->name); |
| 168 | if (netif_carrier_ok(adapter->netdev)) | 167 | netif_carrier_off(adapter->netdev); |
| 169 | netif_carrier_off(adapter->netdev); | ||
| 170 | 168 | ||
| 171 | if (affectTxQueue) { | 169 | if (affectTxQueue) { |
| 172 | for (i = 0; i < adapter->num_tx_queues; i++) | 170 | for (i = 0; i < adapter->num_tx_queues; i++) |
| @@ -3061,6 +3059,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
| 3061 | netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); | 3059 | netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); |
| 3062 | netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); | 3060 | netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); |
| 3063 | 3061 | ||
| 3062 | netif_carrier_off(netdev); | ||
| 3064 | err = register_netdev(netdev); | 3063 | err = register_netdev(netdev); |
| 3065 | 3064 | ||
| 3066 | if (err) { | 3065 | if (err) { |
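The vmxnet3 change drops the netif_carrier_ok() guards — netif_carrier_on()/netif_carrier_off() already test the carrier bit themselves and do nothing when the state is unchanged — and marks the carrier off before register_netdev() so a freshly probed device is never reported link-up before the link has actually been checked. A tiny model of why an external guard around a test-and-set style setter is redundant (names here are illustrative):

/* Sketch: if the setter itself checks the current state, wrapping it in
 * "if (!already_up) setter()" adds nothing but a window for races. */
#include <stdbool.h>
#include <stdio.h>

static bool carrier_up;

/* Acts only on a real off -> on transition, like netif_carrier_on(). */
static bool carrier_on(void)
{
    if (carrier_up)
        return false;        /* already up: nothing to do */
    carrier_up = true;
    return true;
}

int main(void)
{
    printf("%d %d\n", carrier_on(), carrier_on());   /* prints: 1 0 */
    return 0;
}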
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 0f71d1d4339d..e5fd20994bec 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include "debug.h" | 36 | #include "debug.h" |
| 37 | 37 | ||
| 38 | #define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */ | 38 | #define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */ |
| 39 | #define BRCMS_FLUSH_TIMEOUT 500 /* msec */ | ||
| 39 | 40 | ||
| 40 | /* Flags we support */ | 41 | /* Flags we support */ |
| 41 | #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \ | 42 | #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \ |
| @@ -708,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw) | |||
| 708 | wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked); | 709 | wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked); |
| 709 | } | 710 | } |
| 710 | 711 | ||
| 712 | static bool brcms_tx_flush_completed(struct brcms_info *wl) | ||
| 713 | { | ||
| 714 | bool result; | ||
| 715 | |||
| 716 | spin_lock_bh(&wl->lock); | ||
| 717 | result = brcms_c_tx_flush_completed(wl->wlc); | ||
| 718 | spin_unlock_bh(&wl->lock); | ||
| 719 | return result; | ||
| 720 | } | ||
| 721 | |||
| 711 | static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop) | 722 | static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop) |
| 712 | { | 723 | { |
| 713 | struct brcms_info *wl = hw->priv; | 724 | struct brcms_info *wl = hw->priv; |
| 725 | int ret; | ||
| 714 | 726 | ||
| 715 | no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false"); | 727 | no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false"); |
| 716 | 728 | ||
| 717 | /* wait for packet queue and dma fifos to run empty */ | 729 | ret = wait_event_timeout(wl->tx_flush_wq, |
| 718 | spin_lock_bh(&wl->lock); | 730 | brcms_tx_flush_completed(wl), |
| 719 | brcms_c_wait_for_tx_completion(wl->wlc, drop); | 731 | msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT)); |
| 720 | spin_unlock_bh(&wl->lock); | 732 | |
| 733 | brcms_dbg_mac80211(wl->wlc->hw->d11core, | ||
| 734 | "ret=%d\n", jiffies_to_msecs(ret)); | ||
| 721 | } | 735 | } |
| 722 | 736 | ||
| 723 | static const struct ieee80211_ops brcms_ops = { | 737 | static const struct ieee80211_ops brcms_ops = { |
| @@ -772,6 +786,7 @@ void brcms_dpc(unsigned long data) | |||
| 772 | 786 | ||
| 773 | done: | 787 | done: |
| 774 | spin_unlock_bh(&wl->lock); | 788 | spin_unlock_bh(&wl->lock); |
| 789 | wake_up(&wl->tx_flush_wq); | ||
| 775 | } | 790 | } |
| 776 | 791 | ||
| 777 | /* | 792 | /* |
| @@ -1020,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) | |||
| 1020 | 1035 | ||
| 1021 | atomic_set(&wl->callbacks, 0); | 1036 | atomic_set(&wl->callbacks, 0); |
| 1022 | 1037 | ||
| 1038 | init_waitqueue_head(&wl->tx_flush_wq); | ||
| 1039 | |||
| 1023 | /* setup the bottom half handler */ | 1040 | /* setup the bottom half handler */ |
| 1024 | tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl); | 1041 | tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl); |
| 1025 | 1042 | ||
| @@ -1609,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl) | |||
| 1609 | spin_lock_bh(&wl->lock); | 1626 | spin_lock_bh(&wl->lock); |
| 1610 | return blocked; | 1627 | return blocked; |
| 1611 | } | 1628 | } |
| 1612 | |||
| 1613 | /* | ||
| 1614 | * precondition: perimeter lock has been acquired | ||
| 1615 | */ | ||
| 1616 | void brcms_msleep(struct brcms_info *wl, uint ms) | ||
| 1617 | { | ||
| 1618 | spin_unlock_bh(&wl->lock); | ||
| 1619 | msleep(ms); | ||
| 1620 | spin_lock_bh(&wl->lock); | ||
| 1621 | } | ||
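These brcmsmac hunks replace the old flush loop — brcms_msleep() polling brcms_txpktpendtot() under the perimeter lock — with a waitqueue: brcms_ops_flush() now sleeps on wl->tx_flush_wq via wait_event_timeout() with a 500 ms cap (BRCMS_FLUSH_TIMEOUT), the DPC wakes the queue after each pass, and brcms_tx_flush_completed() samples the pending count under the lock. The sketch below only models the wait_event_timeout() semantics (wait until a predicate holds or a budget expires, returning 0 on timeout); the names and the polling are illustrative, since the real code sleeps and is woken explicitly:

/* Userspace model of the wait-with-deadline pattern the patch switches to. */
#include <stdbool.h>
#include <stdio.h>

static int pending_packets = 3;

static bool tx_flush_completed(void)      /* models brcms_tx_flush_completed() */
{
    return pending_packets == 0;
}

static long wait_flush(long timeout_ticks)
{
    while (timeout_ticks > 0) {
        if (tx_flush_completed())
            return timeout_ticks;         /* flushed with time to spare */
        pending_packets--;                /* stand-in for a tx completion + wake_up */
        timeout_ticks--;
    }
    return tx_flush_completed() ? 1 : 0;  /* 0 == timed out, like wait_event_timeout() */
}

int main(void)
{
    printf("remaining ticks: %ld\n", wait_flush(500));
    return 0;
}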
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h index 9358bd5ebd35..947ccacf43e6 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h | |||
| @@ -68,6 +68,8 @@ struct brcms_info { | |||
| 68 | spinlock_t lock; /* per-device perimeter lock */ | 68 | spinlock_t lock; /* per-device perimeter lock */ |
| 69 | spinlock_t isr_lock; /* per-device ISR synchronization lock */ | 69 | spinlock_t isr_lock; /* per-device ISR synchronization lock */ |
| 70 | 70 | ||
| 71 | /* tx flush */ | ||
| 72 | wait_queue_head_t tx_flush_wq; | ||
| 71 | 73 | ||
| 72 | /* timer related fields */ | 74 | /* timer related fields */ |
| 73 | atomic_t callbacks; /* # outstanding callback functions */ | 75 | atomic_t callbacks; /* # outstanding callback functions */ |
| @@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl, | |||
| 100 | extern void brcms_free_timer(struct brcms_timer *timer); | 102 | extern void brcms_free_timer(struct brcms_timer *timer); |
| 101 | extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic); | 103 | extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic); |
| 102 | extern bool brcms_del_timer(struct brcms_timer *timer); | 104 | extern bool brcms_del_timer(struct brcms_timer *timer); |
| 103 | extern void brcms_msleep(struct brcms_info *wl, uint ms); | ||
| 104 | extern void brcms_dpc(unsigned long data); | 105 | extern void brcms_dpc(unsigned long data); |
| 105 | extern void brcms_timer(struct brcms_timer *t); | 106 | extern void brcms_timer(struct brcms_timer *t); |
| 106 | extern void brcms_fatal_error(struct brcms_info *wl); | 107 | extern void brcms_fatal_error(struct brcms_info *wl); |
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 17594de4199e..8b5839008af3 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c | |||
| @@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs) | |||
| 1027 | static bool | 1027 | static bool |
| 1028 | brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) | 1028 | brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) |
| 1029 | { | 1029 | { |
| 1030 | bool morepending = false; | ||
| 1031 | struct bcma_device *core; | 1030 | struct bcma_device *core; |
| 1032 | struct tx_status txstatus, *txs; | 1031 | struct tx_status txstatus, *txs; |
| 1033 | u32 s1, s2; | 1032 | u32 s1, s2; |
| @@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) | |||
| 1041 | txs = &txstatus; | 1040 | txs = &txstatus; |
| 1042 | core = wlc_hw->d11core; | 1041 | core = wlc_hw->d11core; |
| 1043 | *fatal = false; | 1042 | *fatal = false; |
| 1044 | s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); | ||
| 1045 | while (!(*fatal) | ||
| 1046 | && (s1 & TXS_V)) { | ||
| 1047 | /* !give others some time to run! */ | ||
| 1048 | if (n >= max_tx_num) { | ||
| 1049 | morepending = true; | ||
| 1050 | break; | ||
| 1051 | } | ||
| 1052 | 1043 | ||
| 1044 | while (n < max_tx_num) { | ||
| 1045 | s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); | ||
| 1053 | if (s1 == 0xffffffff) { | 1046 | if (s1 == 0xffffffff) { |
| 1054 | brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit, | 1047 | brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit, |
| 1055 | __func__); | 1048 | __func__); |
| 1056 | *fatal = true; | 1049 | *fatal = true; |
| 1057 | return false; | 1050 | return false; |
| 1058 | } | 1051 | } |
| 1059 | s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); | 1052 | /* only process when valid */ |
| 1053 | if (!(s1 & TXS_V)) | ||
| 1054 | break; | ||
| 1060 | 1055 | ||
| 1056 | s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); | ||
| 1061 | txs->status = s1 & TXS_STATUS_MASK; | 1057 | txs->status = s1 & TXS_STATUS_MASK; |
| 1062 | txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT; | 1058 | txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT; |
| 1063 | txs->sequence = s2 & TXS_SEQ_MASK; | 1059 | txs->sequence = s2 & TXS_SEQ_MASK; |
| @@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) | |||
| 1065 | txs->lasttxtime = 0; | 1061 | txs->lasttxtime = 0; |
| 1066 | 1062 | ||
| 1067 | *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs); | 1063 | *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs); |
| 1068 | 1064 | if (*fatal == true) | |
| 1069 | s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); | 1065 | return false; |
| 1070 | n++; | 1066 | n++; |
| 1071 | } | 1067 | } |
| 1072 | 1068 | ||
| 1073 | if (*fatal) | 1069 | return n >= max_tx_num; |
| 1074 | return false; | ||
| 1075 | |||
| 1076 | return morepending; | ||
| 1077 | } | 1070 | } |
| 1078 | 1071 | ||
| 1079 | static void brcms_c_tbtt(struct brcms_c_info *wlc) | 1072 | static void brcms_c_tbtt(struct brcms_c_info *wlc) |
| @@ -7518,25 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc) | |||
| 7518 | return wlc->band->bandunit; | 7511 | return wlc->band->bandunit; |
| 7519 | } | 7512 | } |
| 7520 | 7513 | ||
| 7521 | void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) | 7514 | bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc) |
| 7522 | { | 7515 | { |
| 7523 | int timeout = 20; | ||
| 7524 | int i; | 7516 | int i; |
| 7525 | 7517 | ||
| 7526 | /* Kick DMA to send any pending AMPDU */ | 7518 | /* Kick DMA to send any pending AMPDU */ |
| 7527 | for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++) | 7519 | for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++) |
| 7528 | if (wlc->hw->di[i]) | 7520 | if (wlc->hw->di[i]) |
| 7529 | dma_txflush(wlc->hw->di[i]); | 7521 | dma_kick_tx(wlc->hw->di[i]); |
| 7530 | |||
| 7531 | /* wait for queue and DMA fifos to run dry */ | ||
| 7532 | while (brcms_txpktpendtot(wlc) > 0) { | ||
| 7533 | brcms_msleep(wlc->wl, 1); | ||
| 7534 | |||
| 7535 | if (--timeout == 0) | ||
| 7536 | break; | ||
| 7537 | } | ||
| 7538 | 7522 | ||
| 7539 | WARN_ON_ONCE(timeout == 0); | 7523 | return !brcms_txpktpendtot(wlc); |
| 7540 | } | 7524 | } |
| 7541 | 7525 | ||
| 7542 | void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval) | 7526 | void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval) |
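The main.c hunk above restructures brcms_b_txstatus() into a bounded drain loop: the frame-status register is read at the top of each iteration, the function bails out immediately on a dead chip (0xffffffff) or a fatal dotxstatus result, breaks when the valid bit is clear, and reports "more pending" simply as n >= max_tx_num instead of carrying a separate morepending flag. A self-contained sketch of that loop shape, with a plain array standing in for the frmtxstatus register:

/* Sketch of a bounded status-drain loop that reports leftover work. */
#include <stdbool.h>
#include <stdio.h>

#define STATUS_VALID  0x1u
#define MAX_TX_NUM    8

static unsigned int fifo[12];
static unsigned int fifo_pos;

static unsigned int read_status(void)     /* stands in for bcma_read32(frmtxstatus) */
{
    return fifo_pos < 12 ? fifo[fifo_pos++] : 0;
}

/* Returns true when the budget ran out while entries were still valid,
 * i.e. "more pending", which the caller uses to reschedule itself. */
static bool drain_tx_status(void)
{
    unsigned int n = 0;

    while (n < MAX_TX_NUM) {
        unsigned int s = read_status();
        if (!(s & STATUS_VALID))
            break;                         /* nothing more to process */
        /* ... process one tx status here ... */
        n++;
    }
    return n >= MAX_TX_NUM;
}

int main(void)
{
    for (int i = 0; i < 10; i++)
        fifo[i] = STATUS_VALID;            /* 10 valid entries, budget of 8 */
    printf("more pending: %d\n", drain_tx_status());
    return 0;
}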
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h index 4fb2834f4e64..b0f14b7b8616 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h | |||
| @@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state); | |||
| 314 | extern void brcms_c_scan_start(struct brcms_c_info *wlc); | 314 | extern void brcms_c_scan_start(struct brcms_c_info *wlc); |
| 315 | extern void brcms_c_scan_stop(struct brcms_c_info *wlc); | 315 | extern void brcms_c_scan_stop(struct brcms_c_info *wlc); |
| 316 | extern int brcms_c_get_curband(struct brcms_c_info *wlc); | 316 | extern int brcms_c_get_curband(struct brcms_c_info *wlc); |
| 317 | extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, | ||
| 318 | bool drop); | ||
| 319 | extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel); | 317 | extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel); |
| 320 | extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl); | 318 | extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl); |
| 321 | extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc, | 319 | extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc, |
| @@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr); | |||
| 332 | extern int brcms_c_get_tx_power(struct brcms_c_info *wlc); | 330 | extern int brcms_c_get_tx_power(struct brcms_c_info *wlc); |
| 333 | extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc); | 331 | extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc); |
| 334 | extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); | 332 | extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); |
| 333 | extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc); | ||
| 335 | 334 | ||
| 336 | #endif /* _BRCM_PUB_H_ */ | 335 | #endif /* _BRCM_PUB_H_ */ |
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 31534f7c0548..279796419ea0 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c | |||
| @@ -1153,6 +1153,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, | |||
| 1153 | next_reclaimed = ssn; | 1153 | next_reclaimed = ssn; |
| 1154 | } | 1154 | } |
| 1155 | 1155 | ||
| 1156 | if (tid != IWL_TID_NON_QOS) { | ||
| 1157 | priv->tid_data[sta_id][tid].next_reclaimed = | ||
| 1158 | next_reclaimed; | ||
| 1159 | IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", | ||
| 1160 | next_reclaimed); | ||
| 1161 | } | ||
| 1162 | |||
| 1156 | iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); | 1163 | iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); |
| 1157 | 1164 | ||
| 1158 | iwlagn_check_ratid_empty(priv, sta_id, tid); | 1165 | iwlagn_check_ratid_empty(priv, sta_id, tid); |
| @@ -1203,28 +1210,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, | |||
| 1203 | if (!is_agg) | 1210 | if (!is_agg) |
| 1204 | iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); | 1211 | iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); |
| 1205 | 1212 | ||
| 1206 | /* | ||
| 1207 | * W/A for FW bug - the seq_ctl isn't updated when the | ||
| 1208 | * queues are flushed. Fetch it from the packet itself | ||
| 1209 | */ | ||
| 1210 | if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) { | ||
| 1211 | next_reclaimed = le16_to_cpu(hdr->seq_ctrl); | ||
| 1212 | next_reclaimed = | ||
| 1213 | SEQ_TO_SN(next_reclaimed + 0x10); | ||
| 1214 | } | ||
| 1215 | |||
| 1216 | is_offchannel_skb = | 1213 | is_offchannel_skb = |
| 1217 | (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN); | 1214 | (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN); |
| 1218 | freed++; | 1215 | freed++; |
| 1219 | } | 1216 | } |
| 1220 | 1217 | ||
| 1221 | if (tid != IWL_TID_NON_QOS) { | ||
| 1222 | priv->tid_data[sta_id][tid].next_reclaimed = | ||
| 1223 | next_reclaimed; | ||
| 1224 | IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", | ||
| 1225 | next_reclaimed); | ||
| 1226 | } | ||
| 1227 | |||
| 1228 | WARN_ON(!is_agg && freed != 1); | 1218 | WARN_ON(!is_agg && freed != 1); |
| 1229 | 1219 | ||
| 1230 | /* | 1220 | /* |
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 9189a32b7844..973a9d90e9ea 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c | |||
| @@ -1563,7 +1563,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, | |||
| 1563 | dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n", | 1563 | dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n", |
| 1564 | scan_rsp->number_of_sets); | 1564 | scan_rsp->number_of_sets); |
| 1565 | ret = -1; | 1565 | ret = -1; |
| 1566 | goto done; | 1566 | goto check_next_scan; |
| 1567 | } | 1567 | } |
| 1568 | 1568 | ||
| 1569 | bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); | 1569 | bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); |
| @@ -1634,7 +1634,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, | |||
| 1634 | if (!beacon_size || beacon_size > bytes_left) { | 1634 | if (!beacon_size || beacon_size > bytes_left) { |
| 1635 | bss_info += bytes_left; | 1635 | bss_info += bytes_left; |
| 1636 | bytes_left = 0; | 1636 | bytes_left = 0; |
| 1637 | return -1; | 1637 | ret = -1; |
| 1638 | goto check_next_scan; | ||
| 1638 | } | 1639 | } |
| 1639 | 1640 | ||
| 1640 | /* Initialize the current working beacon pointer for this BSS | 1641 | /* Initialize the current working beacon pointer for this BSS |
| @@ -1690,7 +1691,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, | |||
| 1690 | dev_err(priv->adapter->dev, | 1691 | dev_err(priv->adapter->dev, |
| 1691 | "%s: bytes left < IE length\n", | 1692 | "%s: bytes left < IE length\n", |
| 1692 | __func__); | 1693 | __func__); |
| 1693 | goto done; | 1694 | goto check_next_scan; |
| 1694 | } | 1695 | } |
| 1695 | if (element_id == WLAN_EID_DS_PARAMS) { | 1696 | if (element_id == WLAN_EID_DS_PARAMS) { |
| 1696 | channel = *(current_ptr + sizeof(struct ieee_types_header)); | 1697 | channel = *(current_ptr + sizeof(struct ieee_types_header)); |
| @@ -1753,6 +1754,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, | |||
| 1753 | } | 1754 | } |
| 1754 | } | 1755 | } |
| 1755 | 1756 | ||
| 1757 | check_next_scan: | ||
| 1756 | spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); | 1758 | spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); |
| 1757 | if (list_empty(&adapter->scan_pending_q)) { | 1759 | if (list_empty(&adapter->scan_pending_q)) { |
| 1758 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); | 1760 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); |
| @@ -1813,7 +1815,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, | |||
| 1813 | } | 1815 | } |
| 1814 | } | 1816 | } |
| 1815 | 1817 | ||
| 1816 | done: | ||
| 1817 | return ret; | 1818 | return ret; |
| 1818 | } | 1819 | } |
| 1819 | 1820 | ||
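The mwifiex change reroutes the error paths in mwifiex_ret_802_11_scan(): instead of jumping to done (or returning outright) and skipping the code that dequeues the next command from scan_pending_q, they now funnel through a check_next_scan label, so one malformed scan response no longer stalls the rest of the queued scans. A minimal sketch of that single-exit control flow, with illustrative names and limits:

/* Sketch: every error path still performs the follow-up step before returning. */
#include <stdio.h>

static int queued_scans = 2;

static void process_next_scan(void)
{
    if (queued_scans > 0) {
        queued_scans--;
        printf("dispatched next scan, %d left\n", queued_scans);
    }
}

static int handle_scan_response(int number_of_sets, int beacon_size)
{
    int ret = 0;

    if (number_of_sets > 64) {          /* illustrative limit */
        ret = -1;
        goto check_next_scan;           /* was: goto done / return -1 */
    }
    if (beacon_size == 0) {
        ret = -1;
        goto check_next_scan;
    }
    /* ... parse the scan table ... */

check_next_scan:
    process_next_scan();                /* runs on success and on error */
    return ret;
}

int main(void)
{
    handle_scan_response(100, 0);       /* error path still advances the queue */
    handle_scan_response(3, 128);
    return 0;
}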
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 4494d130b37c..0f8b05185eda 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c | |||
| @@ -1004,7 +1004,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) | |||
| 1004 | is_tx ? "Tx" : "Rx"); | 1004 | is_tx ? "Tx" : "Rx"); |
| 1005 | 1005 | ||
| 1006 | if (is_tx) { | 1006 | if (is_tx) { |
| 1007 | rtl_lps_leave(hw); | 1007 | schedule_work(&rtlpriv-> |
| 1008 | works.lps_leave_work); | ||
| 1008 | ppsc->last_delaylps_stamp_jiffies = | 1009 | ppsc->last_delaylps_stamp_jiffies = |
| 1009 | jiffies; | 1010 | jiffies; |
| 1010 | } | 1011 | } |
| @@ -1014,7 +1015,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) | |||
| 1014 | } | 1015 | } |
| 1015 | } else if (ETH_P_ARP == ether_type) { | 1016 | } else if (ETH_P_ARP == ether_type) { |
| 1016 | if (is_tx) { | 1017 | if (is_tx) { |
| 1017 | rtl_lps_leave(hw); | 1018 | schedule_work(&rtlpriv->works.lps_leave_work); |
| 1018 | ppsc->last_delaylps_stamp_jiffies = jiffies; | 1019 | ppsc->last_delaylps_stamp_jiffies = jiffies; |
| 1019 | } | 1020 | } |
| 1020 | 1021 | ||
| @@ -1024,7 +1025,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) | |||
| 1024 | "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); | 1025 | "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); |
| 1025 | 1026 | ||
| 1026 | if (is_tx) { | 1027 | if (is_tx) { |
| 1027 | rtl_lps_leave(hw); | 1028 | schedule_work(&rtlpriv->works.lps_leave_work); |
| 1028 | ppsc->last_delaylps_stamp_jiffies = jiffies; | 1029 | ppsc->last_delaylps_stamp_jiffies = jiffies; |
| 1029 | } | 1030 | } |
| 1030 | 1031 | ||
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index f2ecdeb3a90d..1535efda3d52 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c | |||
| @@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
| 542 | WARN_ON(skb_queue_empty(&rx_queue)); | 542 | WARN_ON(skb_queue_empty(&rx_queue)); |
| 543 | while (!skb_queue_empty(&rx_queue)) { | 543 | while (!skb_queue_empty(&rx_queue)) { |
| 544 | _skb = skb_dequeue(&rx_queue); | 544 | _skb = skb_dequeue(&rx_queue); |
| 545 | _rtl_usb_rx_process_agg(hw, skb); | 545 | _rtl_usb_rx_process_agg(hw, _skb); |
| 546 | ieee80211_rx_irqsafe(hw, skb); | 546 | ieee80211_rx_irqsafe(hw, _skb); |
| 547 | } | 547 | } |
| 548 | } | 548 | } |
| 549 | 549 | ||
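The rtlwifi usb.c fix is a classic loop-variable slip: each aggregated frame is dequeued into _skb, but the old code then processed and delivered skb — the outer, already-consumed buffer — so every frame in the queue was handed to mac80211 as the same stale skb. A tiny sketch of the bug pattern and its fix (plain ints stand in for sk_buffs):

/* Sketch: process exactly what was dequeued, not a stale outer variable. */
#include <stdio.h>

#define NFRAMES 3

int main(void)
{
    int queue[NFRAMES] = { 10, 20, 30 };
    int skb = -1;                        /* outer buffer, already consumed */

    /* buggy shape: dequeue into _skb, but hand 'skb' to the stack */
    for (int i = 0; i < NFRAMES; i++) {
        int _skb = queue[i];
        printf("buggy delivers %d\n", skb);     /* same stale value, three times */
        (void)_skb;
    }

    /* fixed shape: deliver the dequeued frame */
    for (int i = 0; i < NFRAMES; i++) {
        int _skb = queue[i];
        printf("fixed delivers %d\n", _skb);
    }
    return 0;
}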
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 94b79c3338c4..9d7f1723dd8f 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h | |||
| @@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb); | |||
| 151 | /* Notify xenvif that ring now has space to send an skb to the frontend */ | 151 | /* Notify xenvif that ring now has space to send an skb to the frontend */ |
| 152 | void xenvif_notify_tx_completion(struct xenvif *vif); | 152 | void xenvif_notify_tx_completion(struct xenvif *vif); |
| 153 | 153 | ||
| 154 | /* Prevent the device from generating any further traffic. */ | ||
| 155 | void xenvif_carrier_off(struct xenvif *vif); | ||
| 156 | |||
| 154 | /* Returns number of ring slots required to send an skb to the frontend */ | 157 | /* Returns number of ring slots required to send an skb to the frontend */ |
| 155 | unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); | 158 | unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); |
| 156 | 159 | ||
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index b7d41f8c338a..b8c5193bd420 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
| @@ -343,17 +343,22 @@ err: | |||
| 343 | return err; | 343 | return err; |
| 344 | } | 344 | } |
| 345 | 345 | ||
| 346 | void xenvif_disconnect(struct xenvif *vif) | 346 | void xenvif_carrier_off(struct xenvif *vif) |
| 347 | { | 347 | { |
| 348 | struct net_device *dev = vif->dev; | 348 | struct net_device *dev = vif->dev; |
| 349 | if (netif_carrier_ok(dev)) { | 349 | |
| 350 | rtnl_lock(); | 350 | rtnl_lock(); |
| 351 | netif_carrier_off(dev); /* discard queued packets */ | 351 | netif_carrier_off(dev); /* discard queued packets */ |
| 352 | if (netif_running(dev)) | 352 | if (netif_running(dev)) |
| 353 | xenvif_down(vif); | 353 | xenvif_down(vif); |
| 354 | rtnl_unlock(); | 354 | rtnl_unlock(); |
| 355 | xenvif_put(vif); | 355 | xenvif_put(vif); |
| 356 | } | 356 | } |
| 357 | |||
| 358 | void xenvif_disconnect(struct xenvif *vif) | ||
| 359 | { | ||
| 360 | if (netif_carrier_ok(vif->dev)) | ||
| 361 | xenvif_carrier_off(vif); | ||
| 357 | 362 | ||
| 358 | atomic_dec(&vif->refcnt); | 363 | atomic_dec(&vif->refcnt); |
| 359 | wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); | 364 | wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index f2d6b78d901d..2b9520c46e97 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
| @@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif) | |||
| 147 | atomic_dec(&netbk->netfront_count); | 147 | atomic_dec(&netbk->netfront_count); |
| 148 | } | 148 | } |
| 149 | 149 | ||
| 150 | static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); | 150 | static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, |
| 151 | u8 status); | ||
| 151 | static void make_tx_response(struct xenvif *vif, | 152 | static void make_tx_response(struct xenvif *vif, |
| 152 | struct xen_netif_tx_request *txp, | 153 | struct xen_netif_tx_request *txp, |
| 153 | s8 st); | 154 | s8 st); |
| @@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif, | |||
| 879 | 880 | ||
| 880 | do { | 881 | do { |
| 881 | make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); | 882 | make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); |
| 882 | if (cons >= end) | 883 | if (cons == end) |
| 883 | break; | 884 | break; |
| 884 | txp = RING_GET_REQUEST(&vif->tx, cons++); | 885 | txp = RING_GET_REQUEST(&vif->tx, cons++); |
| 885 | } while (1); | 886 | } while (1); |
| @@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif, | |||
| 888 | xenvif_put(vif); | 889 | xenvif_put(vif); |
| 889 | } | 890 | } |
| 890 | 891 | ||
| 892 | static void netbk_fatal_tx_err(struct xenvif *vif) | ||
| 893 | { | ||
| 894 | netdev_err(vif->dev, "fatal error; disabling device\n"); | ||
| 895 | xenvif_carrier_off(vif); | ||
| 896 | xenvif_put(vif); | ||
| 897 | } | ||
| 898 | |||
| 891 | static int netbk_count_requests(struct xenvif *vif, | 899 | static int netbk_count_requests(struct xenvif *vif, |
| 892 | struct xen_netif_tx_request *first, | 900 | struct xen_netif_tx_request *first, |
| 893 | struct xen_netif_tx_request *txp, | 901 | struct xen_netif_tx_request *txp, |
| @@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif, | |||
| 901 | 909 | ||
| 902 | do { | 910 | do { |
| 903 | if (frags >= work_to_do) { | 911 | if (frags >= work_to_do) { |
| 904 | netdev_dbg(vif->dev, "Need more frags\n"); | 912 | netdev_err(vif->dev, "Need more frags\n"); |
| 913 | netbk_fatal_tx_err(vif); | ||
| 905 | return -frags; | 914 | return -frags; |
| 906 | } | 915 | } |
| 907 | 916 | ||
| 908 | if (unlikely(frags >= MAX_SKB_FRAGS)) { | 917 | if (unlikely(frags >= MAX_SKB_FRAGS)) { |
| 909 | netdev_dbg(vif->dev, "Too many frags\n"); | 918 | netdev_err(vif->dev, "Too many frags\n"); |
| 919 | netbk_fatal_tx_err(vif); | ||
| 910 | return -frags; | 920 | return -frags; |
| 911 | } | 921 | } |
| 912 | 922 | ||
| 913 | memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), | 923 | memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), |
| 914 | sizeof(*txp)); | 924 | sizeof(*txp)); |
| 915 | if (txp->size > first->size) { | 925 | if (txp->size > first->size) { |
| 916 | netdev_dbg(vif->dev, "Frags galore\n"); | 926 | netdev_err(vif->dev, "Frag is bigger than frame.\n"); |
| 927 | netbk_fatal_tx_err(vif); | ||
| 917 | return -frags; | 928 | return -frags; |
| 918 | } | 929 | } |
| 919 | 930 | ||
| @@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif, | |||
| 921 | frags++; | 932 | frags++; |
| 922 | 933 | ||
| 923 | if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { | 934 | if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { |
| 924 | netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", | 935 | netdev_err(vif->dev, "txp->offset: %x, size: %u\n", |
| 925 | txp->offset, txp->size); | 936 | txp->offset, txp->size); |
| 937 | netbk_fatal_tx_err(vif); | ||
| 926 | return -frags; | 938 | return -frags; |
| 927 | } | 939 | } |
| 928 | } while ((txp++)->flags & XEN_NETTXF_more_data); | 940 | } while ((txp++)->flags & XEN_NETTXF_more_data); |
| @@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, | |||
| 966 | pending_idx = netbk->pending_ring[index]; | 978 | pending_idx = netbk->pending_ring[index]; |
| 967 | page = xen_netbk_alloc_page(netbk, skb, pending_idx); | 979 | page = xen_netbk_alloc_page(netbk, skb, pending_idx); |
| 968 | if (!page) | 980 | if (!page) |
| 969 | return NULL; | 981 | goto err; |
| 970 | 982 | ||
| 971 | gop->source.u.ref = txp->gref; | 983 | gop->source.u.ref = txp->gref; |
| 972 | gop->source.domid = vif->domid; | 984 | gop->source.domid = vif->domid; |
| @@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, | |||
| 988 | } | 1000 | } |
| 989 | 1001 | ||
| 990 | return gop; | 1002 | return gop; |
| 1003 | err: | ||
| 1004 | /* Unwind, freeing all pages and sending error responses. */ | ||
| 1005 | while (i-- > start) { | ||
| 1006 | xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]), | ||
| 1007 | XEN_NETIF_RSP_ERROR); | ||
| 1008 | } | ||
| 1009 | /* The head too, if necessary. */ | ||
| 1010 | if (start) | ||
| 1011 | xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); | ||
| 1012 | |||
| 1013 | return NULL; | ||
| 991 | } | 1014 | } |
| 992 | 1015 | ||
| 993 | static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, | 1016 | static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, |
| @@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, | |||
| 996 | { | 1019 | { |
| 997 | struct gnttab_copy *gop = *gopp; | 1020 | struct gnttab_copy *gop = *gopp; |
| 998 | u16 pending_idx = *((u16 *)skb->data); | 1021 | u16 pending_idx = *((u16 *)skb->data); |
| 999 | struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; | ||
| 1000 | struct xenvif *vif = pending_tx_info[pending_idx].vif; | ||
| 1001 | struct xen_netif_tx_request *txp; | ||
| 1002 | struct skb_shared_info *shinfo = skb_shinfo(skb); | 1022 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
| 1003 | int nr_frags = shinfo->nr_frags; | 1023 | int nr_frags = shinfo->nr_frags; |
| 1004 | int i, err, start; | 1024 | int i, err, start; |
| 1005 | 1025 | ||
| 1006 | /* Check status of header. */ | 1026 | /* Check status of header. */ |
| 1007 | err = gop->status; | 1027 | err = gop->status; |
| 1008 | if (unlikely(err)) { | 1028 | if (unlikely(err)) |
| 1009 | pending_ring_idx_t index; | 1029 | xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); |
| 1010 | index = pending_index(netbk->pending_prod++); | ||
| 1011 | txp = &pending_tx_info[pending_idx].req; | ||
| 1012 | make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); | ||
| 1013 | netbk->pending_ring[index] = pending_idx; | ||
| 1014 | xenvif_put(vif); | ||
| 1015 | } | ||
| 1016 | 1030 | ||
| 1017 | /* Skip first skb fragment if it is on same page as header fragment. */ | 1031 | /* Skip first skb fragment if it is on same page as header fragment. */ |
| 1018 | start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); | 1032 | start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); |
| 1019 | 1033 | ||
| 1020 | for (i = start; i < nr_frags; i++) { | 1034 | for (i = start; i < nr_frags; i++) { |
| 1021 | int j, newerr; | 1035 | int j, newerr; |
| 1022 | pending_ring_idx_t index; | ||
| 1023 | 1036 | ||
| 1024 | pending_idx = frag_get_pending_idx(&shinfo->frags[i]); | 1037 | pending_idx = frag_get_pending_idx(&shinfo->frags[i]); |
| 1025 | 1038 | ||
| @@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, | |||
| 1028 | if (likely(!newerr)) { | 1041 | if (likely(!newerr)) { |
| 1029 | /* Had a previous error? Invalidate this fragment. */ | 1042 | /* Had a previous error? Invalidate this fragment. */ |
| 1030 | if (unlikely(err)) | 1043 | if (unlikely(err)) |
| 1031 | xen_netbk_idx_release(netbk, pending_idx); | 1044 | xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); |
| 1032 | continue; | 1045 | continue; |
| 1033 | } | 1046 | } |
| 1034 | 1047 | ||
| 1035 | /* Error on this fragment: respond to client with an error. */ | 1048 | /* Error on this fragment: respond to client with an error. */ |
| 1036 | txp = &netbk->pending_tx_info[pending_idx].req; | 1049 | xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); |
| 1037 | make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); | ||
| 1038 | index = pending_index(netbk->pending_prod++); | ||
| 1039 | netbk->pending_ring[index] = pending_idx; | ||
| 1040 | xenvif_put(vif); | ||
| 1041 | 1050 | ||
| 1042 | /* Not the first error? Preceding frags already invalidated. */ | 1051 | /* Not the first error? Preceding frags already invalidated. */ |
| 1043 | if (err) | 1052 | if (err) |
| @@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, | |||
| 1045 | 1054 | ||
| 1046 | /* First error: invalidate header and preceding fragments. */ | 1055 | /* First error: invalidate header and preceding fragments. */ |
| 1047 | pending_idx = *((u16 *)skb->data); | 1056 | pending_idx = *((u16 *)skb->data); |
| 1048 | xen_netbk_idx_release(netbk, pending_idx); | 1057 | xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); |
| 1049 | for (j = start; j < i; j++) { | 1058 | for (j = start; j < i; j++) { |
| 1050 | pending_idx = frag_get_pending_idx(&shinfo->frags[j]); | 1059 | pending_idx = frag_get_pending_idx(&shinfo->frags[j]); |
| 1051 | xen_netbk_idx_release(netbk, pending_idx); | 1060 | xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); |
| 1052 | } | 1061 | } |
| 1053 | 1062 | ||
| 1054 | /* Remember the error: invalidate all subsequent fragments. */ | 1063 | /* Remember the error: invalidate all subsequent fragments. */ |
| @@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) | |||
| 1082 | 1091 | ||
| 1083 | /* Take an extra reference to offset xen_netbk_idx_release */ | 1092 | /* Take an extra reference to offset xen_netbk_idx_release */ |
| 1084 | get_page(netbk->mmap_pages[pending_idx]); | 1093 | get_page(netbk->mmap_pages[pending_idx]); |
| 1085 | xen_netbk_idx_release(netbk, pending_idx); | 1094 | xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); |
| 1086 | } | 1095 | } |
| 1087 | } | 1096 | } |
| 1088 | 1097 | ||
| @@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif, | |||
| 1095 | 1104 | ||
| 1096 | do { | 1105 | do { |
| 1097 | if (unlikely(work_to_do-- <= 0)) { | 1106 | if (unlikely(work_to_do-- <= 0)) { |
| 1098 | netdev_dbg(vif->dev, "Missing extra info\n"); | 1107 | netdev_err(vif->dev, "Missing extra info\n"); |
| 1108 | netbk_fatal_tx_err(vif); | ||
| 1099 | return -EBADR; | 1109 | return -EBADR; |
| 1100 | } | 1110 | } |
| 1101 | 1111 | ||
| @@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif, | |||
| 1104 | if (unlikely(!extra.type || | 1114 | if (unlikely(!extra.type || |
| 1105 | extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { | 1115 | extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { |
| 1106 | vif->tx.req_cons = ++cons; | 1116 | vif->tx.req_cons = ++cons; |
| 1107 | netdev_dbg(vif->dev, | 1117 | netdev_err(vif->dev, |
| 1108 | "Invalid extra type: %d\n", extra.type); | 1118 | "Invalid extra type: %d\n", extra.type); |
| 1119 | netbk_fatal_tx_err(vif); | ||
| 1109 | return -EINVAL; | 1120 | return -EINVAL; |
| 1110 | } | 1121 | } |
| 1111 | 1122 | ||
| @@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif, | |||
| 1121 | struct xen_netif_extra_info *gso) | 1132 | struct xen_netif_extra_info *gso) |
| 1122 | { | 1133 | { |
| 1123 | if (!gso->u.gso.size) { | 1134 | if (!gso->u.gso.size) { |
| 1124 | netdev_dbg(vif->dev, "GSO size must not be zero.\n"); | 1135 | netdev_err(vif->dev, "GSO size must not be zero.\n"); |
| 1136 | netbk_fatal_tx_err(vif); | ||
| 1125 | return -EINVAL; | 1137 | return -EINVAL; |
| 1126 | } | 1138 | } |
| 1127 | 1139 | ||
| 1128 | /* Currently only TCPv4 S.O. is supported. */ | 1140 | /* Currently only TCPv4 S.O. is supported. */ |
| 1129 | if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { | 1141 | if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { |
| 1130 | netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); | 1142 | netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); |
| 1143 | netbk_fatal_tx_err(vif); | ||
| 1131 | return -EINVAL; | 1144 | return -EINVAL; |
| 1132 | } | 1145 | } |
| 1133 | 1146 | ||
| @@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) | |||
| 1264 | 1277 | ||
| 1265 | /* Get a netif from the list with work to do. */ | 1278 | /* Get a netif from the list with work to do. */ |
| 1266 | vif = poll_net_schedule_list(netbk); | 1279 | vif = poll_net_schedule_list(netbk); |
| 1280 | /* This can sometimes happen because the test of | ||
| 1281 | * list_empty(net_schedule_list) at the top of the | ||
| 1282 | * loop is unlocked. Just go back and have another | ||
| 1283 | * look. | ||
| 1284 | */ | ||
| 1267 | if (!vif) | 1285 | if (!vif) |
| 1268 | continue; | 1286 | continue; |
| 1269 | 1287 | ||
| 1288 | if (vif->tx.sring->req_prod - vif->tx.req_cons > | ||
| 1289 | XEN_NETIF_TX_RING_SIZE) { | ||
| 1290 | netdev_err(vif->dev, | ||
| 1291 | "Impossible number of requests. " | ||
| 1292 | "req_prod %d, req_cons %d, size %ld\n", | ||
| 1293 | vif->tx.sring->req_prod, vif->tx.req_cons, | ||
| 1294 | XEN_NETIF_TX_RING_SIZE); | ||
| 1295 | netbk_fatal_tx_err(vif); | ||
| 1296 | continue; | ||
| 1297 | } | ||
| 1298 | |||
| 1270 | RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); | 1299 | RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); |
| 1271 | if (!work_to_do) { | 1300 | if (!work_to_do) { |
| 1272 | xenvif_put(vif); | 1301 | xenvif_put(vif); |
| @@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) | |||
| 1294 | work_to_do = xen_netbk_get_extras(vif, extras, | 1323 | work_to_do = xen_netbk_get_extras(vif, extras, |
| 1295 | work_to_do); | 1324 | work_to_do); |
| 1296 | idx = vif->tx.req_cons; | 1325 | idx = vif->tx.req_cons; |
| 1297 | if (unlikely(work_to_do < 0)) { | 1326 | if (unlikely(work_to_do < 0)) |
| 1298 | netbk_tx_err(vif, &txreq, idx); | ||
| 1299 | continue; | 1327 | continue; |
| 1300 | } | ||
| 1301 | } | 1328 | } |
| 1302 | 1329 | ||
| 1303 | ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); | 1330 | ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); |
| 1304 | if (unlikely(ret < 0)) { | 1331 | if (unlikely(ret < 0)) |
| 1305 | netbk_tx_err(vif, &txreq, idx - ret); | ||
| 1306 | continue; | 1332 | continue; |
| 1307 | } | 1333 | |
| 1308 | idx += ret; | 1334 | idx += ret; |
| 1309 | 1335 | ||
| 1310 | if (unlikely(txreq.size < ETH_HLEN)) { | 1336 | if (unlikely(txreq.size < ETH_HLEN)) { |
| @@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) | |||
| 1316 | 1342 | ||
| 1317 | /* No crossing a page as the payload mustn't fragment. */ | 1343 | /* No crossing a page as the payload mustn't fragment. */ |
| 1318 | if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { | 1344 | if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { |
| 1319 | netdev_dbg(vif->dev, | 1345 | netdev_err(vif->dev, |
| 1320 | "txreq.offset: %x, size: %u, end: %lu\n", | 1346 | "txreq.offset: %x, size: %u, end: %lu\n", |
| 1321 | txreq.offset, txreq.size, | 1347 | txreq.offset, txreq.size, |
| 1322 | (txreq.offset&~PAGE_MASK) + txreq.size); | 1348 | (txreq.offset&~PAGE_MASK) + txreq.size); |
| 1323 | netbk_tx_err(vif, &txreq, idx); | 1349 | netbk_fatal_tx_err(vif); |
| 1324 | continue; | 1350 | continue; |
| 1325 | } | 1351 | } |
| 1326 | 1352 | ||
| @@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) | |||
| 1348 | gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; | 1374 | gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; |
| 1349 | 1375 | ||
| 1350 | if (netbk_set_skb_gso(vif, skb, gso)) { | 1376 | if (netbk_set_skb_gso(vif, skb, gso)) { |
| 1377 | /* Failure in netbk_set_skb_gso is fatal. */ | ||
| 1351 | kfree_skb(skb); | 1378 | kfree_skb(skb); |
| 1352 | netbk_tx_err(vif, &txreq, idx); | ||
| 1353 | continue; | 1379 | continue; |
| 1354 | } | 1380 | } |
| 1355 | } | 1381 | } |
| @@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) | |||
| 1448 | txp->size -= data_len; | 1474 | txp->size -= data_len; |
| 1449 | } else { | 1475 | } else { |
| 1450 | /* Schedule a response immediately. */ | 1476 | /* Schedule a response immediately. */ |
| 1451 | xen_netbk_idx_release(netbk, pending_idx); | 1477 | xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); |
| 1452 | } | 1478 | } |
| 1453 | 1479 | ||
| 1454 | if (txp->flags & XEN_NETTXF_csum_blank) | 1480 | if (txp->flags & XEN_NETTXF_csum_blank) |
| @@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk) | |||
| 1500 | xen_netbk_tx_submit(netbk); | 1526 | xen_netbk_tx_submit(netbk); |
| 1501 | } | 1527 | } |
| 1502 | 1528 | ||
| 1503 | static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) | 1529 | static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, |
| 1530 | u8 status) | ||
| 1504 | { | 1531 | { |
| 1505 | struct xenvif *vif; | 1532 | struct xenvif *vif; |
| 1506 | struct pending_tx_info *pending_tx_info; | 1533 | struct pending_tx_info *pending_tx_info; |
| @@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) | |||
| 1514 | 1541 | ||
| 1515 | vif = pending_tx_info->vif; | 1542 | vif = pending_tx_info->vif; |
| 1516 | 1543 | ||
| 1517 | make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); | 1544 | make_tx_response(vif, &pending_tx_info->req, status); |
| 1518 | 1545 | ||
| 1519 | index = pending_index(netbk->pending_prod++); | 1546 | index = pending_index(netbk->pending_prod++); |
| 1520 | netbk->pending_ring[index] = pending_idx; | 1547 | netbk->pending_ring[index] = pending_idx; |
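The xen-netback hunks above tighten error handling in three related ways: malformed guest requests (too many frags, oversized frags, bad extras, bad GSO, an impossible req_prod/req_cons gap) are now fatal — netbk_fatal_tx_err() logs at err level, takes the carrier down through the new xenvif_carrier_off() and drops the vif reference — instead of being quietly retried; xen_netbk_idx_release() gains a status argument so error responses go through the normal slot-release path; and xen_netbk_get_requests() unwinds the slots it has already claimed when a page allocation fails rather than leaking them. The sketch below models only that last unwind-on-failure pattern; the resource handling and names are illustrative:

/* Sketch of unwinding partially acquired resources with an error status. */
#include <stdbool.h>
#include <stdio.h>

enum { RSP_OKAY = 0, RSP_ERROR = 1 };

static bool acquire_slot(int i)           /* stands in for xen_netbk_alloc_page() */
{
    return i != 3;                        /* pretend the 4th allocation fails */
}

static void release_slot(int i, int status)
{
    printf("release slot %d with status %d\n", i, status);
}

static int build_request(int start, int nr_frags)
{
    int i;

    for (i = start; i < nr_frags; i++) {
        if (!acquire_slot(i))
            goto err;
        /* ... fill in the grant-copy op for this slot ... */
    }
    return 0;

err:
    /* Unwind, releasing every slot claimed so far with an error status. */
    while (i-- > start)
        release_slot(i, RSP_ERROR);
    if (start)
        release_slot(0, RSP_ERROR);       /* the head slot, if it was separate */
    return -1;
}

int main(void)
{
    printf("build_request: %d\n", build_request(1, 6));
    return 0;
}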
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index efaecefe3f8c..a5f3c8ca480e 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig | |||
| @@ -184,8 +184,8 @@ config PINCTRL_SAMSUNG | |||
| 184 | select PINMUX | 184 | select PINMUX |
| 185 | select PINCONF | 185 | select PINCONF |
| 186 | 186 | ||
| 187 | config PINCTRL_EXYNOS4 | 187 | config PINCTRL_EXYNOS |
| 188 | bool "Pinctrl driver data for Exynos4 SoC" | 188 | bool "Pinctrl driver data for Samsung EXYNOS SoCs" |
| 189 | depends on OF && GPIOLIB | 189 | depends on OF && GPIOLIB |
| 190 | select PINCTRL_SAMSUNG | 190 | select PINCTRL_SAMSUNG |
| 191 | 191 | ||
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index fc4606f27dc7..6e87e52eab5d 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile | |||
| @@ -36,7 +36,7 @@ obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o | |||
| 36 | obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o | 36 | obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o |
| 37 | obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o | 37 | obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o |
| 38 | obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o | 38 | obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o |
| 39 | obj-$(CONFIG_PINCTRL_EXYNOS4) += pinctrl-exynos.o | 39 | obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o |
| 40 | obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o | 40 | obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o |
| 41 | obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o | 41 | obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o |
| 42 | obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o | 42 | obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o |
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c index 498b2ba905de..d02498b30c6e 100644 --- a/drivers/pinctrl/pinctrl-sirf.c +++ b/drivers/pinctrl/pinctrl-sirf.c | |||
| @@ -1246,6 +1246,22 @@ static void __iomem *sirfsoc_rsc_of_iomap(void) | |||
| 1246 | return of_iomap(np, 0); | 1246 | return of_iomap(np, 0); |
| 1247 | } | 1247 | } |
| 1248 | 1248 | ||
| 1249 | static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc, | ||
| 1250 | const struct of_phandle_args *gpiospec, | ||
| 1251 | u32 *flags) | ||
| 1252 | { | ||
| 1253 | if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE) | ||
| 1254 | return -EINVAL; | ||
| 1255 | |||
| 1256 | if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc) | ||
| 1257 | return -EINVAL; | ||
| 1258 | |||
| 1259 | if (flags) | ||
| 1260 | *flags = gpiospec->args[1]; | ||
| 1261 | |||
| 1262 | return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE; | ||
| 1263 | } | ||
| 1264 | |||
| 1249 | static int sirfsoc_pinmux_probe(struct platform_device *pdev) | 1265 | static int sirfsoc_pinmux_probe(struct platform_device *pdev) |
| 1250 | { | 1266 | { |
| 1251 | int ret; | 1267 | int ret; |
| @@ -1736,6 +1752,8 @@ static int sirfsoc_gpio_probe(struct device_node *np) | |||
| 1736 | bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE; | 1752 | bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE; |
| 1737 | bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL); | 1753 | bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL); |
| 1738 | bank->chip.gc.of_node = np; | 1754 | bank->chip.gc.of_node = np; |
| 1755 | bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate; | ||
| 1756 | bank->chip.gc.of_gpio_n_cells = 2; | ||
| 1739 | bank->chip.regs = regs; | 1757 | bank->chip.regs = regs; |
| 1740 | bank->id = i; | 1758 | bank->id = i; |
| 1741 | bank->is_marco = is_marco; | 1759 | bank->is_marco = is_marco; |
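The pinctrl-sirf addition wires up a two-cell GPIO specifier: sirfsoc_gpio_of_xlate() range-checks the global pin number in cell 0, verifies it belongs to the bank whose gpio_chip was queried (args[0] / SIRFSOC_GPIO_BANK_SIZE), returns the offset within that bank (args[0] % SIRFSOC_GPIO_BANK_SIZE), and passes cell 1 through as flags. A small sketch of that div/mod translation — the constants and the >= bound here are illustrative, not copied from the driver:

/* Sketch: split a global pin number into (bank, offset) and validate it. */
#include <stdio.h>

#define BANK_SIZE 32
#define NR_BANKS  5

/* Returns the offset within 'bank', or -1 if the specifier is invalid. */
static int gpio_xlate(int bank, unsigned int global_pin, unsigned int *flags,
                      unsigned int spec_flags)
{
    if (global_pin >= NR_BANKS * BANK_SIZE)
        return -1;                        /* out of range for the controller */
    if (bank != (int)(global_pin / BANK_SIZE))
        return -1;                        /* specifier targets another bank */
    if (flags)
        *flags = spec_flags;
    return global_pin % BANK_SIZE;
}

int main(void)
{
    unsigned int flags;
    printf("bank 2, pin 70 -> offset %d\n", gpio_xlate(2, 70, &flags, 0));   /* 6 */
    printf("bank 0, pin 70 -> offset %d\n", gpio_xlate(0, 70, &flags, 0));   /* -1 */
    return 0;
}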
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c index b85040caaea3..cca18a3c0294 100644 --- a/drivers/regulator/max77686.c +++ b/drivers/regulator/max77686.c | |||
| @@ -379,9 +379,10 @@ static struct regulator_desc regulators[] = { | |||
| 379 | }; | 379 | }; |
| 380 | 380 | ||
| 381 | #ifdef CONFIG_OF | 381 | #ifdef CONFIG_OF |
| 382 | static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, | 382 | static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, |
| 383 | struct max77686_platform_data *pdata) | 383 | struct max77686_platform_data *pdata) |
| 384 | { | 384 | { |
| 385 | struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); | ||
| 385 | struct device_node *pmic_np, *regulators_np; | 386 | struct device_node *pmic_np, *regulators_np; |
| 386 | struct max77686_regulator_data *rdata; | 387 | struct max77686_regulator_data *rdata; |
| 387 | struct of_regulator_match rmatch; | 388 | struct of_regulator_match rmatch; |
| @@ -390,15 +391,15 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, | |||
| 390 | pmic_np = iodev->dev->of_node; | 391 | pmic_np = iodev->dev->of_node; |
| 391 | regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators"); | 392 | regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators"); |
| 392 | if (!regulators_np) { | 393 | if (!regulators_np) { |
| 393 | dev_err(iodev->dev, "could not find regulators sub-node\n"); | 394 | dev_err(&pdev->dev, "could not find regulators sub-node\n"); |
| 394 | return -EINVAL; | 395 | return -EINVAL; |
| 395 | } | 396 | } |
| 396 | 397 | ||
| 397 | pdata->num_regulators = ARRAY_SIZE(regulators); | 398 | pdata->num_regulators = ARRAY_SIZE(regulators); |
| 398 | rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * | 399 | rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) * |
| 399 | pdata->num_regulators, GFP_KERNEL); | 400 | pdata->num_regulators, GFP_KERNEL); |
| 400 | if (!rdata) { | 401 | if (!rdata) { |
| 401 | dev_err(iodev->dev, | 402 | dev_err(&pdev->dev, |
| 402 | "could not allocate memory for regulator data\n"); | 403 | "could not allocate memory for regulator data\n"); |
| 403 | return -ENOMEM; | 404 | return -ENOMEM; |
| 404 | } | 405 | } |
| @@ -407,7 +408,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, | |||
| 407 | rmatch.name = regulators[i].name; | 408 | rmatch.name = regulators[i].name; |
| 408 | rmatch.init_data = NULL; | 409 | rmatch.init_data = NULL; |
| 409 | rmatch.of_node = NULL; | 410 | rmatch.of_node = NULL; |
| 410 | of_regulator_match(iodev->dev, regulators_np, &rmatch, 1); | 411 | of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1); |
| 411 | rdata[i].initdata = rmatch.init_data; | 412 | rdata[i].initdata = rmatch.init_data; |
| 412 | rdata[i].of_node = rmatch.of_node; | 413 | rdata[i].of_node = rmatch.of_node; |
| 413 | } | 414 | } |
| @@ -417,7 +418,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, | |||
| 417 | return 0; | 418 | return 0; |
| 418 | } | 419 | } |
| 419 | #else | 420 | #else |
| 420 | static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, | 421 | static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, |
| 421 | struct max77686_platform_data *pdata) | 422 | struct max77686_platform_data *pdata) |
| 422 | { | 423 | { |
| 423 | return 0; | 424 | return 0; |
| @@ -440,7 +441,7 @@ static int max77686_pmic_probe(struct platform_device *pdev) | |||
| 440 | } | 441 | } |
| 441 | 442 | ||
| 442 | if (iodev->dev->of_node) { | 443 | if (iodev->dev->of_node) { |
| 443 | ret = max77686_pmic_dt_parse_pdata(iodev, pdata); | 444 | ret = max77686_pmic_dt_parse_pdata(pdev, pdata); |
| 444 | if (ret) | 445 | if (ret) |
| 445 | return ret; | 446 | return ret; |
| 446 | } | 447 | } |
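This max77686 patch — and the max8907/max8997 hunks that follow it repeat the same pattern — hands the DT parse function the platform_device instead of the parent MFD device: devm_kzalloc() and the diagnostics now use &pdev->dev, so the regulator data is freed when the regulator platform driver unbinds rather than living as long as the parent MFD, and of_regulator_match() is called with the same device so the init data it allocates follows that lifetime too. A rough userspace model of that lifetime argument; the mini "devm" here is purely illustrative, not the kernel implementation:

/* Sketch: devm memory is released with the device it was allocated against. */
#include <stdio.h>
#include <stdlib.h>

struct device {
    const char *name;
    void *devres[8];
    int nres;
};

static void *devm_alloc(struct device *dev, size_t size)
{
    void *p = calloc(1, size);
    dev->devres[dev->nres++] = p;        /* freed when 'dev' is torn down */
    return p;
}

static void device_release(struct device *dev)
{
    for (int i = 0; i < dev->nres; i++)
        free(dev->devres[i]);
    printf("%s: released %d devres allocations\n", dev->name, dev->nres);
    dev->nres = 0;
}

int main(void)
{
    struct device mfd_parent = { .name = "max77686-mfd" };
    struct device regulator_pdev = { .name = "max77686-pmic" };

    /* allocate regulator data against the child platform device (the fix) */
    devm_alloc(&regulator_pdev, 512);

    device_release(&regulator_pdev);     /* unbinding the regulator driver frees it */
    device_release(&mfd_parent);         /* parent teardown has nothing extra left */
    return 0;
}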
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c index d1a77512d83e..d40cf7fdb546 100644 --- a/drivers/regulator/max8907-regulator.c +++ b/drivers/regulator/max8907-regulator.c | |||
| @@ -237,8 +237,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev) | |||
| 237 | return -EINVAL; | 237 | return -EINVAL; |
| 238 | } | 238 | } |
| 239 | 239 | ||
| 240 | ret = of_regulator_match(pdev->dev.parent, regulators, | 240 | ret = of_regulator_match(&pdev->dev, regulators, max8907_matches, |
| 241 | max8907_matches, | ||
| 242 | ARRAY_SIZE(max8907_matches)); | 241 | ARRAY_SIZE(max8907_matches)); |
| 243 | if (ret < 0) { | 242 | if (ret < 0) { |
| 244 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", | 243 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", |
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c index 02be7fcae32f..836908ce505e 100644 --- a/drivers/regulator/max8997.c +++ b/drivers/regulator/max8997.c | |||
| @@ -934,7 +934,7 @@ static struct regulator_desc regulators[] = { | |||
| 934 | }; | 934 | }; |
| 935 | 935 | ||
| 936 | #ifdef CONFIG_OF | 936 | #ifdef CONFIG_OF |
| 937 | static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev, | 937 | static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev, |
| 938 | struct max8997_platform_data *pdata, | 938 | struct max8997_platform_data *pdata, |
| 939 | struct device_node *pmic_np) | 939 | struct device_node *pmic_np) |
| 940 | { | 940 | { |
| @@ -944,7 +944,7 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev, | |||
| 944 | gpio = of_get_named_gpio(pmic_np, | 944 | gpio = of_get_named_gpio(pmic_np, |
| 945 | "max8997,pmic-buck125-dvs-gpios", i); | 945 | "max8997,pmic-buck125-dvs-gpios", i); |
| 946 | if (!gpio_is_valid(gpio)) { | 946 | if (!gpio_is_valid(gpio)) { |
| 947 | dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio); | 947 | dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio); |
| 948 | return -EINVAL; | 948 | return -EINVAL; |
| 949 | } | 949 | } |
| 950 | pdata->buck125_gpios[i] = gpio; | 950 | pdata->buck125_gpios[i] = gpio; |
| @@ -952,22 +952,23 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev, | |||
| 952 | return 0; | 952 | return 0; |
| 953 | } | 953 | } |
| 954 | 954 | ||
| 955 | static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, | 955 | static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, |
| 956 | struct max8997_platform_data *pdata) | 956 | struct max8997_platform_data *pdata) |
| 957 | { | 957 | { |
| 958 | struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent); | ||
| 958 | struct device_node *pmic_np, *regulators_np, *reg_np; | 959 | struct device_node *pmic_np, *regulators_np, *reg_np; |
| 959 | struct max8997_regulator_data *rdata; | 960 | struct max8997_regulator_data *rdata; |
| 960 | unsigned int i, dvs_voltage_nr = 1, ret; | 961 | unsigned int i, dvs_voltage_nr = 1, ret; |
| 961 | 962 | ||
| 962 | pmic_np = iodev->dev->of_node; | 963 | pmic_np = iodev->dev->of_node; |
| 963 | if (!pmic_np) { | 964 | if (!pmic_np) { |
| 964 | dev_err(iodev->dev, "could not find pmic sub-node\n"); | 965 | dev_err(&pdev->dev, "could not find pmic sub-node\n"); |
| 965 | return -ENODEV; | 966 | return -ENODEV; |
| 966 | } | 967 | } |
| 967 | 968 | ||
| 968 | regulators_np = of_find_node_by_name(pmic_np, "regulators"); | 969 | regulators_np = of_find_node_by_name(pmic_np, "regulators"); |
| 969 | if (!regulators_np) { | 970 | if (!regulators_np) { |
| 970 | dev_err(iodev->dev, "could not find regulators sub-node\n"); | 971 | dev_err(&pdev->dev, "could not find regulators sub-node\n"); |
| 971 | return -EINVAL; | 972 | return -EINVAL; |
| 972 | } | 973 | } |
| 973 | 974 | ||
| @@ -976,11 +977,10 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, | |||
| 976 | for_each_child_of_node(regulators_np, reg_np) | 977 | for_each_child_of_node(regulators_np, reg_np) |
| 977 | pdata->num_regulators++; | 978 | pdata->num_regulators++; |
| 978 | 979 | ||
| 979 | rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * | 980 | rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) * |
| 980 | pdata->num_regulators, GFP_KERNEL); | 981 | pdata->num_regulators, GFP_KERNEL); |
| 981 | if (!rdata) { | 982 | if (!rdata) { |
| 982 | dev_err(iodev->dev, "could not allocate memory for " | 983 | dev_err(&pdev->dev, "could not allocate memory for regulator data\n"); |
| 983 | "regulator data\n"); | ||
| 984 | return -ENOMEM; | 984 | return -ENOMEM; |
| 985 | } | 985 | } |
| 986 | 986 | ||
| @@ -991,14 +991,14 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, | |||
| 991 | break; | 991 | break; |
| 992 | 992 | ||
| 993 | if (i == ARRAY_SIZE(regulators)) { | 993 | if (i == ARRAY_SIZE(regulators)) { |
| 994 | dev_warn(iodev->dev, "don't know how to configure " | 994 | dev_warn(&pdev->dev, "don't know how to configure regulator %s\n", |
| 995 | "regulator %s\n", reg_np->name); | 995 | reg_np->name); |
| 996 | continue; | 996 | continue; |
| 997 | } | 997 | } |
| 998 | 998 | ||
| 999 | rdata->id = i; | 999 | rdata->id = i; |
| 1000 | rdata->initdata = of_get_regulator_init_data( | 1000 | rdata->initdata = of_get_regulator_init_data(&pdev->dev, |
| 1001 | iodev->dev, reg_np); | 1001 | reg_np); |
| 1002 | rdata->reg_node = reg_np; | 1002 | rdata->reg_node = reg_np; |
| 1003 | rdata++; | 1003 | rdata++; |
| 1004 | } | 1004 | } |
| @@ -1014,7 +1014,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, | |||
| 1014 | 1014 | ||
| 1015 | if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs || | 1015 | if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs || |
| 1016 | pdata->buck5_gpiodvs) { | 1016 | pdata->buck5_gpiodvs) { |
| 1017 | ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np); | 1017 | ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np); |
| 1018 | if (ret) | 1018 | if (ret) |
| 1019 | return -EINVAL; | 1019 | return -EINVAL; |
| 1020 | 1020 | ||
| @@ -1025,8 +1025,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, | |||
| 1025 | } else { | 1025 | } else { |
| 1026 | if (pdata->buck125_default_idx >= 8) { | 1026 | if (pdata->buck125_default_idx >= 8) { |
| 1027 | pdata->buck125_default_idx = 0; | 1027 | pdata->buck125_default_idx = 0; |
| 1028 | dev_info(iodev->dev, "invalid value for " | 1028 | dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n"); |
| 1029 | "default dvs index, using 0 instead\n"); | ||
| 1030 | } | 1029 | } |
| 1031 | } | 1030 | } |
| 1032 | 1031 | ||
| @@ -1040,28 +1039,28 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, | |||
| 1040 | if (of_property_read_u32_array(pmic_np, | 1039 | if (of_property_read_u32_array(pmic_np, |
| 1041 | "max8997,pmic-buck1-dvs-voltage", | 1040 | "max8997,pmic-buck1-dvs-voltage", |
| 1042 | pdata->buck1_voltage, dvs_voltage_nr)) { | 1041 | pdata->buck1_voltage, dvs_voltage_nr)) { |
| 1043 | dev_err(iodev->dev, "buck1 voltages not specified\n"); | 1042 | dev_err(&pdev->dev, "buck1 voltages not specified\n"); |
| 1044 | return -EINVAL; | 1043 | return -EINVAL; |
| 1045 | } | 1044 | } |
| 1046 | 1045 | ||
| 1047 | if (of_property_read_u32_array(pmic_np, | 1046 | if (of_property_read_u32_array(pmic_np, |
| 1048 | "max8997,pmic-buck2-dvs-voltage", | 1047 | "max8997,pmic-buck2-dvs-voltage", |
| 1049 | pdata->buck2_voltage, dvs_voltage_nr)) { | 1048 | pdata->buck2_voltage, dvs_voltage_nr)) { |
| 1050 | dev_err(iodev->dev, "buck2 voltages not specified\n"); | 1049 | dev_err(&pdev->dev, "buck2 voltages not specified\n"); |
| 1051 | return -EINVAL; | 1050 | return -EINVAL; |
| 1052 | } | 1051 | } |
| 1053 | 1052 | ||
| 1054 | if (of_property_read_u32_array(pmic_np, | 1053 | if (of_property_read_u32_array(pmic_np, |
| 1055 | "max8997,pmic-buck5-dvs-voltage", | 1054 | "max8997,pmic-buck5-dvs-voltage", |
| 1056 | pdata->buck5_voltage, dvs_voltage_nr)) { | 1055 | pdata->buck5_voltage, dvs_voltage_nr)) { |
| 1057 | dev_err(iodev->dev, "buck5 voltages not specified\n"); | 1056 | dev_err(&pdev->dev, "buck5 voltages not specified\n"); |
| 1058 | return -EINVAL; | 1057 | return -EINVAL; |
| 1059 | } | 1058 | } |
| 1060 | 1059 | ||
| 1061 | return 0; | 1060 | return 0; |
| 1062 | } | 1061 | } |
| 1063 | #else | 1062 | #else |
| 1064 | static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, | 1063 | static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, |
| 1065 | struct max8997_platform_data *pdata) | 1064 | struct max8997_platform_data *pdata) |
| 1066 | { | 1065 | { |
| 1067 | return 0; | 1066 | return 0; |
| @@ -1085,7 +1084,7 @@ static int max8997_pmic_probe(struct platform_device *pdev) | |||
| 1085 | } | 1084 | } |
| 1086 | 1085 | ||
| 1087 | if (iodev->dev->of_node) { | 1086 | if (iodev->dev->of_node) { |
| 1088 | ret = max8997_pmic_dt_parse_pdata(iodev, pdata); | 1087 | ret = max8997_pmic_dt_parse_pdata(pdev, pdata); |
| 1089 | if (ret) | 1088 | if (ret) |
| 1090 | return ret; | 1089 | return ret; |
| 1091 | } | 1090 | } |
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c index 1f0df4046b86..0a8dd1cbee6f 100644 --- a/drivers/regulator/max8998.c +++ b/drivers/regulator/max8998.c | |||
| @@ -65,7 +65,7 @@ static const struct voltage_map_desc ldo9_voltage_map_desc = { | |||
| 65 | .min = 2800000, .step = 100000, .max = 3100000, | 65 | .min = 2800000, .step = 100000, .max = 3100000, |
| 66 | }; | 66 | }; |
| 67 | static const struct voltage_map_desc ldo10_voltage_map_desc = { | 67 | static const struct voltage_map_desc ldo10_voltage_map_desc = { |
| 68 | .min = 95000, .step = 50000, .max = 1300000, | 68 | .min = 950000, .step = 50000, .max = 1300000, |
| 69 | }; | 69 | }; |
| 70 | static const struct voltage_map_desc ldo1213_voltage_map_desc = { | 70 | static const struct voltage_map_desc ldo1213_voltage_map_desc = { |
| 71 | .min = 800000, .step = 100000, .max = 3300000, | 71 | .min = 800000, .step = 100000, .max = 3300000, |
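A quick arithmetic check of the corrected ldo10 map above (a minimal userspace sketch, not driver code; it only assumes the driver derives each selector's voltage as min + selector * step, which is what the map describes):

    #include <stdio.h>

    int main(void)
    {
            const int min_uV = 950000, step_uV = 50000, max_uV = 1300000;
            int n_voltages = (max_uV - min_uV) / step_uV + 1;   /* 8 selectable steps */

            printf("ldo10: %d steps from %d to %d uV\n", n_voltages, min_uV, max_uV);
            return 0;
    }

With the old typo (min = 95000) the map would have started at 0.095 V, far below anything the LDO can actually produce.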
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 6f684916fd79..66ca769287ab 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c | |||
| @@ -120,6 +120,12 @@ int of_regulator_match(struct device *dev, struct device_node *node, | |||
| 120 | if (!dev || !node) | 120 | if (!dev || !node) |
| 121 | return -EINVAL; | 121 | return -EINVAL; |
| 122 | 122 | ||
| 123 | for (i = 0; i < num_matches; i++) { | ||
| 124 | struct of_regulator_match *match = &matches[i]; | ||
| 125 | match->init_data = NULL; | ||
| 126 | match->of_node = NULL; | ||
| 127 | } | ||
| 128 | |||
| 123 | for_each_child_of_node(node, child) { | 129 | for_each_child_of_node(node, child) { |
| 124 | name = of_get_property(child, | 130 | name = of_get_property(child, |
| 125 | "regulator-compatible", NULL); | 131 | "regulator-compatible", NULL); |
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index bd062a2ffbe2..cd9ea2ea1826 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c | |||
| @@ -174,9 +174,9 @@ static struct regulator_ops s2mps11_buck_ops = { | |||
| 174 | .min_uV = S2MPS11_BUCK_MIN2, \ | 174 | .min_uV = S2MPS11_BUCK_MIN2, \ |
| 175 | .uV_step = S2MPS11_BUCK_STEP2, \ | 175 | .uV_step = S2MPS11_BUCK_STEP2, \ |
| 176 | .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ | 176 | .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ |
| 177 | .vsel_reg = S2MPS11_REG_B9CTRL2, \ | 177 | .vsel_reg = S2MPS11_REG_B10CTRL2, \ |
| 178 | .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ | 178 | .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ |
| 179 | .enable_reg = S2MPS11_REG_B9CTRL1, \ | 179 | .enable_reg = S2MPS11_REG_B10CTRL1, \ |
| 180 | .enable_mask = S2MPS11_ENABLE_MASK \ | 180 | .enable_mask = S2MPS11_ENABLE_MASK \ |
| 181 | } | 181 | } |
| 182 | 182 | ||
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c index 73dce7664126..df395187c063 100644 --- a/drivers/regulator/tps65217-regulator.c +++ b/drivers/regulator/tps65217-regulator.c | |||
| @@ -305,8 +305,8 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev) | |||
| 305 | if (!regs) | 305 | if (!regs) |
| 306 | return NULL; | 306 | return NULL; |
| 307 | 307 | ||
| 308 | count = of_regulator_match(pdev->dev.parent, regs, | 308 | count = of_regulator_match(&pdev->dev, regs, reg_matches, |
| 309 | reg_matches, TPS65217_NUM_REGULATOR); | 309 | TPS65217_NUM_REGULATOR); |
| 310 | of_node_put(regs); | 310 | of_node_put(regs); |
| 311 | if ((count < 0) || (count > TPS65217_NUM_REGULATOR)) | 311 | if ((count < 0) || (count > TPS65217_NUM_REGULATOR)) |
| 312 | return NULL; | 312 | return NULL; |
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index 59c3770fa77d..b0e4c0bc85c3 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c | |||
| @@ -998,7 +998,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data( | |||
| 998 | return NULL; | 998 | return NULL; |
| 999 | } | 999 | } |
| 1000 | 1000 | ||
| 1001 | ret = of_regulator_match(pdev->dev.parent, regulators, matches, count); | 1001 | ret = of_regulator_match(&pdev->dev, regulators, matches, count); |
| 1002 | if (ret < 0) { | 1002 | if (ret < 0) { |
| 1003 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", | 1003 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", |
| 1004 | ret); | 1004 | ret); |
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 10c1a3454e48..81c5077feff3 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c | |||
| @@ -350,7 +350,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 350 | /* Enable the clockwatch on ST Variants */ | 350 | /* Enable the clockwatch on ST Variants */ |
| 351 | if (vendor->clockwatch) | 351 | if (vendor->clockwatch) |
| 352 | data |= RTC_CR_CWEN; | 352 | data |= RTC_CR_CWEN; |
| 353 | writel(data | RTC_CR_EN, ldata->base + RTC_CR); | 353 | else |
| 354 | data |= RTC_CR_EN; | ||
| 355 | writel(data, ldata->base + RTC_CR); | ||
| 354 | 356 | ||
| 355 | /* | 357 | /* |
| 356 | * On ST PL031 variants, the RTC reset value does not provide correct | 358 | * On ST PL031 variants, the RTC reset value does not provide correct |
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c index 97ac0a38e3d0..eb2753008ef0 100644 --- a/drivers/ssb/driver_gpio.c +++ b/drivers/ssb/driver_gpio.c | |||
| @@ -174,3 +174,15 @@ int ssb_gpio_init(struct ssb_bus *bus) | |||
| 174 | 174 | ||
| 175 | return -1; | 175 | return -1; |
| 176 | } | 176 | } |
| 177 | |||
| 178 | int ssb_gpio_unregister(struct ssb_bus *bus) | ||
| 179 | { | ||
| 180 | if (ssb_chipco_available(&bus->chipco) || | ||
| 181 | ssb_extif_available(&bus->extif)) { | ||
| 182 | return gpiochip_remove(&bus->gpio); | ||
| 183 | } else { | ||
| 184 | SSB_WARN_ON(1); | ||
| 185 | } | ||
| 186 | |||
| 187 | return -1; | ||
| 188 | } | ||
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 772ad9b5c304..24dc331b4701 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c | |||
| @@ -443,6 +443,15 @@ static void ssb_devices_unregister(struct ssb_bus *bus) | |||
| 443 | 443 | ||
| 444 | void ssb_bus_unregister(struct ssb_bus *bus) | 444 | void ssb_bus_unregister(struct ssb_bus *bus) |
| 445 | { | 445 | { |
| 446 | int err; | ||
| 447 | |||
| 448 | err = ssb_gpio_unregister(bus); | ||
| 449 | if (err == -EBUSY) | ||
| 450 | ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n"); | ||
| 451 | else if (err) | ||
| 452 | ssb_dprintk(KERN_ERR PFX | ||
| 453 | "Can not unregister GPIO driver: %i\n", err); | ||
| 454 | |||
| 446 | ssb_buses_lock(); | 455 | ssb_buses_lock(); |
| 447 | ssb_devices_unregister(bus); | 456 | ssb_devices_unregister(bus); |
| 448 | list_del(&bus->list); | 457 | list_del(&bus->list); |
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index 6c10b66c796c..da38305a2d22 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h | |||
| @@ -252,11 +252,16 @@ static inline void ssb_extif_init(struct ssb_extif *extif) | |||
| 252 | 252 | ||
| 253 | #ifdef CONFIG_SSB_DRIVER_GPIO | 253 | #ifdef CONFIG_SSB_DRIVER_GPIO |
| 254 | extern int ssb_gpio_init(struct ssb_bus *bus); | 254 | extern int ssb_gpio_init(struct ssb_bus *bus); |
| 255 | extern int ssb_gpio_unregister(struct ssb_bus *bus); | ||
| 255 | #else /* CONFIG_SSB_DRIVER_GPIO */ | 256 | #else /* CONFIG_SSB_DRIVER_GPIO */ |
| 256 | static inline int ssb_gpio_init(struct ssb_bus *bus) | 257 | static inline int ssb_gpio_init(struct ssb_bus *bus) |
| 257 | { | 258 | { |
| 258 | return -ENOTSUPP; | 259 | return -ENOTSUPP; |
| 259 | } | 260 | } |
| 261 | static inline int ssb_gpio_unregister(struct ssb_bus *bus) | ||
| 262 | { | ||
| 263 | return 0; | ||
| 264 | } | ||
| 260 | #endif /* CONFIG_SSB_DRIVER_GPIO */ | 265 | #endif /* CONFIG_SSB_DRIVER_GPIO */ |
| 261 | 266 | ||
| 262 | #endif /* LINUX_SSB_PRIVATE_H_ */ | 267 | #endif /* LINUX_SSB_PRIVATE_H_ */ |
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 4225d5e72131..8e64adf8e4d5 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <asm/unaligned.h> | 39 | #include <asm/unaligned.h> |
| 40 | #include <linux/platform_device.h> | 40 | #include <linux/platform_device.h> |
| 41 | #include <linux/workqueue.h> | 41 | #include <linux/workqueue.h> |
| 42 | #include <linux/pm_runtime.h> | ||
| 42 | 43 | ||
| 43 | #include <linux/usb.h> | 44 | #include <linux/usb.h> |
| 44 | #include <linux/usb/hcd.h> | 45 | #include <linux/usb/hcd.h> |
| @@ -1025,6 +1026,49 @@ static int register_root_hub(struct usb_hcd *hcd) | |||
| 1025 | return retval; | 1026 | return retval; |
| 1026 | } | 1027 | } |
| 1027 | 1028 | ||
| 1029 | /* | ||
| 1030 | * usb_hcd_start_port_resume - a root-hub port is sending a resume signal | ||
| 1031 | * @bus: the bus which the root hub belongs to | ||
| 1032 | * @portnum: the port which is being resumed | ||
| 1033 | * | ||
| 1034 | * HCDs should call this function when they know that a resume signal is | ||
| 1035 | * being sent to a root-hub port. The root hub will be prevented from | ||
| 1036 | * going into autosuspend until usb_hcd_end_port_resume() is called. | ||
| 1037 | * | ||
| 1038 | * The bus's private lock must be held by the caller. | ||
| 1039 | */ | ||
| 1040 | void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum) | ||
| 1041 | { | ||
| 1042 | unsigned bit = 1 << portnum; | ||
| 1043 | |||
| 1044 | if (!(bus->resuming_ports & bit)) { | ||
| 1045 | bus->resuming_ports |= bit; | ||
| 1046 | pm_runtime_get_noresume(&bus->root_hub->dev); | ||
| 1047 | } | ||
| 1048 | } | ||
| 1049 | EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume); | ||
| 1050 | |||
| 1051 | /* | ||
| 1052 | * usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal | ||
| 1053 | * @bus: the bus which the root hub belongs to | ||
| 1054 | * @portnum: the port which is being resumed | ||
| 1055 | * | ||
| 1056 | * HCDs should call this function when they know that a resume signal has | ||
| 1057 | * stopped being sent to a root-hub port. The root hub will be allowed to | ||
| 1058 | * autosuspend again. | ||
| 1059 | * | ||
| 1060 | * The bus's private lock must be held by the caller. | ||
| 1061 | */ | ||
| 1062 | void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum) | ||
| 1063 | { | ||
| 1064 | unsigned bit = 1 << portnum; | ||
| 1065 | |||
| 1066 | if (bus->resuming_ports & bit) { | ||
| 1067 | bus->resuming_ports &= ~bit; | ||
| 1068 | pm_runtime_put_noidle(&bus->root_hub->dev); | ||
| 1069 | } | ||
| 1070 | } | ||
| 1071 | EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume); | ||
| 1028 | 1072 | ||
| 1029 | /*-------------------------------------------------------------------------*/ | 1073 | /*-------------------------------------------------------------------------*/ |
| 1030 | 1074 | ||
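A condensed illustration of how an HCD is expected to bracket a port resume with the two new helpers (hypothetical helper name; the real call sites are in the ehci-hcd.c, ehci-hub.c and uhci-hub.c hunks below):

    /* The bus's private lock must already be held, as the comments above require. */
    static void demo_note_port_resume(struct usb_hcd *hcd, int port, bool in_progress)
    {
            if (in_progress)
                    usb_hcd_start_port_resume(&hcd->self, port);    /* pin the root hub */
            else
                    usb_hcd_end_port_resume(&hcd->self, port);      /* allow autosuspend */
    }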
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 957ed2c41482..cbf7168e3ce7 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
| @@ -2838,6 +2838,23 @@ void usb_enable_ltm(struct usb_device *udev) | |||
| 2838 | EXPORT_SYMBOL_GPL(usb_enable_ltm); | 2838 | EXPORT_SYMBOL_GPL(usb_enable_ltm); |
| 2839 | 2839 | ||
| 2840 | #ifdef CONFIG_USB_SUSPEND | 2840 | #ifdef CONFIG_USB_SUSPEND |
| 2841 | /* | ||
| 2842 | * usb_disable_function_remotewakeup - disable usb3.0 | ||
| 2843 | * device's function remote wakeup | ||
| 2844 | * @udev: target device | ||
| 2845 | * | ||
| 2846 | * Assume there's only one function on the USB 3.0 | ||
| 2847 | * device and disable remote wake for the first | ||
| 2848 | * interface. FIXME if the interface association | ||
| 2849 | * descriptor shows there's more than one function. | ||
| 2850 | */ | ||
| 2851 | static int usb_disable_function_remotewakeup(struct usb_device *udev) | ||
| 2852 | { | ||
| 2853 | return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | ||
| 2854 | USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE, | ||
| 2855 | USB_INTRF_FUNC_SUSPEND, 0, NULL, 0, | ||
| 2856 | USB_CTRL_SET_TIMEOUT); | ||
| 2857 | } | ||
| 2841 | 2858 | ||
| 2842 | /* | 2859 | /* |
| 2843 | * usb_port_suspend - suspend a usb device's upstream port | 2860 | * usb_port_suspend - suspend a usb device's upstream port |
| @@ -2955,12 +2972,19 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) | |||
| 2955 | dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", | 2972 | dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", |
| 2956 | port1, status); | 2973 | port1, status); |
| 2957 | /* paranoia: "should not happen" */ | 2974 | /* paranoia: "should not happen" */ |
| 2958 | if (udev->do_remote_wakeup) | 2975 | if (udev->do_remote_wakeup) { |
| 2959 | (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 2976 | if (!hub_is_superspeed(hub->hdev)) { |
| 2960 | USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, | 2977 | (void) usb_control_msg(udev, |
| 2961 | USB_DEVICE_REMOTE_WAKEUP, 0, | 2978 | usb_sndctrlpipe(udev, 0), |
| 2962 | NULL, 0, | 2979 | USB_REQ_CLEAR_FEATURE, |
| 2963 | USB_CTRL_SET_TIMEOUT); | 2980 | USB_RECIP_DEVICE, |
| 2981 | USB_DEVICE_REMOTE_WAKEUP, 0, | ||
| 2982 | NULL, 0, | ||
| 2983 | USB_CTRL_SET_TIMEOUT); | ||
| 2984 | } else | ||
| 2985 | (void) usb_disable_function_remotewakeup(udev); | ||
| 2986 | |||
| 2987 | } | ||
| 2964 | 2988 | ||
| 2965 | /* Try to enable USB2 hardware LPM again */ | 2989 | /* Try to enable USB2 hardware LPM again */ |
| 2966 | if (udev->usb2_hw_lpm_capable == 1) | 2990 | if (udev->usb2_hw_lpm_capable == 1) |
| @@ -3052,20 +3076,30 @@ static int finish_port_resume(struct usb_device *udev) | |||
| 3052 | * udev->reset_resume | 3076 | * udev->reset_resume |
| 3053 | */ | 3077 | */ |
| 3054 | } else if (udev->actconfig && !udev->reset_resume) { | 3078 | } else if (udev->actconfig && !udev->reset_resume) { |
| 3055 | le16_to_cpus(&devstatus); | 3079 | if (!hub_is_superspeed(udev->parent)) { |
| 3056 | if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { | 3080 | le16_to_cpus(&devstatus); |
| 3057 | status = usb_control_msg(udev, | 3081 | if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) |
| 3058 | usb_sndctrlpipe(udev, 0), | 3082 | status = usb_control_msg(udev, |
| 3059 | USB_REQ_CLEAR_FEATURE, | 3083 | usb_sndctrlpipe(udev, 0), |
| 3084 | USB_REQ_CLEAR_FEATURE, | ||
| 3060 | USB_RECIP_DEVICE, | 3085 | USB_RECIP_DEVICE, |
| 3061 | USB_DEVICE_REMOTE_WAKEUP, 0, | 3086 | USB_DEVICE_REMOTE_WAKEUP, 0, |
| 3062 | NULL, 0, | 3087 | NULL, 0, |
| 3063 | USB_CTRL_SET_TIMEOUT); | 3088 | USB_CTRL_SET_TIMEOUT); |
| 3064 | if (status) | 3089 | } else { |
| 3065 | dev_dbg(&udev->dev, | 3090 | status = usb_get_status(udev, USB_RECIP_INTERFACE, 0, |
| 3066 | "disable remote wakeup, status %d\n", | 3091 | &devstatus); |
| 3067 | status); | 3092 | le16_to_cpus(&devstatus); |
| 3093 | if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP | ||
| 3094 | | USB_INTRF_STAT_FUNC_RW)) | ||
| 3095 | status = | ||
| 3096 | usb_disable_function_remotewakeup(udev); | ||
| 3068 | } | 3097 | } |
| 3098 | |||
| 3099 | if (status) | ||
| 3100 | dev_dbg(&udev->dev, | ||
| 3101 | "disable remote wakeup, status %d\n", | ||
| 3102 | status); | ||
| 3069 | status = 0; | 3103 | status = 0; |
| 3070 | } | 3104 | } |
| 3071 | return status; | 3105 | return status; |
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 09537b2f1002..b416a3fc9959 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
| @@ -797,6 +797,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) | |||
| 797 | ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); | 797 | ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); |
| 798 | set_bit(i, &ehci->resuming_ports); | 798 | set_bit(i, &ehci->resuming_ports); |
| 799 | ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); | 799 | ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); |
| 800 | usb_hcd_start_port_resume(&hcd->self, i); | ||
| 800 | mod_timer(&hcd->rh_timer, ehci->reset_done[i]); | 801 | mod_timer(&hcd->rh_timer, ehci->reset_done[i]); |
| 801 | } | 802 | } |
| 802 | } | 803 | } |
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 4ccb97c0678f..4d3b294f203e 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
| @@ -649,7 +649,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf) | |||
| 649 | status = STS_PCD; | 649 | status = STS_PCD; |
| 650 | } | 650 | } |
| 651 | } | 651 | } |
| 652 | /* FIXME autosuspend idle root hubs */ | 652 | |
| 653 | /* If a resume is in progress, make sure it can finish */ | ||
| 654 | if (ehci->resuming_ports) | ||
| 655 | mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25)); | ||
| 656 | |||
| 653 | spin_unlock_irqrestore (&ehci->lock, flags); | 657 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 654 | return status ? retval : 0; | 658 | return status ? retval : 0; |
| 655 | } | 659 | } |
| @@ -851,6 +855,7 @@ static int ehci_hub_control ( | |||
| 851 | /* resume signaling for 20 msec */ | 855 | /* resume signaling for 20 msec */ |
| 852 | ehci->reset_done[wIndex] = jiffies | 856 | ehci->reset_done[wIndex] = jiffies |
| 853 | + msecs_to_jiffies(20); | 857 | + msecs_to_jiffies(20); |
| 858 | usb_hcd_start_port_resume(&hcd->self, wIndex); | ||
| 854 | /* check the port again */ | 859 | /* check the port again */ |
| 855 | mod_timer(&ehci_to_hcd(ehci)->rh_timer, | 860 | mod_timer(&ehci_to_hcd(ehci)->rh_timer, |
| 856 | ehci->reset_done[wIndex]); | 861 | ehci->reset_done[wIndex]); |
| @@ -862,6 +867,7 @@ static int ehci_hub_control ( | |||
| 862 | clear_bit(wIndex, &ehci->suspended_ports); | 867 | clear_bit(wIndex, &ehci->suspended_ports); |
| 863 | set_bit(wIndex, &ehci->port_c_suspend); | 868 | set_bit(wIndex, &ehci->port_c_suspend); |
| 864 | ehci->reset_done[wIndex] = 0; | 869 | ehci->reset_done[wIndex] = 0; |
| 870 | usb_hcd_end_port_resume(&hcd->self, wIndex); | ||
| 865 | 871 | ||
| 866 | /* stop resume signaling */ | 872 | /* stop resume signaling */ |
| 867 | temp = ehci_readl(ehci, status_reg); | 873 | temp = ehci_readl(ehci, status_reg); |
| @@ -950,6 +956,7 @@ static int ehci_hub_control ( | |||
| 950 | ehci->reset_done[wIndex] = 0; | 956 | ehci->reset_done[wIndex] = 0; |
| 951 | if (temp & PORT_PE) | 957 | if (temp & PORT_PE) |
| 952 | set_bit(wIndex, &ehci->port_c_suspend); | 958 | set_bit(wIndex, &ehci->port_c_suspend); |
| 959 | usb_hcd_end_port_resume(&hcd->self, wIndex); | ||
| 953 | } | 960 | } |
| 954 | 961 | ||
| 955 | if (temp & PORT_OC) | 962 | if (temp & PORT_OC) |
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 3d989028c836..fd252f0cfb3a 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
| @@ -1197,17 +1197,26 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested) | |||
| 1197 | if (ehci->async_iaa || ehci->async_unlinking) | 1197 | if (ehci->async_iaa || ehci->async_unlinking) |
| 1198 | return; | 1198 | return; |
| 1199 | 1199 | ||
| 1200 | /* Do all the waiting QHs at once */ | ||
| 1201 | ehci->async_iaa = ehci->async_unlink; | ||
| 1202 | ehci->async_unlink = NULL; | ||
| 1203 | |||
| 1204 | /* If the controller isn't running, we don't have to wait for it */ | 1200 | /* If the controller isn't running, we don't have to wait for it */ |
| 1205 | if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { | 1201 | if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { |
| 1202 | |||
| 1203 | /* Do all the waiting QHs */ | ||
| 1204 | ehci->async_iaa = ehci->async_unlink; | ||
| 1205 | ehci->async_unlink = NULL; | ||
| 1206 | |||
| 1206 | if (!nested) /* Avoid recursion */ | 1207 | if (!nested) /* Avoid recursion */ |
| 1207 | end_unlink_async(ehci); | 1208 | end_unlink_async(ehci); |
| 1208 | 1209 | ||
| 1209 | /* Otherwise start a new IAA cycle */ | 1210 | /* Otherwise start a new IAA cycle */ |
| 1210 | } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) { | 1211 | } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) { |
| 1212 | struct ehci_qh *qh; | ||
| 1213 | |||
| 1214 | /* Do only the first waiting QH (nVidia bug?) */ | ||
| 1215 | qh = ehci->async_unlink; | ||
| 1216 | ehci->async_iaa = qh; | ||
| 1217 | ehci->async_unlink = qh->unlink_next; | ||
| 1218 | qh->unlink_next = NULL; | ||
| 1219 | |||
| 1211 | /* Make sure the unlinks are all visible to the hardware */ | 1220 | /* Make sure the unlinks are all visible to the hardware */ |
| 1212 | wmb(); | 1221 | wmb(); |
| 1213 | 1222 | ||
| @@ -1255,34 +1264,35 @@ static void end_unlink_async(struct ehci_hcd *ehci) | |||
| 1255 | } | 1264 | } |
| 1256 | } | 1265 | } |
| 1257 | 1266 | ||
| 1267 | static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh); | ||
| 1268 | |||
| 1258 | static void unlink_empty_async(struct ehci_hcd *ehci) | 1269 | static void unlink_empty_async(struct ehci_hcd *ehci) |
| 1259 | { | 1270 | { |
| 1260 | struct ehci_qh *qh, *next; | 1271 | struct ehci_qh *qh; |
| 1261 | bool stopped = (ehci->rh_state < EHCI_RH_RUNNING); | 1272 | struct ehci_qh *qh_to_unlink = NULL; |
| 1262 | bool check_unlinks_later = false; | 1273 | bool check_unlinks_later = false; |
| 1274 | int count = 0; | ||
| 1263 | 1275 | ||
| 1264 | /* Unlink all the async QHs that have been empty for a timer cycle */ | 1276 | /* Find the last async QH which has been empty for a timer cycle */ |
| 1265 | next = ehci->async->qh_next.qh; | 1277 | for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) { |
| 1266 | while (next) { | ||
| 1267 | qh = next; | ||
| 1268 | next = qh->qh_next.qh; | ||
| 1269 | |||
| 1270 | if (list_empty(&qh->qtd_list) && | 1278 | if (list_empty(&qh->qtd_list) && |
| 1271 | qh->qh_state == QH_STATE_LINKED) { | 1279 | qh->qh_state == QH_STATE_LINKED) { |
| 1272 | if (!stopped && qh->unlink_cycle == | 1280 | ++count; |
| 1273 | ehci->async_unlink_cycle) | 1281 | if (qh->unlink_cycle == ehci->async_unlink_cycle) |
| 1274 | check_unlinks_later = true; | 1282 | check_unlinks_later = true; |
| 1275 | else | 1283 | else |
| 1276 | single_unlink_async(ehci, qh); | 1284 | qh_to_unlink = qh; |
| 1277 | } | 1285 | } |
| 1278 | } | 1286 | } |
| 1279 | 1287 | ||
| 1280 | /* Start a new IAA cycle if any QHs are waiting for it */ | 1288 | /* If nothing else is being unlinked, unlink the last empty QH */ |
| 1281 | if (ehci->async_unlink) | 1289 | if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) { |
| 1282 | start_iaa_cycle(ehci, false); | 1290 | start_unlink_async(ehci, qh_to_unlink); |
| 1291 | --count; | ||
| 1292 | } | ||
| 1283 | 1293 | ||
| 1284 | /* QHs that haven't been empty for long enough will be handled later */ | 1294 | /* Other QHs will be handled later */ |
| 1285 | if (check_unlinks_later) { | 1295 | if (count > 0) { |
| 1286 | ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); | 1296 | ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); |
| 1287 | ++ehci->async_unlink_cycle; | 1297 | ++ehci->async_unlink_cycle; |
| 1288 | } | 1298 | } |
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 69ebee73c0c1..b476daf49f6f 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
| @@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) | |||
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | static const unsigned char | 215 | static const unsigned char |
| 216 | max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; | 216 | max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 }; |
| 217 | 217 | ||
| 218 | /* carryover low/fullspeed bandwidth that crosses uframe boundries */ | 218 | /* carryover low/fullspeed bandwidth that crosses uframe boundries */ |
| 219 | static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) | 219 | static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) |
| @@ -2212,11 +2212,11 @@ static void scan_isoc(struct ehci_hcd *ehci) | |||
| 2212 | } | 2212 | } |
| 2213 | ehci->now_frame = now_frame; | 2213 | ehci->now_frame = now_frame; |
| 2214 | 2214 | ||
| 2215 | frame = ehci->last_iso_frame; | ||
| 2215 | for (;;) { | 2216 | for (;;) { |
| 2216 | union ehci_shadow q, *q_p; | 2217 | union ehci_shadow q, *q_p; |
| 2217 | __hc32 type, *hw_p; | 2218 | __hc32 type, *hw_p; |
| 2218 | 2219 | ||
| 2219 | frame = ehci->last_iso_frame; | ||
| 2220 | restart: | 2220 | restart: |
| 2221 | /* scan each element in frame's queue for completions */ | 2221 | /* scan each element in frame's queue for completions */ |
| 2222 | q_p = &ehci->pshadow [frame]; | 2222 | q_p = &ehci->pshadow [frame]; |
| @@ -2321,6 +2321,9 @@ restart: | |||
| 2321 | /* Stop when we have reached the current frame */ | 2321 | /* Stop when we have reached the current frame */ |
| 2322 | if (frame == now_frame) | 2322 | if (frame == now_frame) |
| 2323 | break; | 2323 | break; |
| 2324 | ehci->last_iso_frame = (frame + 1) & fmask; | 2324 | |
| 2325 | /* The last frame may still have active siTDs */ | ||
| 2326 | ehci->last_iso_frame = frame; | ||
| 2327 | frame = (frame + 1) & fmask; | ||
| 2325 | } | 2328 | } |
| 2326 | } | 2329 | } |
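For the max_tt_usecs change above, the new per-microframe budget sums to 7 * 125 + 25 = 900 us, i.e. 90% of the 1 ms full-speed frame, which is the ceiling the USB 2.0 specification allows for periodic transfers behind a transaction translator; the old table (6 * 125 + 30 + 0 = 780 us) needlessly left part of that budget unusable.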
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c index 20dbdcbe9b0f..f904071d70df 100644 --- a/drivers/usb/host/ehci-timer.c +++ b/drivers/usb/host/ehci-timer.c | |||
| @@ -113,14 +113,15 @@ static void ehci_poll_ASS(struct ehci_hcd *ehci) | |||
| 113 | 113 | ||
| 114 | if (want != actual) { | 114 | if (want != actual) { |
| 115 | 115 | ||
| 116 | /* Poll again later, but give up after about 20 ms */ | 116 | /* Poll again later */ |
| 117 | if (ehci->ASS_poll_count++ < 20) { | 117 | ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); |
| 118 | ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); | 118 | ++ehci->ASS_poll_count; |
| 119 | return; | 119 | return; |
| 120 | } | ||
| 121 | ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n", | ||
| 122 | want, actual); | ||
| 123 | } | 120 | } |
| 121 | |||
| 122 | if (ehci->ASS_poll_count > 20) | ||
| 123 | ehci_dbg(ehci, "ASS poll count reached %d\n", | ||
| 124 | ehci->ASS_poll_count); | ||
| 124 | ehci->ASS_poll_count = 0; | 125 | ehci->ASS_poll_count = 0; |
| 125 | 126 | ||
| 126 | /* The status is up-to-date; restart or stop the schedule as needed */ | 127 | /* The status is up-to-date; restart or stop the schedule as needed */ |
| @@ -159,14 +160,14 @@ static void ehci_poll_PSS(struct ehci_hcd *ehci) | |||
| 159 | 160 | ||
| 160 | if (want != actual) { | 161 | if (want != actual) { |
| 161 | 162 | ||
| 162 | /* Poll again later, but give up after about 20 ms */ | 163 | /* Poll again later */ |
| 163 | if (ehci->PSS_poll_count++ < 20) { | 164 | ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); |
| 164 | ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); | 165 | return; |
| 165 | return; | ||
| 166 | } | ||
| 167 | ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n", | ||
| 168 | want, actual); | ||
| 169 | } | 166 | } |
| 167 | |||
| 168 | if (ehci->PSS_poll_count > 20) | ||
| 169 | ehci_dbg(ehci, "PSS poll count reached %d\n", | ||
| 170 | ehci->PSS_poll_count); | ||
| 170 | ehci->PSS_poll_count = 0; | 171 | ehci->PSS_poll_count = 0; |
| 171 | 172 | ||
| 172 | /* The status is up-to-date; restart or stop the schedule as needed */ | 173 | /* The status is up-to-date; restart or stop the schedule as needed */ |
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index a3b6d7104ae2..4c338ec03a07 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
| @@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) | |||
| 780 | "defaulting to EHCI.\n"); | 780 | "defaulting to EHCI.\n"); |
| 781 | dev_warn(&xhci_pdev->dev, | 781 | dev_warn(&xhci_pdev->dev, |
| 782 | "USB 3.0 devices will work at USB 2.0 speeds.\n"); | 782 | "USB 3.0 devices will work at USB 2.0 speeds.\n"); |
| 783 | usb_disable_xhci_ports(xhci_pdev); | ||
| 783 | return; | 784 | return; |
| 784 | } | 785 | } |
| 785 | 786 | ||
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index 768d54295a20..15d13229ddbb 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c | |||
| @@ -116,6 +116,7 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port, | |||
| 116 | } | 116 | } |
| 117 | } | 117 | } |
| 118 | clear_bit(port, &uhci->resuming_ports); | 118 | clear_bit(port, &uhci->resuming_ports); |
| 119 | usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port); | ||
| 119 | } | 120 | } |
| 120 | 121 | ||
| 121 | /* Wait for the UHCI controller in HP's iLO2 server management chip. | 122 | /* Wait for the UHCI controller in HP's iLO2 server management chip. |
| @@ -167,6 +168,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci) | |||
| 167 | set_bit(port, &uhci->resuming_ports); | 168 | set_bit(port, &uhci->resuming_ports); |
| 168 | uhci->ports_timeout = jiffies + | 169 | uhci->ports_timeout = jiffies + |
| 169 | msecs_to_jiffies(25); | 170 | msecs_to_jiffies(25); |
| 171 | usb_hcd_start_port_resume( | ||
| 172 | &uhci_to_hcd(uhci)->self, port); | ||
| 170 | 173 | ||
| 171 | /* Make sure we see the port again | 174 | /* Make sure we see the port again |
| 172 | * after the resuming period is over. */ | 175 | * after the resuming period is over. */ |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 59fb5c677dbe..7f76a49e90d3 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -1698,7 +1698,7 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
| 1698 | faked_port_index + 1); | 1698 | faked_port_index + 1); |
| 1699 | if (slot_id && xhci->devs[slot_id]) | 1699 | if (slot_id && xhci->devs[slot_id]) |
| 1700 | xhci_ring_device(xhci, slot_id); | 1700 | xhci_ring_device(xhci, slot_id); |
| 1701 | if (bus_state->port_remote_wakeup && (1 << faked_port_index)) { | 1701 | if (bus_state->port_remote_wakeup & (1 << faked_port_index)) { |
| 1702 | bus_state->port_remote_wakeup &= | 1702 | bus_state->port_remote_wakeup &= |
| 1703 | ~(1 << faked_port_index); | 1703 | ~(1 << faked_port_index); |
| 1704 | xhci_test_and_clear_bit(xhci, port_array, | 1704 | xhci_test_and_clear_bit(xhci, port_array, |
| @@ -2589,6 +2589,8 @@ cleanup: | |||
| 2589 | (trb_comp_code != COMP_STALL && | 2589 | (trb_comp_code != COMP_STALL && |
| 2590 | trb_comp_code != COMP_BABBLE)) | 2590 | trb_comp_code != COMP_BABBLE)) |
| 2591 | xhci_urb_free_priv(xhci, urb_priv); | 2591 | xhci_urb_free_priv(xhci, urb_priv); |
| 2592 | else | ||
| 2593 | kfree(urb_priv); | ||
| 2592 | 2594 | ||
| 2593 | usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); | 2595 | usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); |
| 2594 | if ((urb->actual_length != urb->transfer_buffer_length && | 2596 | if ((urb->actual_length != urb->transfer_buffer_length && |
| @@ -3108,7 +3110,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, | |||
| 3108 | * running_total. | 3110 | * running_total. |
| 3109 | */ | 3111 | */ |
| 3110 | packets_transferred = (running_total + trb_buff_len) / | 3112 | packets_transferred = (running_total + trb_buff_len) / |
| 3111 | usb_endpoint_maxp(&urb->ep->desc); | 3113 | GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); |
| 3112 | 3114 | ||
| 3113 | if ((total_packet_count - packets_transferred) > 31) | 3115 | if ((total_packet_count - packets_transferred) > 31) |
| 3114 | return 31 << 17; | 3116 | return 31 << 17; |
| @@ -3642,7 +3644,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
| 3642 | td_len = urb->iso_frame_desc[i].length; | 3644 | td_len = urb->iso_frame_desc[i].length; |
| 3643 | td_remain_len = td_len; | 3645 | td_remain_len = td_len; |
| 3644 | total_packet_count = DIV_ROUND_UP(td_len, | 3646 | total_packet_count = DIV_ROUND_UP(td_len, |
| 3645 | usb_endpoint_maxp(&urb->ep->desc)); | 3647 | GET_MAX_PACKET( |
| 3648 | usb_endpoint_maxp(&urb->ep->desc))); | ||
| 3646 | /* A zero-length transfer still involves at least one packet. */ | 3649 | /* A zero-length transfer still involves at least one packet. */ |
| 3647 | if (total_packet_count == 0) | 3650 | if (total_packet_count == 0) |
| 3648 | total_packet_count++; | 3651 | total_packet_count++; |
| @@ -3664,9 +3667,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
| 3664 | td = urb_priv->td[i]; | 3667 | td = urb_priv->td[i]; |
| 3665 | for (j = 0; j < trbs_per_td; j++) { | 3668 | for (j = 0; j < trbs_per_td; j++) { |
| 3666 | u32 remainder = 0; | 3669 | u32 remainder = 0; |
| 3667 | field = TRB_TBC(burst_count) | TRB_TLBPC(residue); | 3670 | field = 0; |
| 3668 | 3671 | ||
| 3669 | if (first_trb) { | 3672 | if (first_trb) { |
| 3673 | field = TRB_TBC(burst_count) | | ||
| 3674 | TRB_TLBPC(residue); | ||
| 3670 | /* Queue the isoc TRB */ | 3675 | /* Queue the isoc TRB */ |
| 3671 | field |= TRB_TYPE(TRB_ISOC); | 3676 | field |= TRB_TYPE(TRB_ISOC); |
| 3672 | /* Assume URB_ISO_ASAP is set */ | 3677 | /* Assume URB_ISO_ASAP is set */ |
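A quick check of why the GET_MAX_PACKET masking above matters (userspace sketch; it assumes the helper masks wMaxPacketSize down to bits 10:0, with bits 12:11 carrying the high-bandwidth extra-transaction count):

    #include <stdio.h>

    #define GET_MAX_PACKET(p)   ((p) & 0x7ff)                  /* assumed mask, bits 10:0 */
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int maxp = 0x1400;     /* 1024-byte packets, 2 extra transactions */
            unsigned int td_len = 3072;

            printf("unmasked: %u packets\n", DIV_ROUND_UP(td_len, maxp));                 /* 1 */
            printf("masked:   %u packets\n", DIV_ROUND_UP(td_len, GET_MAX_PACKET(maxp))); /* 3 */
            return 0;
    }

Dividing by the raw wMaxPacketSize undercounts packets, which skews both the TD remainder field and the isoc packet count.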
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f14736f647ff..edc0f0dcad83 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
| @@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = { | |||
| 60 | { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ | 60 | { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ |
| 61 | { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ | 61 | { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ |
| 62 | { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ | 62 | { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ |
| 63 | { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */ | ||
| 63 | { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ | 64 | { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ |
| 64 | { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ | 65 | { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ |
| 65 | { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ | 66 | { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index ba68835d06a6..90ceef1776c3 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -584,6 +584,7 @@ static struct usb_device_id id_table_combined [] = { | |||
| 584 | /* | 584 | /* |
| 585 | * ELV devices: | 585 | * ELV devices: |
| 586 | */ | 586 | */ |
| 587 | { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) }, | ||
| 587 | { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, | 588 | { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, |
| 588 | { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, | 589 | { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, |
| 589 | { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, | 590 | { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, |
| @@ -670,6 +671,7 @@ static struct usb_device_id id_table_combined [] = { | |||
| 670 | { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, | 671 | { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, |
| 671 | { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, | 672 | { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, |
| 672 | { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, | 673 | { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, |
| 674 | { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, | ||
| 673 | { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, | 675 | { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, |
| 674 | { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, | 676 | { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, |
| 675 | { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, | 677 | { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index fa5d56038276..9d359e189a64 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
| @@ -147,6 +147,11 @@ | |||
| 147 | #define XSENS_CONVERTER_6_PID 0xD38E | 147 | #define XSENS_CONVERTER_6_PID 0xD38E |
| 148 | #define XSENS_CONVERTER_7_PID 0xD38F | 148 | #define XSENS_CONVERTER_7_PID 0xD38F |
| 149 | 149 | ||
| 150 | /** | ||
| 151 | * Zolix (www.zolix.com.cb) product ids | ||
| 152 | */ | ||
| 153 | #define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */ | ||
| 154 | |||
| 150 | /* | 155 | /* |
| 151 | * NDI (www.ndigital.com) product ids | 156 | * NDI (www.ndigital.com) product ids |
| 152 | */ | 157 | */ |
| @@ -204,7 +209,7 @@ | |||
| 204 | 209 | ||
| 205 | /* | 210 | /* |
| 206 | * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). | 211 | * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). |
| 207 | * All of these devices use FTDI's vendor ID (0x0403). | 212 | * Almost all of these devices use FTDI's vendor ID (0x0403). |
| 208 | * Further IDs taken from ELV Windows .inf file. | 213 | * Further IDs taken from ELV Windows .inf file. |
| 209 | * | 214 | * |
| 210 | * The previously included PID for the UO 100 module was incorrect. | 215 | * The previously included PID for the UO 100 module was incorrect. |
| @@ -212,6 +217,8 @@ | |||
| 212 | * | 217 | * |
| 213 | * Armin Laeuger originally sent the PID for the UM 100 module. | 218 | * Armin Laeuger originally sent the PID for the UM 100 module. |
| 214 | */ | 219 | */ |
| 220 | #define FTDI_ELV_VID 0x1B1F /* ELV AG */ | ||
| 221 | #define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */ | ||
| 215 | #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ | 222 | #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ |
| 216 | #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ | 223 | #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ |
| 217 | #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ | 224 | #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 0d9dac9e7f93..567bc77d6397 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb); | |||
| 242 | #define TELIT_PRODUCT_CC864_DUAL 0x1005 | 242 | #define TELIT_PRODUCT_CC864_DUAL 0x1005 |
| 243 | #define TELIT_PRODUCT_CC864_SINGLE 0x1006 | 243 | #define TELIT_PRODUCT_CC864_SINGLE 0x1006 |
| 244 | #define TELIT_PRODUCT_DE910_DUAL 0x1010 | 244 | #define TELIT_PRODUCT_DE910_DUAL 0x1010 |
| 245 | #define TELIT_PRODUCT_LE920 0x1200 | ||
| 245 | 246 | ||
| 246 | /* ZTE PRODUCTS */ | 247 | /* ZTE PRODUCTS */ |
| 247 | #define ZTE_VENDOR_ID 0x19d2 | 248 | #define ZTE_VENDOR_ID 0x19d2 |
| @@ -453,6 +454,10 @@ static void option_instat_callback(struct urb *urb); | |||
| 453 | #define TPLINK_VENDOR_ID 0x2357 | 454 | #define TPLINK_VENDOR_ID 0x2357 |
| 454 | #define TPLINK_PRODUCT_MA180 0x0201 | 455 | #define TPLINK_PRODUCT_MA180 0x0201 |
| 455 | 456 | ||
| 457 | /* Changhong products */ | ||
| 458 | #define CHANGHONG_VENDOR_ID 0x2077 | ||
| 459 | #define CHANGHONG_PRODUCT_CH690 0x7001 | ||
| 460 | |||
| 456 | /* some devices interfaces need special handling due to a number of reasons */ | 461 | /* some devices interfaces need special handling due to a number of reasons */ |
| 457 | enum option_blacklist_reason { | 462 | enum option_blacklist_reason { |
| 458 | OPTION_BLACKLIST_NONE = 0, | 463 | OPTION_BLACKLIST_NONE = 0, |
| @@ -534,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = { | |||
| 534 | .reserved = BIT(3) | BIT(4), | 539 | .reserved = BIT(3) | BIT(4), |
| 535 | }; | 540 | }; |
| 536 | 541 | ||
| 542 | static const struct option_blacklist_info telit_le920_blacklist = { | ||
| 543 | .sendsetup = BIT(0), | ||
| 544 | .reserved = BIT(1) | BIT(5), | ||
| 545 | }; | ||
| 546 | |||
| 537 | static const struct usb_device_id option_ids[] = { | 547 | static const struct usb_device_id option_ids[] = { |
| 538 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 548 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
| 539 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 549 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
| @@ -784,6 +794,8 @@ static const struct usb_device_id option_ids[] = { | |||
| 784 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, | 794 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, |
| 785 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, | 795 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, |
| 786 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, | 796 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, |
| 797 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), | ||
| 798 | .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, | ||
| 787 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ | 799 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ |
| 788 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), | 800 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), |
| 789 | .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, | 801 | .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, |
| @@ -1318,6 +1330,7 @@ static const struct usb_device_id option_ids[] = { | |||
| 1318 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, | 1330 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, |
| 1319 | { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), | 1331 | { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), |
| 1320 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1332 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
| 1333 | { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, | ||
| 1321 | { } /* Terminating entry */ | 1334 | { } /* Terminating entry */ |
| 1322 | }; | 1335 | }; |
| 1323 | MODULE_DEVICE_TABLE(usb, option_ids); | 1336 | MODULE_DEVICE_TABLE(usb, option_ids); |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index aa148c21ea40..24662547dc5b 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
| @@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = { | |||
| 53 | {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ | 53 | {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ |
| 54 | {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ | 54 | {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ |
| 55 | {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ | 55 | {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ |
| 56 | {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */ | ||
| 56 | 57 | ||
| 57 | /* Gobi 2000 devices */ | 58 | /* Gobi 2000 devices */ |
| 58 | {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */ | 59 | {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */ |
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c index 105d900150c1..16b0bf055eeb 100644 --- a/drivers/usb/storage/initializers.c +++ b/drivers/usb/storage/initializers.c | |||
| @@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us) | |||
| 92 | return 0; | 92 | return 0; |
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | /* This places the HUAWEI E220 devices in multi-port mode */ | 95 | /* This places the HUAWEI usb dongles in multi-port mode */ |
| 96 | int usb_stor_huawei_e220_init(struct us_data *us) | 96 | static int usb_stor_huawei_feature_init(struct us_data *us) |
| 97 | { | 97 | { |
| 98 | int result; | 98 | int result; |
| 99 | 99 | ||
| @@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us) | |||
| 104 | US_DEBUGP("Huawei mode set result is %d\n", result); | 104 | US_DEBUGP("Huawei mode set result is %d\n", result); |
| 105 | return 0; | 105 | return 0; |
| 106 | } | 106 | } |
| 107 | |||
| 108 | /* | ||
| 109 | * This sends a SCSI switch command called 'rewind' to the Huawei dongle. | ||
| 110 | * The first time the dongle receives this command it reboots immediately; | ||
| 111 | * after rebooting it ignores the command, so there is no need to read | ||
| 112 | * its response. | ||
| 113 | */ | ||
| 114 | static int usb_stor_huawei_scsi_init(struct us_data *us) | ||
| 115 | { | ||
| 116 | int result = 0; | ||
| 117 | int act_len = 0; | ||
| 118 | struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf; | ||
| 119 | char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00, | ||
| 120 | 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; | ||
| 121 | |||
| 122 | bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN); | ||
| 123 | bcbw->Tag = 0; | ||
| 124 | bcbw->DataTransferLength = 0; | ||
| 125 | bcbw->Flags = bcbw->Lun = 0; | ||
| 126 | bcbw->Length = sizeof(rewind_cmd); | ||
| 127 | memset(bcbw->CDB, 0, sizeof(bcbw->CDB)); | ||
| 128 | memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd)); | ||
| 129 | |||
| 130 | result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw, | ||
| 131 | US_BULK_CB_WRAP_LEN, &act_len); | ||
| 132 | US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result); | ||
| 133 | return result; | ||
| 134 | } | ||
| 135 | |||
| 136 | /* | ||
| 137 | * Check whether this is a supported Huawei USB dongle. | ||
| 138 | * Huawei assigns the following product IDs to all of its | ||
| 139 | * mobile broadband dongles, including future ones. | ||
| 140 | * So if the product ID is not included in this list, | ||
| 141 | * the device is not one of Huawei's mobile broadband | ||
| 142 | * dongles. | ||
| 143 | */ | ||
| 144 | static int usb_stor_huawei_dongles_pid(struct us_data *us) | ||
| 145 | { | ||
| 146 | struct usb_interface_descriptor *idesc; | ||
| 147 | int idProduct; | ||
| 148 | |||
| 149 | idesc = &us->pusb_intf->cur_altsetting->desc; | ||
| 150 | idProduct = us->pusb_dev->descriptor.idProduct; | ||
| 151 | /* If the first port is a CD-ROM, the dongle is in | ||
| 152 | * single-port mode and a mode-switch command still | ||
| 153 | * needs to be sent. */ | ||
| 154 | if (idesc && idesc->bInterfaceNumber == 0) { | ||
| 155 | if ((idProduct == 0x1001) | ||
| 156 | || (idProduct == 0x1003) | ||
| 157 | || (idProduct == 0x1004) | ||
| 158 | || (idProduct >= 0x1401 && idProduct <= 0x1500) | ||
| 159 | || (idProduct >= 0x1505 && idProduct <= 0x1600) | ||
| 160 | || (idProduct >= 0x1c02 && idProduct <= 0x2202)) { | ||
| 161 | return 1; | ||
| 162 | } | ||
| 163 | } | ||
| 164 | return 0; | ||
| 165 | } | ||
| 166 | |||
| 167 | int usb_stor_huawei_init(struct us_data *us) | ||
| 168 | { | ||
| 169 | int result = 0; | ||
| 170 | |||
| 171 | if (usb_stor_huawei_dongles_pid(us)) { | ||
| 172 | if (us->pusb_dev->descriptor.idProduct >= 0x1446) | ||
| 173 | result = usb_stor_huawei_scsi_init(us); | ||
| 174 | else | ||
| 175 | result = usb_stor_huawei_feature_init(us); | ||
| 176 | } | ||
| 177 | return result; | ||
| 178 | } | ||
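In practice this means an older E220-class dongle (e.g. PID 0x1001 or 0x1003) still goes through usb_stor_huawei_feature_init(), while a newer dongle such as PID 0x1506 (>= 0x1446) is switched with the SCSI 'rewind' command in usb_stor_huawei_scsi_init() instead.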
diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h index 529327fbb06b..5376d4fc76f0 100644 --- a/drivers/usb/storage/initializers.h +++ b/drivers/usb/storage/initializers.h | |||
| @@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us); | |||
| 46 | * flash reader */ | 46 | * flash reader */ |
| 47 | int usb_stor_ucr61s2b_init(struct us_data *us); | 47 | int usb_stor_ucr61s2b_init(struct us_data *us); |
| 48 | 48 | ||
| 49 | /* This places the HUAWEI E220 devices in multi-port mode */ | 49 | /* This places the HUAWEI usb dongles in multi-port mode */ |
| 50 | int usb_stor_huawei_e220_init(struct us_data *us); | 50 | int usb_stor_huawei_init(struct us_data *us); |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index d305a5aa3a5d..72923b56bbf6 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
| @@ -1527,335 +1527,10 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100, | |||
| 1527 | /* Reported by fangxiaozhi <huananhu@huawei.com> | 1527 | /* Reported by fangxiaozhi <huananhu@huawei.com> |
| 1528 | * This brings the HUAWEI data card devices into multi-port mode | 1528 | * This brings the HUAWEI data card devices into multi-port mode |
| 1529 | */ | 1529 | */ |
| 1530 | UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, | 1530 | UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50, |
| 1531 | "HUAWEI MOBILE", | 1531 | "HUAWEI MOBILE", |
| 1532 | "Mass Storage", | 1532 | "Mass Storage", |
| 1533 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | 1533 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init, |
| 1534 | 0), | ||
| 1535 | UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, | ||
| 1536 | "HUAWEI MOBILE", | ||
| 1537 | "Mass Storage", | ||
| 1538 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1539 | 0), | ||
| 1540 | UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, | ||
| 1541 | "HUAWEI MOBILE", | ||
| 1542 | "Mass Storage", | ||
| 1543 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1544 | 0), | ||
| 1545 | UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, | ||
| 1546 | "HUAWEI MOBILE", | ||
| 1547 | "Mass Storage", | ||
| 1548 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1549 | 0), | ||
| 1550 | UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000, | ||
| 1551 | "HUAWEI MOBILE", | ||
| 1552 | "Mass Storage", | ||
| 1553 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1554 | 0), | ||
| 1555 | UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, | ||
| 1556 | "HUAWEI MOBILE", | ||
| 1557 | "Mass Storage", | ||
| 1558 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1559 | 0), | ||
| 1560 | UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000, | ||
| 1561 | "HUAWEI MOBILE", | ||
| 1562 | "Mass Storage", | ||
| 1563 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1564 | 0), | ||
| 1565 | UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, | ||
| 1566 | "HUAWEI MOBILE", | ||
| 1567 | "Mass Storage", | ||
| 1568 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1569 | 0), | ||
| 1570 | UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, | ||
| 1571 | "HUAWEI MOBILE", | ||
| 1572 | "Mass Storage", | ||
| 1573 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1574 | 0), | ||
| 1575 | UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000, | ||
| 1576 | "HUAWEI MOBILE", | ||
| 1577 | "Mass Storage", | ||
| 1578 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1579 | 0), | ||
| 1580 | UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000, | ||
| 1581 | "HUAWEI MOBILE", | ||
| 1582 | "Mass Storage", | ||
| 1583 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1584 | 0), | ||
| 1585 | UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, | ||
| 1586 | "HUAWEI MOBILE", | ||
| 1587 | "Mass Storage", | ||
| 1588 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1589 | 0), | ||
| 1590 | UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000, | ||
| 1591 | "HUAWEI MOBILE", | ||
| 1592 | "Mass Storage", | ||
| 1593 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1594 | 0), | ||
| 1595 | UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000, | ||
| 1596 | "HUAWEI MOBILE", | ||
| 1597 | "Mass Storage", | ||
| 1598 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1599 | 0), | ||
| 1600 | UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000, | ||
| 1601 | "HUAWEI MOBILE", | ||
| 1602 | "Mass Storage", | ||
| 1603 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1604 | 0), | ||
| 1605 | UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000, | ||
| 1606 | "HUAWEI MOBILE", | ||
| 1607 | "Mass Storage", | ||
| 1608 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1609 | 0), | ||
| 1610 | UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000, | ||
| 1611 | "HUAWEI MOBILE", | ||
| 1612 | "Mass Storage", | ||
| 1613 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1614 | 0), | ||
| 1615 | UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000, | ||
| 1616 | "HUAWEI MOBILE", | ||
| 1617 | "Mass Storage", | ||
| 1618 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1619 | 0), | ||
| 1620 | UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, | ||
| 1621 | "HUAWEI MOBILE", | ||
| 1622 | "Mass Storage", | ||
| 1623 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1624 | 0), | ||
| 1625 | UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, | ||
| 1626 | "HUAWEI MOBILE", | ||
| 1627 | "Mass Storage", | ||
| 1628 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1629 | 0), | ||
| 1630 | UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, | ||
| 1631 | "HUAWEI MOBILE", | ||
| 1632 | "Mass Storage", | ||
| 1633 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1634 | 0), | ||
| 1635 | UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, | ||
| 1636 | "HUAWEI MOBILE", | ||
| 1637 | "Mass Storage", | ||
| 1638 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1639 | 0), | ||
| 1640 | UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, | ||
| 1641 | "HUAWEI MOBILE", | ||
| 1642 | "Mass Storage", | ||
| 1643 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1644 | 0), | ||
| 1645 | UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, | ||
| 1646 | "HUAWEI MOBILE", | ||
| 1647 | "Mass Storage", | ||
| 1648 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1649 | 0), | ||
| 1650 | UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, | ||
| 1651 | "HUAWEI MOBILE", | ||
| 1652 | "Mass Storage", | ||
| 1653 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1654 | 0), | ||
| 1655 | UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, | ||
| 1656 | "HUAWEI MOBILE", | ||
| 1657 | "Mass Storage", | ||
| 1658 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1659 | 0), | ||
| 1660 | UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, | ||
| 1661 | "HUAWEI MOBILE", | ||
| 1662 | "Mass Storage", | ||
| 1663 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1664 | 0), | ||
| 1665 | UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, | ||
| 1666 | "HUAWEI MOBILE", | ||
| 1667 | "Mass Storage", | ||
| 1668 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1669 | 0), | ||
| 1670 | UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000, | ||
| 1671 | "HUAWEI MOBILE", | ||
| 1672 | "Mass Storage", | ||
| 1673 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1674 | 0), | ||
| 1675 | UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000, | ||
| 1676 | "HUAWEI MOBILE", | ||
| 1677 | "Mass Storage", | ||
| 1678 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1679 | 0), | ||
| 1680 | UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000, | ||
| 1681 | "HUAWEI MOBILE", | ||
| 1682 | "Mass Storage", | ||
| 1683 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1684 | 0), | ||
| 1685 | UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000, | ||
| 1686 | "HUAWEI MOBILE", | ||
| 1687 | "Mass Storage", | ||
| 1688 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1689 | 0), | ||
| 1690 | UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000, | ||
| 1691 | "HUAWEI MOBILE", | ||
| 1692 | "Mass Storage", | ||
| 1693 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1694 | 0), | ||
| 1695 | UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000, | ||
| 1696 | "HUAWEI MOBILE", | ||
| 1697 | "Mass Storage", | ||
| 1698 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1699 | 0), | ||
| 1700 | UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000, | ||
| 1701 | "HUAWEI MOBILE", | ||
| 1702 | "Mass Storage", | ||
| 1703 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1704 | 0), | ||
| 1705 | UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000, | ||
| 1706 | "HUAWEI MOBILE", | ||
| 1707 | "Mass Storage", | ||
| 1708 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1709 | 0), | ||
| 1710 | UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000, | ||
| 1711 | "HUAWEI MOBILE", | ||
| 1712 | "Mass Storage", | ||
| 1713 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1714 | 0), | ||
| 1715 | UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000, | ||
| 1716 | "HUAWEI MOBILE", | ||
| 1717 | "Mass Storage", | ||
| 1718 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1719 | 0), | ||
| 1720 | UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000, | ||
| 1721 | "HUAWEI MOBILE", | ||
| 1722 | "Mass Storage", | ||
| 1723 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1724 | 0), | ||
| 1725 | UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000, | ||
| 1726 | "HUAWEI MOBILE", | ||
| 1727 | "Mass Storage", | ||
| 1728 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1729 | 0), | ||
| 1730 | UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000, | ||
| 1731 | "HUAWEI MOBILE", | ||
| 1732 | "Mass Storage", | ||
| 1733 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1734 | 0), | ||
| 1735 | UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000, | ||
| 1736 | "HUAWEI MOBILE", | ||
| 1737 | "Mass Storage", | ||
| 1738 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1739 | 0), | ||
| 1740 | UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000, | ||
| 1741 | "HUAWEI MOBILE", | ||
| 1742 | "Mass Storage", | ||
| 1743 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1744 | 0), | ||
| 1745 | UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000, | ||
| 1746 | "HUAWEI MOBILE", | ||
| 1747 | "Mass Storage", | ||
| 1748 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1749 | 0), | ||
| 1750 | UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000, | ||
| 1751 | "HUAWEI MOBILE", | ||
| 1752 | "Mass Storage", | ||
| 1753 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1754 | 0), | ||
| 1755 | UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000, | ||
| 1756 | "HUAWEI MOBILE", | ||
| 1757 | "Mass Storage", | ||
| 1758 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1759 | 0), | ||
| 1760 | UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000, | ||
| 1761 | "HUAWEI MOBILE", | ||
| 1762 | "Mass Storage", | ||
| 1763 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1764 | 0), | ||
| 1765 | UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000, | ||
| 1766 | "HUAWEI MOBILE", | ||
| 1767 | "Mass Storage", | ||
| 1768 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1769 | 0), | ||
| 1770 | UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000, | ||
| 1771 | "HUAWEI MOBILE", | ||
| 1772 | "Mass Storage", | ||
| 1773 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1774 | 0), | ||
| 1775 | UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000, | ||
| 1776 | "HUAWEI MOBILE", | ||
| 1777 | "Mass Storage", | ||
| 1778 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1779 | 0), | ||
| 1780 | UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000, | ||
| 1781 | "HUAWEI MOBILE", | ||
| 1782 | "Mass Storage", | ||
| 1783 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1784 | 0), | ||
| 1785 | UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000, | ||
| 1786 | "HUAWEI MOBILE", | ||
| 1787 | "Mass Storage", | ||
| 1788 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1789 | 0), | ||
| 1790 | UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000, | ||
| 1791 | "HUAWEI MOBILE", | ||
| 1792 | "Mass Storage", | ||
| 1793 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1794 | 0), | ||
| 1795 | UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000, | ||
| 1796 | "HUAWEI MOBILE", | ||
| 1797 | "Mass Storage", | ||
| 1798 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1799 | 0), | ||
| 1800 | UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000, | ||
| 1801 | "HUAWEI MOBILE", | ||
| 1802 | "Mass Storage", | ||
| 1803 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1804 | 0), | ||
| 1805 | UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000, | ||
| 1806 | "HUAWEI MOBILE", | ||
| 1807 | "Mass Storage", | ||
| 1808 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1809 | 0), | ||
| 1810 | UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000, | ||
| 1811 | "HUAWEI MOBILE", | ||
| 1812 | "Mass Storage", | ||
| 1813 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1814 | 0), | ||
| 1815 | UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000, | ||
| 1816 | "HUAWEI MOBILE", | ||
| 1817 | "Mass Storage", | ||
| 1818 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1819 | 0), | ||
| 1820 | UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000, | ||
| 1821 | "HUAWEI MOBILE", | ||
| 1822 | "Mass Storage", | ||
| 1823 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1824 | 0), | ||
| 1825 | UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000, | ||
| 1826 | "HUAWEI MOBILE", | ||
| 1827 | "Mass Storage", | ||
| 1828 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1829 | 0), | ||
| 1830 | UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000, | ||
| 1831 | "HUAWEI MOBILE", | ||
| 1832 | "Mass Storage", | ||
| 1833 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1834 | 0), | ||
| 1835 | UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000, | ||
| 1836 | "HUAWEI MOBILE", | ||
| 1837 | "Mass Storage", | ||
| 1838 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1839 | 0), | ||
| 1840 | UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000, | ||
| 1841 | "HUAWEI MOBILE", | ||
| 1842 | "Mass Storage", | ||
| 1843 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1844 | 0), | ||
| 1845 | UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000, | ||
| 1846 | "HUAWEI MOBILE", | ||
| 1847 | "Mass Storage", | ||
| 1848 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1849 | 0), | ||
| 1850 | UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000, | ||
| 1851 | "HUAWEI MOBILE", | ||
| 1852 | "Mass Storage", | ||
| 1853 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1854 | 0), | ||
| 1855 | UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000, | ||
| 1856 | "HUAWEI MOBILE", | ||
| 1857 | "Mass Storage", | ||
| 1858 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, | ||
| 1859 | 0), | 1534 | 0), |
| 1860 | 1535 | ||
| 1861 | /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ | 1536 | /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ |
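The long run of per-product UNUSUAL_DEV entries removed above (Huawei product IDs 0x1401 through 0x143F, all bound to usb_stor_huawei_e220_init) is what the UNUSUAL_VENDOR_INTF() machinery added in the next two files is meant to replace: one vendor-plus-interface match instead of one line per product ID. As a rough sketch only, since the actual replacement entry is not part of this hunk and the class/subclass/protocol values below are illustrative, a consolidated entry would take this shape:

UNUSUAL_VENDOR_INTF(0x12d1,			/* Huawei vendor ID */
		0x08,				/* bInterfaceClass: mass storage (illustrative) */
		0x06,				/* bInterfaceSubClass: SCSI transparent (illustrative) */
		0x50,				/* bInterfaceProtocol: bulk-only (illustrative) */
		"HUAWEI MOBILE",
		"Mass Storage",
		USB_SC_DEVICE, USB_PR_DEVICE,
		usb_stor_huawei_e220_init,	/* same init hook as the removed entries */
		0),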
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 31b3e1a61bbd..cf09b6ba71ff 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
| @@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks"); | |||
| 120 | .useTransport = use_transport, \ | 120 | .useTransport = use_transport, \ |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | #define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \ | ||
| 124 | vendor_name, product_name, use_protocol, use_transport, \ | ||
| 125 | init_function, Flags) \ | ||
| 126 | { \ | ||
| 127 | .vendorName = vendor_name, \ | ||
| 128 | .productName = product_name, \ | ||
| 129 | .useProtocol = use_protocol, \ | ||
| 130 | .useTransport = use_transport, \ | ||
| 131 | .initFunction = init_function, \ | ||
| 132 | } | ||
| 133 | |||
| 123 | static struct us_unusual_dev us_unusual_dev_list[] = { | 134 | static struct us_unusual_dev us_unusual_dev_list[] = { |
| 124 | # include "unusual_devs.h" | 135 | # include "unusual_devs.h" |
| 125 | { } /* Terminating entry */ | 136 | { } /* Terminating entry */ |
| @@ -131,6 +142,7 @@ static struct us_unusual_dev for_dynamic_ids = | |||
| 131 | #undef UNUSUAL_DEV | 142 | #undef UNUSUAL_DEV |
| 132 | #undef COMPLIANT_DEV | 143 | #undef COMPLIANT_DEV |
| 133 | #undef USUAL_DEV | 144 | #undef USUAL_DEV |
| 145 | #undef UNUSUAL_VENDOR_INTF | ||
| 134 | 146 | ||
| 135 | #ifdef CONFIG_LOCKDEP | 147 | #ifdef CONFIG_LOCKDEP |
| 136 | 148 | ||
diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c index b78a526910fb..5ef8ce74aae4 100644 --- a/drivers/usb/storage/usual-tables.c +++ b/drivers/usb/storage/usual-tables.c | |||
| @@ -41,6 +41,20 @@ | |||
| 41 | #define USUAL_DEV(useProto, useTrans) \ | 41 | #define USUAL_DEV(useProto, useTrans) \ |
| 42 | { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) } | 42 | { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) } |
| 43 | 43 | ||
| 44 | /* Define a device that is matched by vendor ID and interface descriptors */ | ||
| 45 | #define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \ | ||
| 46 | vendorName, productName, useProtocol, useTransport, \ | ||
| 47 | initFunction, flags) \ | ||
| 48 | { \ | ||
| 49 | .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ | ||
| 50 | | USB_DEVICE_ID_MATCH_VENDOR, \ | ||
| 51 | .idVendor = (id_vendor), \ | ||
| 52 | .bInterfaceClass = (cl), \ | ||
| 53 | .bInterfaceSubClass = (sc), \ | ||
| 54 | .bInterfaceProtocol = (pr), \ | ||
| 55 | .driver_info = (flags) \ | ||
| 56 | } | ||
| 57 | |||
| 44 | struct usb_device_id usb_storage_usb_ids[] = { | 58 | struct usb_device_id usb_storage_usb_ids[] = { |
| 45 | # include "unusual_devs.h" | 59 | # include "unusual_devs.h" |
| 46 | { } /* Terminating entry */ | 60 | { } /* Terminating entry */ |
| @@ -50,6 +64,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids); | |||
| 50 | #undef UNUSUAL_DEV | 64 | #undef UNUSUAL_DEV |
| 51 | #undef COMPLIANT_DEV | 65 | #undef COMPLIANT_DEV |
| 52 | #undef USUAL_DEV | 66 | #undef USUAL_DEV |
| 67 | #undef UNUSUAL_VENDOR_INTF | ||
| 53 | 68 | ||
| 54 | /* | 69 | /* |
| 55 | * The table of devices to ignore | 70 | * The table of devices to ignore |
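unusual_devs.h is included twice with different definitions of the same macros: usb.c expands UNUSUAL_VENDOR_INTF() into a struct us_unusual_dev (names, protocol, init function), while usual-tables.c expands it into a struct usb_device_id that matches on USB_DEVICE_ID_MATCH_VENDOR plus the interface class/subclass/protocol. A minimal self-contained sketch of this include-the-list-twice ("X-macro") technique, with every name invented for the demo:

/* xmacro_demo.c - hedged illustration of the include-the-list-twice
 * pattern used by unusual_devs.h; every name here is invented. */
#include <stdio.h>

struct name_entry { const char *name; };
struct id_entry   { unsigned short vendor, product; };

/* The shared list (in the kernel this lives in its own header). */
#define DEVICE_LIST \
	DEV(0x12d1, 0x1406, "HUAWEI MOBILE") \
	DEV(0x1234, 0x5678, "Example Gadget")

/* First expansion: a table of human-readable names. */
#define DEV(vid, pid, nm) { .name = nm },
static const struct name_entry names[] = { DEVICE_LIST };
#undef DEV

/* Second expansion of the same list: a table of match IDs. */
#define DEV(vid, pid, nm) { .vendor = vid, .product = pid },
static const struct id_entry ids[] = { DEVICE_LIST };
#undef DEV

int main(void)
{
	for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("%04x:%04x  %s\n", ids[i].vendor, ids[i].product,
		       names[i].name);
	return 0;
}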
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index ebd08b21b234..959b1cd89e6a 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
| @@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net) | |||
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | /* Caller must have TX VQ lock */ | 167 | /* Caller must have TX VQ lock */ |
| 168 | static void tx_poll_start(struct vhost_net *net, struct socket *sock) | 168 | static int tx_poll_start(struct vhost_net *net, struct socket *sock) |
| 169 | { | 169 | { |
| 170 | int ret; | ||
| 171 | |||
| 170 | if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) | 172 | if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) |
| 171 | return; | 173 | return 0; |
| 172 | vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); | 174 | ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); |
| 173 | net->tx_poll_state = VHOST_NET_POLL_STARTED; | 175 | if (!ret) |
| 176 | net->tx_poll_state = VHOST_NET_POLL_STARTED; | ||
| 177 | return ret; | ||
| 174 | } | 178 | } |
| 175 | 179 | ||
| 176 | /* In case of DMA done not in order in lower device driver for some reason. | 180 | /* In case of DMA done not in order in lower device driver for some reason. |
| @@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n, | |||
| 642 | vhost_poll_stop(n->poll + VHOST_NET_VQ_RX); | 646 | vhost_poll_stop(n->poll + VHOST_NET_VQ_RX); |
| 643 | } | 647 | } |
| 644 | 648 | ||
| 645 | static void vhost_net_enable_vq(struct vhost_net *n, | 649 | static int vhost_net_enable_vq(struct vhost_net *n, |
| 646 | struct vhost_virtqueue *vq) | 650 | struct vhost_virtqueue *vq) |
| 647 | { | 651 | { |
| 648 | struct socket *sock; | 652 | struct socket *sock; |
| 653 | int ret; | ||
| 649 | 654 | ||
| 650 | sock = rcu_dereference_protected(vq->private_data, | 655 | sock = rcu_dereference_protected(vq->private_data, |
| 651 | lockdep_is_held(&vq->mutex)); | 656 | lockdep_is_held(&vq->mutex)); |
| 652 | if (!sock) | 657 | if (!sock) |
| 653 | return; | 658 | return 0; |
| 654 | if (vq == n->vqs + VHOST_NET_VQ_TX) { | 659 | if (vq == n->vqs + VHOST_NET_VQ_TX) { |
| 655 | n->tx_poll_state = VHOST_NET_POLL_STOPPED; | 660 | n->tx_poll_state = VHOST_NET_POLL_STOPPED; |
| 656 | tx_poll_start(n, sock); | 661 | ret = tx_poll_start(n, sock); |
| 657 | } else | 662 | } else |
| 658 | vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); | 663 | ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); |
| 664 | |||
| 665 | return ret; | ||
| 659 | } | 666 | } |
| 660 | 667 | ||
| 661 | static struct socket *vhost_net_stop_vq(struct vhost_net *n, | 668 | static struct socket *vhost_net_stop_vq(struct vhost_net *n, |
| @@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |||
| 827 | r = PTR_ERR(ubufs); | 834 | r = PTR_ERR(ubufs); |
| 828 | goto err_ubufs; | 835 | goto err_ubufs; |
| 829 | } | 836 | } |
| 830 | oldubufs = vq->ubufs; | 837 | |
| 831 | vq->ubufs = ubufs; | ||
| 832 | vhost_net_disable_vq(n, vq); | 838 | vhost_net_disable_vq(n, vq); |
| 833 | rcu_assign_pointer(vq->private_data, sock); | 839 | rcu_assign_pointer(vq->private_data, sock); |
| 834 | vhost_net_enable_vq(n, vq); | ||
| 835 | |||
| 836 | r = vhost_init_used(vq); | 840 | r = vhost_init_used(vq); |
| 837 | if (r) | 841 | if (r) |
| 838 | goto err_vq; | 842 | goto err_used; |
| 843 | r = vhost_net_enable_vq(n, vq); | ||
| 844 | if (r) | ||
| 845 | goto err_used; | ||
| 846 | |||
| 847 | oldubufs = vq->ubufs; | ||
| 848 | vq->ubufs = ubufs; | ||
| 839 | 849 | ||
| 840 | n->tx_packets = 0; | 850 | n->tx_packets = 0; |
| 841 | n->tx_zcopy_err = 0; | 851 | n->tx_zcopy_err = 0; |
| @@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |||
| 859 | mutex_unlock(&n->dev.mutex); | 869 | mutex_unlock(&n->dev.mutex); |
| 860 | return 0; | 870 | return 0; |
| 861 | 871 | ||
| 872 | err_used: | ||
| 873 | rcu_assign_pointer(vq->private_data, oldsock); | ||
| 874 | vhost_net_enable_vq(n, vq); | ||
| 875 | if (ubufs) | ||
| 876 | vhost_ubuf_put_and_wait(ubufs); | ||
| 862 | err_ubufs: | 877 | err_ubufs: |
| 863 | fput(sock->file); | 878 | fput(sock->file); |
| 864 | err_vq: | 879 | err_vq: |
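With tx_poll_start() and vhost_net_enable_vq() now returning an error, vhost_net_set_backend() can defer the ubufs swap until both vhost_init_used() and vhost_net_enable_vq() have succeeded, and the new err_used label puts the old socket back and re-enables the old backend if either fails. A hedged stand-alone sketch of that commit-last, roll-back-on-error ordering (plain C, invented resources, not the vhost API):

/* rollback_demo.c - hedged sketch of the error-unwind ordering used by
 * vhost_net_set_backend() after this patch; everything here is an
 * invented stand-in, not the vhost API. */
#include <stdio.h>

static int current_sock = 3;			/* the "old" backend */

static int setup_ring(int sock)    { return sock < 0 ? -1 : 0; }
static int start_polling(int sock) { return sock == 7 ? -1 : 0; }	/* 7 simulates POLLERR */

static int set_backend(int new_sock)
{
	int old_sock = current_sock;
	int err;

	current_sock = new_sock;		/* tentative switch */

	err = setup_ring(new_sock);
	if (err)
		goto err_used;
	err = start_polling(new_sock);		/* can now fail and be noticed */
	if (err)
		goto err_used;

	/* only here is the switch considered committed */
	printf("backend switched %d -> %d\n", old_sock, new_sock);
	return 0;

err_used:
	current_sock = old_sock;		/* put the old backend back ... */
	start_polling(old_sock);		/* ... and re-enable it, best effort */
	return err;
}

int main(void)
{
	int ret;

	printf("ok path:   %d\n", set_backend(5));
	ret = set_backend(7);
	printf("fail path: %d, backend is still %d\n", ret, current_sock);
	return 0;
}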
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 34389f75fe65..9759249e6d90 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, | |||
| 77 | init_poll_funcptr(&poll->table, vhost_poll_func); | 77 | init_poll_funcptr(&poll->table, vhost_poll_func); |
| 78 | poll->mask = mask; | 78 | poll->mask = mask; |
| 79 | poll->dev = dev; | 79 | poll->dev = dev; |
| 80 | poll->wqh = NULL; | ||
| 80 | 81 | ||
| 81 | vhost_work_init(&poll->work, fn); | 82 | vhost_work_init(&poll->work, fn); |
| 82 | } | 83 | } |
| 83 | 84 | ||
| 84 | /* Start polling a file. We add ourselves to file's wait queue. The caller must | 85 | /* Start polling a file. We add ourselves to file's wait queue. The caller must |
| 85 | * keep a reference to a file until after vhost_poll_stop is called. */ | 86 | * keep a reference to a file until after vhost_poll_stop is called. */ |
| 86 | void vhost_poll_start(struct vhost_poll *poll, struct file *file) | 87 | int vhost_poll_start(struct vhost_poll *poll, struct file *file) |
| 87 | { | 88 | { |
| 88 | unsigned long mask; | 89 | unsigned long mask; |
| 90 | int ret = 0; | ||
| 89 | 91 | ||
| 90 | mask = file->f_op->poll(file, &poll->table); | 92 | mask = file->f_op->poll(file, &poll->table); |
| 91 | if (mask) | 93 | if (mask) |
| 92 | vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); | 94 | vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); |
| 95 | if (mask & POLLERR) { | ||
| 96 | if (poll->wqh) | ||
| 97 | remove_wait_queue(poll->wqh, &poll->wait); | ||
| 98 | ret = -EINVAL; | ||
| 99 | } | ||
| 100 | |||
| 101 | return ret; | ||
| 93 | } | 102 | } |
| 94 | 103 | ||
| 95 | /* Stop polling a file. After this function returns, it becomes safe to drop the | 104 | /* Stop polling a file. After this function returns, it becomes safe to drop the |
| 96 | * file reference. You must also flush afterwards. */ | 105 | * file reference. You must also flush afterwards. */ |
| 97 | void vhost_poll_stop(struct vhost_poll *poll) | 106 | void vhost_poll_stop(struct vhost_poll *poll) |
| 98 | { | 107 | { |
| 99 | remove_wait_queue(poll->wqh, &poll->wait); | 108 | if (poll->wqh) { |
| 109 | remove_wait_queue(poll->wqh, &poll->wait); | ||
| 110 | poll->wqh = NULL; | ||
| 111 | } | ||
| 100 | } | 112 | } |
| 101 | 113 | ||
| 102 | static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, | 114 | static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, |
| @@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp) | |||
| 792 | fput(filep); | 804 | fput(filep); |
| 793 | 805 | ||
| 794 | if (pollstart && vq->handle_kick) | 806 | if (pollstart && vq->handle_kick) |
| 795 | vhost_poll_start(&vq->poll, vq->kick); | 807 | r = vhost_poll_start(&vq->poll, vq->kick); |
| 796 | 808 | ||
| 797 | mutex_unlock(&vq->mutex); | 809 | mutex_unlock(&vq->mutex); |
| 798 | 810 | ||
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 2639c58b23ab..17261e277c02 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h | |||
| @@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work); | |||
| 42 | 42 | ||
| 43 | void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, | 43 | void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, |
| 44 | unsigned long mask, struct vhost_dev *dev); | 44 | unsigned long mask, struct vhost_dev *dev); |
| 45 | void vhost_poll_start(struct vhost_poll *poll, struct file *file); | 45 | int vhost_poll_start(struct vhost_poll *poll, struct file *file); |
| 46 | void vhost_poll_stop(struct vhost_poll *poll); | 46 | void vhost_poll_stop(struct vhost_poll *poll); |
| 47 | void vhost_poll_flush(struct vhost_poll *poll); | 47 | void vhost_poll_flush(struct vhost_poll *poll); |
| 48 | void vhost_poll_queue(struct vhost_poll *poll); | 48 | void vhost_poll_queue(struct vhost_poll *poll); |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 0be4df39e953..74d77dfa5f63 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
| @@ -840,7 +840,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
| 840 | 840 | ||
| 841 | if (irq == -1) { | 841 | if (irq == -1) { |
| 842 | irq = xen_allocate_irq_dynamic(); | 842 | irq = xen_allocate_irq_dynamic(); |
| 843 | if (irq == -1) | 843 | if (irq < 0) |
| 844 | goto out; | 844 | goto out; |
| 845 | 845 | ||
| 846 | irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, | 846 | irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, |
| @@ -944,7 +944,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
| 944 | 944 | ||
| 945 | if (irq == -1) { | 945 | if (irq == -1) { |
| 946 | irq = xen_allocate_irq_dynamic(); | 946 | irq = xen_allocate_irq_dynamic(); |
| 947 | if (irq == -1) | 947 | if (irq < 0) |
| 948 | goto out; | 948 | goto out; |
| 949 | 949 | ||
| 950 | irq_set_chip_and_handler_name(irq, &xen_percpu_chip, | 950 | irq_set_chip_and_handler_name(irq, &xen_percpu_chip, |
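Both hunks replace the sentinel test `irq == -1` with `irq < 0`: once an allocator can report failure as any negative errno value, only a range check catches them all. A small hedged illustration:

/* errcheck_demo.c - hedged sketch: why "irq == -1" is too narrow once an
 * allocator can return any negative errno-style value, as in the
 * xen_allocate_irq_dynamic() callers patched above. */
#include <stdio.h>

static int allocate_irq(void)
{
	return -12;	/* e.g. -ENOMEM: a failure that is not -1 */
}

int main(void)
{
	int irq = allocate_irq();

	if (irq == -1)
		printf("sentinel check caught the failure\n");
	else
		printf("sentinel check missed irq=%d and would use it\n", irq);

	if (irq < 0)
		printf("range check caught irq=%d as an error\n", irq);
	return 0;
}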
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index 97f5d264c31e..37c1f825f513 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c | |||
| @@ -135,7 +135,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, | |||
| 135 | struct pci_dev *dev, struct xen_pci_op *op) | 135 | struct pci_dev *dev, struct xen_pci_op *op) |
| 136 | { | 136 | { |
| 137 | struct xen_pcibk_dev_data *dev_data; | 137 | struct xen_pcibk_dev_data *dev_data; |
| 138 | int otherend = pdev->xdev->otherend_id; | ||
| 139 | int status; | 138 | int status; |
| 140 | 139 | ||
| 141 | if (unlikely(verbose_request)) | 140 | if (unlikely(verbose_request)) |
| @@ -144,8 +143,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, | |||
| 144 | status = pci_enable_msi(dev); | 143 | status = pci_enable_msi(dev); |
| 145 | 144 | ||
| 146 | if (status) { | 145 | if (status) { |
| 147 | printk(KERN_ERR "error enable msi for guest %x status %x\n", | 146 | pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n", |
| 148 | otherend, status); | 147 | pci_name(dev), pdev->xdev->otherend_id, |
| 148 | status); | ||
| 149 | op->value = 0; | 149 | op->value = 0; |
| 150 | return XEN_PCI_ERR_op_failed; | 150 | return XEN_PCI_ERR_op_failed; |
| 151 | } | 151 | } |
| @@ -223,10 +223,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, | |||
| 223 | pci_name(dev), i, | 223 | pci_name(dev), i, |
| 224 | op->msix_entries[i].vector); | 224 | op->msix_entries[i].vector); |
| 225 | } | 225 | } |
| 226 | } else { | 226 | } else |
| 227 | printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n", | 227 | pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n", |
| 228 | pci_name(dev), result); | 228 | pci_name(dev), pdev->xdev->otherend_id, |
| 229 | } | 229 | result); |
| 230 | kfree(entries); | 230 | kfree(entries); |
| 231 | 231 | ||
| 232 | op->value = result; | 232 | op->value = result; |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a8b8adc05070..5a3327b8f90d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -4534,7 +4534,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | |||
| 4534 | unsigned nr_extents = 0; | 4534 | unsigned nr_extents = 0; |
| 4535 | int extra_reserve = 0; | 4535 | int extra_reserve = 0; |
| 4536 | enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; | 4536 | enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; |
| 4537 | int ret; | 4537 | int ret = 0; |
| 4538 | bool delalloc_lock = true; | 4538 | bool delalloc_lock = true; |
| 4539 | 4539 | ||
| 4540 | /* If we are a free space inode we need to not flush since we will be in | 4540 | /* If we are a free space inode we need to not flush since we will be in |
| @@ -4579,20 +4579,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | |||
| 4579 | csum_bytes = BTRFS_I(inode)->csum_bytes; | 4579 | csum_bytes = BTRFS_I(inode)->csum_bytes; |
| 4580 | spin_unlock(&BTRFS_I(inode)->lock); | 4580 | spin_unlock(&BTRFS_I(inode)->lock); |
| 4581 | 4581 | ||
| 4582 | if (root->fs_info->quota_enabled) { | 4582 | if (root->fs_info->quota_enabled) |
| 4583 | ret = btrfs_qgroup_reserve(root, num_bytes + | 4583 | ret = btrfs_qgroup_reserve(root, num_bytes + |
| 4584 | nr_extents * root->leafsize); | 4584 | nr_extents * root->leafsize); |
| 4585 | if (ret) { | ||
| 4586 | spin_lock(&BTRFS_I(inode)->lock); | ||
| 4587 | calc_csum_metadata_size(inode, num_bytes, 0); | ||
| 4588 | spin_unlock(&BTRFS_I(inode)->lock); | ||
| 4589 | if (delalloc_lock) | ||
| 4590 | mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); | ||
| 4591 | return ret; | ||
| 4592 | } | ||
| 4593 | } | ||
| 4594 | 4585 | ||
| 4595 | ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); | 4586 | /* |
| 4587 | * ret != 0 here means the qgroup reservation failed, we go straight to | ||
| 4588 | * the shared error handling then. | ||
| 4589 | */ | ||
| 4590 | if (ret == 0) | ||
| 4591 | ret = reserve_metadata_bytes(root, block_rsv, | ||
| 4592 | to_reserve, flush); | ||
| 4593 | |||
| 4596 | if (ret) { | 4594 | if (ret) { |
| 4597 | u64 to_free = 0; | 4595 | u64 to_free = 0; |
| 4598 | unsigned dropped; | 4596 | unsigned dropped; |
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 2e8cae63d247..fdb7a8db3b57 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c | |||
| @@ -288,7 +288,8 @@ out: | |||
| 288 | void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em) | 288 | void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em) |
| 289 | { | 289 | { |
| 290 | clear_bit(EXTENT_FLAG_LOGGING, &em->flags); | 290 | clear_bit(EXTENT_FLAG_LOGGING, &em->flags); |
| 291 | try_merge_map(tree, em); | 291 | if (em->in_tree) |
| 292 | try_merge_map(tree, em); | ||
| 292 | } | 293 | } |
| 293 | 294 | ||
| 294 | /** | 295 | /** |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f76b1fd160d4..aeb84469d2c4 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
| @@ -293,15 +293,24 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, | |||
| 293 | struct btrfs_key key; | 293 | struct btrfs_key key; |
| 294 | struct btrfs_ioctl_defrag_range_args range; | 294 | struct btrfs_ioctl_defrag_range_args range; |
| 295 | int num_defrag; | 295 | int num_defrag; |
| 296 | int index; | ||
| 297 | int ret; | ||
| 296 | 298 | ||
| 297 | /* get the inode */ | 299 | /* get the inode */ |
| 298 | key.objectid = defrag->root; | 300 | key.objectid = defrag->root; |
| 299 | btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); | 301 | btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); |
| 300 | key.offset = (u64)-1; | 302 | key.offset = (u64)-1; |
| 303 | |||
| 304 | index = srcu_read_lock(&fs_info->subvol_srcu); | ||
| 305 | |||
| 301 | inode_root = btrfs_read_fs_root_no_name(fs_info, &key); | 306 | inode_root = btrfs_read_fs_root_no_name(fs_info, &key); |
| 302 | if (IS_ERR(inode_root)) { | 307 | if (IS_ERR(inode_root)) { |
| 303 | kmem_cache_free(btrfs_inode_defrag_cachep, defrag); | 308 | ret = PTR_ERR(inode_root); |
| 304 | return PTR_ERR(inode_root); | 309 | goto cleanup; |
| 310 | } | ||
| 311 | if (btrfs_root_refs(&inode_root->root_item) == 0) { | ||
| 312 | ret = -ENOENT; | ||
| 313 | goto cleanup; | ||
| 305 | } | 314 | } |
| 306 | 315 | ||
| 307 | key.objectid = defrag->ino; | 316 | key.objectid = defrag->ino; |
| @@ -309,9 +318,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, | |||
| 309 | key.offset = 0; | 318 | key.offset = 0; |
| 310 | inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); | 319 | inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); |
| 311 | if (IS_ERR(inode)) { | 320 | if (IS_ERR(inode)) { |
| 312 | kmem_cache_free(btrfs_inode_defrag_cachep, defrag); | 321 | ret = PTR_ERR(inode); |
| 313 | return PTR_ERR(inode); | 322 | goto cleanup; |
| 314 | } | 323 | } |
| 324 | srcu_read_unlock(&fs_info->subvol_srcu, index); | ||
| 315 | 325 | ||
| 316 | /* do a chunk of defrag */ | 326 | /* do a chunk of defrag */ |
| 317 | clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); | 327 | clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); |
| @@ -346,6 +356,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, | |||
| 346 | 356 | ||
| 347 | iput(inode); | 357 | iput(inode); |
| 348 | return 0; | 358 | return 0; |
| 359 | cleanup: | ||
| 360 | srcu_read_unlock(&fs_info->subvol_srcu, index); | ||
| 361 | kmem_cache_free(btrfs_inode_defrag_cachep, defrag); | ||
| 362 | return ret; | ||
| 349 | } | 363 | } |
| 350 | 364 | ||
| 351 | /* | 365 | /* |
| @@ -1594,9 +1608,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | |||
| 1594 | if (err < 0 && num_written > 0) | 1608 | if (err < 0 && num_written > 0) |
| 1595 | num_written = err; | 1609 | num_written = err; |
| 1596 | } | 1610 | } |
| 1597 | out: | 1611 | |
| 1598 | if (sync) | 1612 | if (sync) |
| 1599 | atomic_dec(&BTRFS_I(inode)->sync_writers); | 1613 | atomic_dec(&BTRFS_I(inode)->sync_writers); |
| 1614 | out: | ||
| 1600 | sb_end_write(inode->i_sb); | 1615 | sb_end_write(inode->i_sb); |
| 1601 | current->backing_dev_info = NULL; | 1616 | current->backing_dev_info = NULL; |
| 1602 | return num_written ? num_written : err; | 1617 | return num_written ? num_written : err; |
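__btrfs_run_defrag_inode() now holds the subvol_srcu read lock across the root and inode lookups and exits through a single cleanup label, so the read side is dropped and the defrag record freed on every failure path (the scrub.c hunks further down apply the same srcu_read_lock/srcu_read_unlock bracketing). A hedged userspace analogue of that shape, with a pthread rwlock standing in for SRCU and all names invented:

/* srcu_cleanup_demo.c - hedged userspace analogue of the pattern added to
 * __btrfs_run_defrag_inode(): take a read-side lock, do two lookups that
 * may fail, and leave through one label that always drops the lock. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t subvol_lock = PTHREAD_RWLOCK_INITIALIZER;

static int lookup_root(int id)  { return id > 0 ? 0 : -ENOENT; }
static int lookup_inode(int id) { return id != 13 ? 0 : -ENOENT; }

static int run_defrag(int root_id, int inode_id)
{
	int ret;

	pthread_rwlock_rdlock(&subvol_lock);

	ret = lookup_root(root_id);
	if (ret)
		goto cleanup;
	ret = lookup_inode(inode_id);
	if (ret)
		goto cleanup;

	pthread_rwlock_unlock(&subvol_lock);	/* success path drops it here */
	printf("defrag root %d inode %d\n", root_id, inode_id);
	return 0;

cleanup:
	pthread_rwlock_unlock(&subvol_lock);	/* every error path drops it too */
	return ret;
}

int main(void)
{
	printf("ok:   %d\n", run_defrag(1, 7));
	printf("fail: %d\n", run_defrag(1, 13));
	return 0;
}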
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5b22d45d3c6a..338f2597bf7f 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
| @@ -515,7 +515,6 @@ static noinline int create_subvol(struct btrfs_root *root, | |||
| 515 | 515 | ||
| 516 | BUG_ON(ret); | 516 | BUG_ON(ret); |
| 517 | 517 | ||
| 518 | d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); | ||
| 519 | fail: | 518 | fail: |
| 520 | if (async_transid) { | 519 | if (async_transid) { |
| 521 | *async_transid = trans->transid; | 520 | *async_transid = trans->transid; |
| @@ -525,6 +524,10 @@ fail: | |||
| 525 | } | 524 | } |
| 526 | if (err && !ret) | 525 | if (err && !ret) |
| 527 | ret = err; | 526 | ret = err; |
| 527 | |||
| 528 | if (!ret) | ||
| 529 | d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); | ||
| 530 | |||
| 528 | return ret; | 531 | return ret; |
| 529 | } | 532 | } |
| 530 | 533 | ||
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index f10731297040..e5ed56729607 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
| @@ -836,9 +836,16 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, | |||
| 836 | * if the disk i_size is already at the inode->i_size, or | 836 | * if the disk i_size is already at the inode->i_size, or |
| 837 | * this ordered extent is inside the disk i_size, we're done | 837 | * this ordered extent is inside the disk i_size, we're done |
| 838 | */ | 838 | */ |
| 839 | if (disk_i_size == i_size || offset <= disk_i_size) { | 839 | if (disk_i_size == i_size) |
| 840 | goto out; | ||
| 841 | |||
| 842 | /* | ||
| 843 | * We still need to update disk_i_size if outstanding_isize is greater | ||
| 844 | * than disk_i_size. | ||
| 845 | */ | ||
| 846 | if (offset <= disk_i_size && | ||
| 847 | (!ordered || ordered->outstanding_isize <= disk_i_size)) | ||
| 840 | goto out; | 848 | goto out; |
| 841 | } | ||
| 842 | 849 | ||
| 843 | /* | 850 | /* |
| 844 | * walk backward from this ordered extent to disk_i_size. | 851 | * walk backward from this ordered extent to disk_i_size. |
| @@ -870,7 +877,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, | |||
| 870 | break; | 877 | break; |
| 871 | if (test->file_offset >= i_size) | 878 | if (test->file_offset >= i_size) |
| 872 | break; | 879 | break; |
| 873 | if (test->file_offset >= disk_i_size) { | 880 | if (entry_end(test) > disk_i_size) { |
| 874 | /* | 881 | /* |
| 875 | * we don't update disk_i_size now, so record this | 882 | * we don't update disk_i_size now, so record this |
| 876 | * undealt i_size. Or we will not know the real | 883 | * undealt i_size. Or we will not know the real |
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index bdbb94f245c9..67783e03d121 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
| @@ -580,20 +580,29 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) | |||
| 580 | int corrected = 0; | 580 | int corrected = 0; |
| 581 | struct btrfs_key key; | 581 | struct btrfs_key key; |
| 582 | struct inode *inode = NULL; | 582 | struct inode *inode = NULL; |
| 583 | struct btrfs_fs_info *fs_info; | ||
| 583 | u64 end = offset + PAGE_SIZE - 1; | 584 | u64 end = offset + PAGE_SIZE - 1; |
| 584 | struct btrfs_root *local_root; | 585 | struct btrfs_root *local_root; |
| 586 | int srcu_index; | ||
| 585 | 587 | ||
| 586 | key.objectid = root; | 588 | key.objectid = root; |
| 587 | key.type = BTRFS_ROOT_ITEM_KEY; | 589 | key.type = BTRFS_ROOT_ITEM_KEY; |
| 588 | key.offset = (u64)-1; | 590 | key.offset = (u64)-1; |
| 589 | local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key); | 591 | |
| 590 | if (IS_ERR(local_root)) | 592 | fs_info = fixup->root->fs_info; |
| 593 | srcu_index = srcu_read_lock(&fs_info->subvol_srcu); | ||
| 594 | |||
| 595 | local_root = btrfs_read_fs_root_no_name(fs_info, &key); | ||
| 596 | if (IS_ERR(local_root)) { | ||
| 597 | srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); | ||
| 591 | return PTR_ERR(local_root); | 598 | return PTR_ERR(local_root); |
| 599 | } | ||
| 592 | 600 | ||
| 593 | key.type = BTRFS_INODE_ITEM_KEY; | 601 | key.type = BTRFS_INODE_ITEM_KEY; |
| 594 | key.objectid = inum; | 602 | key.objectid = inum; |
| 595 | key.offset = 0; | 603 | key.offset = 0; |
| 596 | inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL); | 604 | inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); |
| 605 | srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); | ||
| 597 | if (IS_ERR(inode)) | 606 | if (IS_ERR(inode)) |
| 598 | return PTR_ERR(inode); | 607 | return PTR_ERR(inode); |
| 599 | 608 | ||
| @@ -606,7 +615,6 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) | |||
| 606 | } | 615 | } |
| 607 | 616 | ||
| 608 | if (PageUptodate(page)) { | 617 | if (PageUptodate(page)) { |
| 609 | struct btrfs_fs_info *fs_info; | ||
| 610 | if (PageDirty(page)) { | 618 | if (PageDirty(page)) { |
| 611 | /* | 619 | /* |
| 612 | * we need to write the data to the defect sector. the | 620 | * we need to write the data to the defect sector. the |
| @@ -3180,18 +3188,25 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
| 3180 | u64 physical_for_dev_replace; | 3188 | u64 physical_for_dev_replace; |
| 3181 | u64 len; | 3189 | u64 len; |
| 3182 | struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; | 3190 | struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; |
| 3191 | int srcu_index; | ||
| 3183 | 3192 | ||
| 3184 | key.objectid = root; | 3193 | key.objectid = root; |
| 3185 | key.type = BTRFS_ROOT_ITEM_KEY; | 3194 | key.type = BTRFS_ROOT_ITEM_KEY; |
| 3186 | key.offset = (u64)-1; | 3195 | key.offset = (u64)-1; |
| 3196 | |||
| 3197 | srcu_index = srcu_read_lock(&fs_info->subvol_srcu); | ||
| 3198 | |||
| 3187 | local_root = btrfs_read_fs_root_no_name(fs_info, &key); | 3199 | local_root = btrfs_read_fs_root_no_name(fs_info, &key); |
| 3188 | if (IS_ERR(local_root)) | 3200 | if (IS_ERR(local_root)) { |
| 3201 | srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); | ||
| 3189 | return PTR_ERR(local_root); | 3202 | return PTR_ERR(local_root); |
| 3203 | } | ||
| 3190 | 3204 | ||
| 3191 | key.type = BTRFS_INODE_ITEM_KEY; | 3205 | key.type = BTRFS_INODE_ITEM_KEY; |
| 3192 | key.objectid = inum; | 3206 | key.objectid = inum; |
| 3193 | key.offset = 0; | 3207 | key.offset = 0; |
| 3194 | inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); | 3208 | inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); |
| 3209 | srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); | ||
| 3195 | if (IS_ERR(inode)) | 3210 | if (IS_ERR(inode)) |
| 3196 | return PTR_ERR(inode); | 3211 | return PTR_ERR(inode); |
| 3197 | 3212 | ||
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f15494699f3b..fc03aa60b684 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
| @@ -333,12 +333,14 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type, | |||
| 333 | &root->fs_info->trans_block_rsv, | 333 | &root->fs_info->trans_block_rsv, |
| 334 | num_bytes, flush); | 334 | num_bytes, flush); |
| 335 | if (ret) | 335 | if (ret) |
| 336 | return ERR_PTR(ret); | 336 | goto reserve_fail; |
| 337 | } | 337 | } |
| 338 | again: | 338 | again: |
| 339 | h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); | 339 | h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); |
| 340 | if (!h) | 340 | if (!h) { |
| 341 | return ERR_PTR(-ENOMEM); | 341 | ret = -ENOMEM; |
| 342 | goto alloc_fail; | ||
| 343 | } | ||
| 342 | 344 | ||
| 343 | /* | 345 | /* |
| 344 | * If we are JOIN_NOLOCK we're already committing a transaction and | 346 | * If we are JOIN_NOLOCK we're already committing a transaction and |
| @@ -365,11 +367,7 @@ again: | |||
| 365 | if (ret < 0) { | 367 | if (ret < 0) { |
| 366 | /* We must get the transaction if we are JOIN_NOLOCK. */ | 368 | /* We must get the transaction if we are JOIN_NOLOCK. */ |
| 367 | BUG_ON(type == TRANS_JOIN_NOLOCK); | 369 | BUG_ON(type == TRANS_JOIN_NOLOCK); |
| 368 | 370 | goto join_fail; | |
| 369 | if (type < TRANS_JOIN_NOLOCK) | ||
| 370 | sb_end_intwrite(root->fs_info->sb); | ||
| 371 | kmem_cache_free(btrfs_trans_handle_cachep, h); | ||
| 372 | return ERR_PTR(ret); | ||
| 373 | } | 371 | } |
| 374 | 372 | ||
| 375 | cur_trans = root->fs_info->running_transaction; | 373 | cur_trans = root->fs_info->running_transaction; |
| @@ -410,6 +408,19 @@ got_it: | |||
| 410 | if (!current->journal_info && type != TRANS_USERSPACE) | 408 | if (!current->journal_info && type != TRANS_USERSPACE) |
| 411 | current->journal_info = h; | 409 | current->journal_info = h; |
| 412 | return h; | 410 | return h; |
| 411 | |||
| 412 | join_fail: | ||
| 413 | if (type < TRANS_JOIN_NOLOCK) | ||
| 414 | sb_end_intwrite(root->fs_info->sb); | ||
| 415 | kmem_cache_free(btrfs_trans_handle_cachep, h); | ||
| 416 | alloc_fail: | ||
| 417 | if (num_bytes) | ||
| 418 | btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv, | ||
| 419 | num_bytes); | ||
| 420 | reserve_fail: | ||
| 421 | if (qgroup_reserved) | ||
| 422 | btrfs_qgroup_free(root, qgroup_reserved); | ||
| 423 | return ERR_PTR(ret); | ||
| 413 | } | 424 | } |
| 414 | 425 | ||
| 415 | struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, | 426 | struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, |
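start_transaction() now unwinds through a ladder of labels in reverse order of setup: join_fail releases what the handle allocation took, alloc_fail releases the transaction block reservation, and reserve_fail returns the qgroup reservation, resources the earlier early returns simply leaked. A hedged, self-contained sketch of that goto-ladder idiom with invented resources:

/* unwind_ladder_demo.c - hedged sketch of the reverse-order label ladder
 * start_transaction() now uses; the resources here are invented. */
#include <stdio.h>
#include <stdlib.h>

static int  reserve_quota(int n)  { return n > 100 ? -1 : 0; }
static int  reserve_bytes(int n)  { return n > 50  ? -1 : 0; }
static void release_quota(int n)  { printf("released quota %d\n", n); }
static void release_bytes(int n)  { printf("released bytes %d\n", n); }

static int start_transaction(int quota, int bytes)
{
	char *handle;
	int ret;

	ret = reserve_quota(quota);		/* acquired first ... */
	if (ret)
		goto reserve_fail;

	ret = reserve_bytes(bytes);
	if (ret)
		goto quota_fail;

	handle = malloc(32);			/* ... acquired last */
	if (!handle) {
		ret = -1;
		goto bytes_fail;
	}

	printf("transaction started (quota=%d, bytes=%d)\n", quota, bytes);
	free(handle);
	return 0;

bytes_fail:					/* undo in reverse order */
	release_bytes(bytes);
quota_fail:
	release_quota(quota);
reserve_fail:
	return ret;
}

int main(void)
{
	start_transaction(10, 20);	/* succeeds */
	start_transaction(10, 80);	/* fails at bytes; quota is released */
	return 0;
}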
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 15f6efdf6463..5cbb7f4b1672 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -1556,7 +1556,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
| 1556 | ret = 0; | 1556 | ret = 0; |
| 1557 | 1557 | ||
| 1558 | /* Notify udev that device has changed */ | 1558 | /* Notify udev that device has changed */ |
| 1559 | btrfs_kobject_uevent(bdev, KOBJ_CHANGE); | 1559 | if (bdev) |
| 1560 | btrfs_kobject_uevent(bdev, KOBJ_CHANGE); | ||
| 1560 | 1561 | ||
| 1561 | error_brelse: | 1562 | error_brelse: |
| 1562 | brelse(bh); | 1563 | brelse(bh); |
diff --git a/include/linux/llist.h b/include/linux/llist.h index a5199f6d0e82..d0ab98f73d38 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h | |||
| @@ -125,6 +125,31 @@ static inline void init_llist_head(struct llist_head *list) | |||
| 125 | (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) | 125 | (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) |
| 126 | 126 | ||
| 127 | /** | 127 | /** |
| 128 | * llist_for_each_entry_safe - iterate safely against remove over some entries | ||
| 129 | * of lock-less list of given type. | ||
| 130 | * @pos: the type * to use as a loop cursor. | ||
| 131 | * @n: another type * to use as temporary storage. | ||
| 132 | * @node: the first entry of deleted list entries. | ||
| 133 | * @member: the name of the llist_node within the struct. | ||
| 134 | * | ||
| 135 | * In general, some entries of the lock-less list can be traversed | ||
| 136 | * safely only after being removed from list, so start with an entry | ||
| 137 | * instead of list head. This variant allows removal of entries | ||
| 138 | * as we iterate. | ||
| 139 | * | ||
| 140 | * If being used on entries deleted from lock-less list directly, the | ||
| 141 | * traverse order is from the newest to the oldest added entry. If | ||
| 142 | * you want to traverse from the oldest to the newest, you must | ||
| 143 | * reverse the order by yourself before traversing. | ||
| 144 | */ | ||
| 145 | #define llist_for_each_entry_safe(pos, n, node, member) \ | ||
| 146 | for ((pos) = llist_entry((node), typeof(*(pos)), member), \ | ||
| 147 | (n) = (pos)->member.next; \ | ||
| 148 | &(pos)->member != NULL; \ | ||
| 149 | (pos) = llist_entry(n, typeof(*(pos)), member), \ | ||
| 150 | (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL) | ||
| 151 | |||
| 152 | /** | ||
| 128 | * llist_empty - tests whether a lock-less list is empty | 153 | * llist_empty - tests whether a lock-less list is empty |
| 129 | * @head: the list to test | 154 | * @head: the list to test |
| 130 | * | 155 | * |
diff --git a/include/linux/usb.h b/include/linux/usb.h index 689b14b26c8d..4d22d0f6167a 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
| @@ -357,6 +357,8 @@ struct usb_bus { | |||
| 357 | int bandwidth_int_reqs; /* number of Interrupt requests */ | 357 | int bandwidth_int_reqs; /* number of Interrupt requests */ |
| 358 | int bandwidth_isoc_reqs; /* number of Isoc. requests */ | 358 | int bandwidth_isoc_reqs; /* number of Isoc. requests */ |
| 359 | 359 | ||
| 360 | unsigned resuming_ports; /* bit array: resuming root-hub ports */ | ||
| 361 | |||
| 360 | #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) | 362 | #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) |
| 361 | struct mon_bus *mon_bus; /* non-null when associated */ | 363 | struct mon_bus *mon_bus; /* non-null when associated */ |
| 362 | int monitored; /* non-zero when monitored */ | 364 | int monitored; /* non-zero when monitored */ |
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 608050b2545f..0a78df5f6cfd 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
| @@ -430,6 +430,9 @@ extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd); | |||
| 430 | extern void usb_wakeup_notification(struct usb_device *hdev, | 430 | extern void usb_wakeup_notification(struct usb_device *hdev, |
| 431 | unsigned int portnum); | 431 | unsigned int portnum); |
| 432 | 432 | ||
| 433 | extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum); | ||
| 434 | extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum); | ||
| 435 | |||
| 433 | /* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */ | 436 | /* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */ |
| 434 | #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) | 437 | #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) |
| 435 | #define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) | 438 | #define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) |
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 5de7a220e986..0e5ac93bab10 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h | |||
| @@ -33,6 +33,7 @@ struct usbnet { | |||
| 33 | wait_queue_head_t *wait; | 33 | wait_queue_head_t *wait; |
| 34 | struct mutex phy_mutex; | 34 | struct mutex phy_mutex; |
| 35 | unsigned char suspend_count; | 35 | unsigned char suspend_count; |
| 36 | unsigned char pkt_cnt, pkt_err; | ||
| 36 | 37 | ||
| 37 | /* i/o info: pipes etc */ | 38 | /* i/o info: pipes etc */ |
| 38 | unsigned in, out; | 39 | unsigned in, out; |
| @@ -70,6 +71,7 @@ struct usbnet { | |||
| 70 | # define EVENT_DEV_OPEN 7 | 71 | # define EVENT_DEV_OPEN 7 |
| 71 | # define EVENT_DEVICE_REPORT_IDLE 8 | 72 | # define EVENT_DEVICE_REPORT_IDLE 8 |
| 72 | # define EVENT_NO_RUNTIME_PM 9 | 73 | # define EVENT_NO_RUNTIME_PM 9 |
| 74 | # define EVENT_RX_KILL 10 | ||
| 73 | }; | 75 | }; |
| 74 | 76 | ||
| 75 | static inline struct usb_driver *driver_of(struct usb_interface *intf) | 77 | static inline struct usb_driver *driver_of(struct usb_interface *intf) |
| @@ -100,7 +102,6 @@ struct driver_info { | |||
| 100 | #define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */ | 102 | #define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */ |
| 101 | 103 | ||
| 102 | #define FLAG_POINTTOPOINT 0x1000 /* possibly use "usb%d" names */ | 104 | #define FLAG_POINTTOPOINT 0x1000 /* possibly use "usb%d" names */ |
| 103 | #define FLAG_NOARP 0x2000 /* device can't do ARP */ | ||
| 104 | 105 | ||
| 105 | /* | 106 | /* |
| 106 | * Indicates to usbnet, that USB driver accumulates multiple IP packets. | 107 | * Indicates to usbnet, that USB driver accumulates multiple IP packets. |
| @@ -108,6 +109,7 @@ struct driver_info { | |||
| 108 | */ | 109 | */ |
| 109 | #define FLAG_MULTI_PACKET 0x2000 | 110 | #define FLAG_MULTI_PACKET 0x2000 |
| 110 | #define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ | 111 | #define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ |
| 112 | #define FLAG_NOARP 0x8000 /* device can't do ARP */ | ||
| 111 | 113 | ||
| 112 | /* init device ... can sleep, or cause probe() failure */ | 114 | /* init device ... can sleep, or cause probe() failure */ |
| 113 | int (*bind)(struct usbnet *, struct usb_interface *); | 115 | int (*bind)(struct usbnet *, struct usb_interface *); |
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 498433dd067d..938b7fd11204 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h | |||
| @@ -34,17 +34,17 @@ extern int udpv6_connect(struct sock *sk, | |||
| 34 | struct sockaddr *uaddr, | 34 | struct sockaddr *uaddr, |
| 35 | int addr_len); | 35 | int addr_len); |
| 36 | 36 | ||
| 37 | extern int datagram_recv_ctl(struct sock *sk, | 37 | extern int ip6_datagram_recv_ctl(struct sock *sk, |
| 38 | struct msghdr *msg, | 38 | struct msghdr *msg, |
| 39 | struct sk_buff *skb); | 39 | struct sk_buff *skb); |
| 40 | 40 | ||
| 41 | extern int datagram_send_ctl(struct net *net, | 41 | extern int ip6_datagram_send_ctl(struct net *net, |
| 42 | struct sock *sk, | 42 | struct sock *sk, |
| 43 | struct msghdr *msg, | 43 | struct msghdr *msg, |
| 44 | struct flowi6 *fl6, | 44 | struct flowi6 *fl6, |
| 45 | struct ipv6_txoptions *opt, | 45 | struct ipv6_txoptions *opt, |
| 46 | int *hlimit, int *tclass, | 46 | int *hlimit, int *tclass, |
| 47 | int *dontfrag); | 47 | int *dontfrag); |
| 48 | 48 | ||
| 49 | #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) | 49 | #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) |
| 50 | 50 | ||
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index 77cdba9df274..bb991dfe134f 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h | |||
| @@ -28,25 +28,16 @@ | |||
| 28 | #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION | 28 | #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION |
| 29 | 29 | ||
| 30 | /* | 30 | /* |
| 31 | * Architectures where both 32- and 64-bit binaries can be executed | 31 | * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed |
| 32 | * on 64-bit kernels need this. This keeps the structure format | 32 | * back to the kernel via ioctl from userspace. On architectures where 32- and |
| 33 | * uniform, and makes sure the wait_queue_token isn't too big to be | 33 | * 64-bit userspace binaries can be executed it's important that the size of |
| 34 | * passed back down to the kernel. | 34 | * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we |
| 35 | * | 35 | * do not break the binary ABI interface by changing the structure size. |
| 36 | * This assumes that on these architectures: | ||
| 37 | * mode 32 bit 64 bit | ||
| 38 | * ------------------------- | ||
| 39 | * int 32 bit 32 bit | ||
| 40 | * long 32 bit 64 bit | ||
| 41 | * | ||
| 42 | * If so, 32-bit user-space code should be backwards compatible. | ||
| 43 | */ | 36 | */ |
| 44 | 37 | #if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */ | |
| 45 | #if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \ | ||
| 46 | || defined(__powerpc__) || defined(__s390__) | ||
| 47 | typedef unsigned int autofs_wqt_t; | ||
| 48 | #else | ||
| 49 | typedef unsigned long autofs_wqt_t; | 38 | typedef unsigned long autofs_wqt_t; |
| 39 | #else | ||
| 40 | typedef unsigned int autofs_wqt_t; | ||
| 50 | #endif | 41 | #endif |
| 51 | 42 | ||
| 52 | /* Packet types */ | 43 | /* Packet types */ |
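The rewritten auto_fs.h comment states the real constraint: autofs_wqt_t is part of a packet passed between userspace and the kernel, so its size must not differ between 32-bit and 64-bit userspace on the same kernel, which is why the type is now chosen per architecture rather than per word size. One hedged way to pin such an ABI invariant at build time (plain C11 _Static_assert here; the kernel itself would use BUILD_BUG_ON, and the type and sizes below are stand-ins, not the real autofs layout):

/* abi_size_demo.c - hedged sketch: pinning a wire/ABI type's size at
 * compile time.  The struct is a stand-in, not the autofs packet. */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for autofs_wqt_t: a fixed-width type keeps 32- and 64-bit
 * builds in agreement about the packet layout. */
typedef uint32_t demo_wqt_t;

struct demo_packet {
	int32_t    proto_version;
	demo_wqt_t wait_queue_token;
	int32_t    len;
};

_Static_assert(sizeof(demo_wqt_t) == 4,
	       "wait-queue token must be 4 bytes on every ABI");
_Static_assert(sizeof(struct demo_packet) == 12,
	       "packet layout must not change between 32/64-bit builds");

int main(void)
{
	printf("token: %zu bytes, packet: %zu bytes\n",
	       sizeof(demo_wqt_t), sizeof(struct demo_packet));
	return 0;
}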
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 50598472dc41..f738e25377ff 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h | |||
| @@ -152,6 +152,12 @@ | |||
| 152 | #define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0)) | 152 | #define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0)) |
| 153 | #define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1)) | 153 | #define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1)) |
| 154 | 154 | ||
| 155 | /* | ||
| 156 | * Interface status, Figure 9-5 USB 3.0 spec | ||
| 157 | */ | ||
| 158 | #define USB_INTRF_STAT_FUNC_RW_CAP 1 | ||
| 159 | #define USB_INTRF_STAT_FUNC_RW 2 | ||
| 160 | |||
| 155 | #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ | 161 | #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ |
| 156 | 162 | ||
| 157 | /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ | 163 | /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ |
diff --git a/kernel/pid.c b/kernel/pid.c index de9af600006f..f2c6a6825098 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
| @@ -331,7 +331,7 @@ out: | |||
| 331 | return pid; | 331 | return pid; |
| 332 | 332 | ||
| 333 | out_unlock: | 333 | out_unlock: |
| 334 | spin_unlock(&pidmap_lock); | 334 | spin_unlock_irq(&pidmap_lock); |
| 335 | out_free: | 335 | out_free: |
| 336 | while (++i <= ns->level) | 336 | while (++i <= ns->level) |
| 337 | free_pidmap(pid->numbers + i); | 337 | free_pidmap(pid->numbers + i); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 09255ec8159c..fbb60b103e64 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -3030,7 +3030,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, | |||
| 3030 | if (memcg) { | 3030 | if (memcg) { |
| 3031 | s->memcg_params->memcg = memcg; | 3031 | s->memcg_params->memcg = memcg; |
| 3032 | s->memcg_params->root_cache = root_cache; | 3032 | s->memcg_params->root_cache = root_cache; |
| 3033 | } | 3033 | } else |
| 3034 | s->memcg_params->is_root_cache = true; | ||
| 3035 | |||
| 3034 | return 0; | 3036 | return 0; |
| 3035 | } | 3037 | } |
| 3036 | 3038 | ||
diff --git a/mm/mlock.c b/mm/mlock.c index f0b9ce572fc7..c9bd528b01d2 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
| @@ -517,11 +517,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) | |||
| 517 | static int do_mlockall(int flags) | 517 | static int do_mlockall(int flags) |
| 518 | { | 518 | { |
| 519 | struct vm_area_struct * vma, * prev = NULL; | 519 | struct vm_area_struct * vma, * prev = NULL; |
| 520 | unsigned int def_flags = 0; | ||
| 521 | 520 | ||
| 522 | if (flags & MCL_FUTURE) | 521 | if (flags & MCL_FUTURE) |
| 523 | def_flags = VM_LOCKED; | 522 | current->mm->def_flags |= VM_LOCKED; |
| 524 | current->mm->def_flags = def_flags; | 523 | else |
| 524 | current->mm->def_flags &= ~VM_LOCKED; | ||
| 525 | if (flags == MCL_FUTURE) | 525 | if (flags == MCL_FUTURE) |
| 526 | goto out; | 526 | goto out; |
| 527 | 527 | ||
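The mlockall fix stops do_mlockall() from assigning mm->def_flags wholesale, which silently dropped any other default VMA flags, and instead sets or clears only the VM_LOCKED bit. A tiny hedged sketch of why the read-modify-write form matters (flag values invented):

/* flagbits_demo.c - hedged sketch: updating one flag bit without
 * clobbering its neighbours, as in the do_mlockall() fix. */
#include <stdio.h>

#define DEMO_VM_LOCKED  0x2000u
#define DEMO_VM_OTHER   0x0040u		/* some unrelated default flag */

int main(void)
{
	unsigned int def_flags = DEMO_VM_OTHER;	/* pre-existing state */
	unsigned int clobbered = DEMO_VM_LOCKED;/* old style: assignment loses OTHER */

	def_flags |= DEMO_VM_LOCKED;		/* new style: set only our bit */
	printf("set:   %#x (other bit kept: %s)\n", def_flags,
	       (def_flags & DEMO_VM_OTHER) ? "yes" : "no");

	def_flags &= ~DEMO_VM_LOCKED;		/* new style: clear only our bit */
	printf("clear: %#x (other bit kept: %s)\n", def_flags,
	       (def_flags & DEMO_VM_OTHER) ? "yes" : "no");

	printf("wholesale assignment would have left: %#x\n", clobbered);
	return 0;
}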
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index df2022ff0c8a..9673d96b1ba7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -773,6 +773,10 @@ void __init init_cma_reserved_pageblock(struct page *page) | |||
| 773 | set_pageblock_migratetype(page, MIGRATE_CMA); | 773 | set_pageblock_migratetype(page, MIGRATE_CMA); |
| 774 | __free_pages(page, pageblock_order); | 774 | __free_pages(page, pageblock_order); |
| 775 | totalram_pages += pageblock_nr_pages; | 775 | totalram_pages += pageblock_nr_pages; |
| 776 | #ifdef CONFIG_HIGHMEM | ||
| 777 | if (PageHighMem(page)) | ||
| 778 | totalhigh_pages += pageblock_nr_pages; | ||
| 779 | #endif | ||
| 776 | } | 780 | } |
| 777 | #endif | 781 | #endif |
| 778 | 782 | ||
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 25bfce0666eb..4925a02ae7e4 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
| @@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn) | |||
| 249 | __u8 reason = hci_proto_disconn_ind(conn); | 249 | __u8 reason = hci_proto_disconn_ind(conn); |
| 250 | 250 | ||
| 251 | switch (conn->type) { | 251 | switch (conn->type) { |
| 252 | case ACL_LINK: | ||
| 253 | hci_acl_disconn(conn, reason); | ||
| 254 | break; | ||
| 255 | case AMP_LINK: | 252 | case AMP_LINK: |
| 256 | hci_amp_disconn(conn, reason); | 253 | hci_amp_disconn(conn, reason); |
| 257 | break; | 254 | break; |
| 255 | default: | ||
| 256 | hci_acl_disconn(conn, reason); | ||
| 257 | break; | ||
| 258 | } | 258 | } |
| 259 | } | 259 | } |
| 260 | 260 | ||
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 68a9587c9694..5abefb12891d 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
| @@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) | |||
| 859 | 859 | ||
| 860 | skb_pull(skb, sizeof(code)); | 860 | skb_pull(skb, sizeof(code)); |
| 861 | 861 | ||
| 862 | /* | ||
| 863 | * The SMP context must be initialized for all other PDUs except | ||
| 864 | * pairing and security requests. If we get any other PDU when | ||
| 865 | * not initialized simply disconnect (done if this function | ||
| 866 | * returns an error). | ||
| 867 | */ | ||
| 868 | if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ && | ||
| 869 | !conn->smp_chan) { | ||
| 870 | BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code); | ||
| 871 | kfree_skb(skb); | ||
| 872 | return -ENOTSUPP; | ||
| 873 | } | ||
| 874 | |||
| 862 | switch (code) { | 875 | switch (code) { |
| 863 | case SMP_CMD_PAIRING_REQ: | 876 | case SMP_CMD_PAIRING_REQ: |
| 864 | reason = smp_cmd_pairing_req(conn, skb); | 877 | reason = smp_cmd_pairing_req(conn, skb); |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b29dacf900f9..e6e1cbe863f5 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
| @@ -1781,10 +1781,13 @@ static ssize_t pktgen_thread_write(struct file *file, | |||
| 1781 | return -EFAULT; | 1781 | return -EFAULT; |
| 1782 | i += len; | 1782 | i += len; |
| 1783 | mutex_lock(&pktgen_thread_lock); | 1783 | mutex_lock(&pktgen_thread_lock); |
| 1784 | pktgen_add_device(t, f); | 1784 | ret = pktgen_add_device(t, f); |
| 1785 | mutex_unlock(&pktgen_thread_lock); | 1785 | mutex_unlock(&pktgen_thread_lock); |
| 1786 | ret = count; | 1786 | if (!ret) { |
| 1787 | sprintf(pg_result, "OK: add_device=%s", f); | 1787 | ret = count; |
| 1788 | sprintf(pg_result, "OK: add_device=%s", f); | ||
| 1789 | } else | ||
| 1790 | sprintf(pg_result, "ERROR: can not add device %s", f); | ||
| 1788 | goto out; | 1791 | goto out; |
| 1789 | } | 1792 | } |
| 1790 | 1793 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a9a2ae3e2213..32443ebc3e89 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -683,7 +683,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
| 683 | new->network_header = old->network_header; | 683 | new->network_header = old->network_header; |
| 684 | new->mac_header = old->mac_header; | 684 | new->mac_header = old->mac_header; |
| 685 | new->inner_transport_header = old->inner_transport_header; | 685 | new->inner_transport_header = old->inner_transport_header; |
| 686 | new->inner_network_header = old->inner_transport_header; | 686 | new->inner_network_header = old->inner_network_header; |
| 687 | skb_dst_copy(new, old); | 687 | skb_dst_copy(new, old); |
| 688 | new->rxhash = old->rxhash; | 688 | new->rxhash = old->rxhash; |
| 689 | new->ooo_okay = old->ooo_okay; | 689 | new->ooo_okay = old->ooo_okay; |
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 291f2ed7cc31..cdf2e707bb10 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
| @@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp) | |||
| 310 | { | 310 | { |
| 311 | int cnt; /* increase in packets */ | 311 | int cnt; /* increase in packets */ |
| 312 | unsigned int delta = 0; | 312 | unsigned int delta = 0; |
| 313 | u32 snd_cwnd = tp->snd_cwnd; | ||
| 314 | |||
| 315 | if (unlikely(!snd_cwnd)) { | ||
| 316 | pr_err_once("snd_cwnd is zero, please report this bug.\n"); | ||
| 317 | snd_cwnd = 1U; | ||
| 318 | } | ||
| 313 | 319 | ||
| 314 | /* RFC3465: ABC Slow start | 320 | /* RFC3465: ABC Slow start |
| 315 | * Increase only after a full MSS of bytes is acked | 321 | * Increase only after a full MSS of bytes is acked |
| @@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp) | |||
| 324 | if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh) | 330 | if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh) |
| 325 | cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */ | 331 | cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */ |
| 326 | else | 332 | else |
| 327 | cnt = tp->snd_cwnd; /* exponential increase */ | 333 | cnt = snd_cwnd; /* exponential increase */ |
| 328 | 334 | ||
| 329 | /* RFC3465: ABC | 335 | /* RFC3465: ABC |
| 330 | * We MAY increase by 2 if discovered delayed ack | 336 | * We MAY increase by 2 if discovered delayed ack |
| @@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp) | |||
| 334 | tp->bytes_acked = 0; | 340 | tp->bytes_acked = 0; |
| 335 | 341 | ||
| 336 | tp->snd_cwnd_cnt += cnt; | 342 | tp->snd_cwnd_cnt += cnt; |
| 337 | while (tp->snd_cwnd_cnt >= tp->snd_cwnd) { | 343 | while (tp->snd_cwnd_cnt >= snd_cwnd) { |
| 338 | tp->snd_cwnd_cnt -= tp->snd_cwnd; | 344 | tp->snd_cwnd_cnt -= snd_cwnd; |
| 339 | delta++; | 345 | delta++; |
| 340 | } | 346 | } |
| 341 | tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp); | 347 | tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp); |
| 342 | } | 348 | } |
| 343 | EXPORT_SYMBOL_GPL(tcp_slow_start); | 349 | EXPORT_SYMBOL_GPL(tcp_slow_start); |
| 344 | 350 | ||
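The tcp_cong.c hunk above snapshots tp->snd_cwnd and falls back to 1 when it is zero: with snd_cwnd == 0 the reduction loop "while (snd_cwnd_cnt >= snd_cwnd)" would subtract zero forever. A minimal user-space sketch (an illustration only, not kernel code; the names merely mirror the patch) of how that loop converts the accumulated count into whole-window increments:

#include <stdio.h>

static unsigned int slow_start_delta(unsigned int *cwnd_cnt, unsigned int cnt,
                                     unsigned int snd_cwnd)
{
        unsigned int delta = 0;

        if (snd_cwnd == 0)              /* mirrors the new fallback to 1 */
                snd_cwnd = 1;

        *cwnd_cnt += cnt;
        while (*cwnd_cnt >= snd_cwnd) { /* same reduction as the kernel loop */
                *cwnd_cnt -= snd_cwnd;
                delta++;
        }
        return delta;
}

int main(void)
{
        unsigned int cwnd_cnt = 0, cwnd = 4;

        for (int ack = 0; ack < 4; ack++)       /* one call per ACK, cnt == cwnd */
                cwnd += slow_start_delta(&cwnd_cnt, cwnd, cwnd);
        printf("cwnd after 4 ACKs starting from 4: %u\n", cwnd);  /* prints 8 */
        return 0;
}

Each ACK with cnt == snd_cwnd yields delta == 1, so the window grows by one segment per ACK, doubling once per round trip, which is the classic slow-start behaviour the function implements.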
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 18f97ca76b00..ad70a962c20e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -3504,6 +3504,11 @@ static bool tcp_process_frto(struct sock *sk, int flag) | |||
| 3504 | } | 3504 | } |
| 3505 | } else { | 3505 | } else { |
| 3506 | if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { | 3506 | if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { |
| 3507 | if (!tcp_packets_in_flight(tp)) { | ||
| 3508 | tcp_enter_frto_loss(sk, 2, flag); | ||
| 3509 | return true; | ||
| 3510 | } | ||
| 3511 | |||
| 3507 | /* Prevent sending of new data. */ | 3512 | /* Prevent sending of new data. */ |
| 3508 | tp->snd_cwnd = min(tp->snd_cwnd, | 3513 | tp->snd_cwnd = min(tp->snd_cwnd, |
| 3509 | tcp_packets_in_flight(tp)); | 3514 | tcp_packets_in_flight(tp)); |
| @@ -5649,8 +5654,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, | |||
| 5649 | * the remote receives only the retransmitted (regular) SYNs: either | 5654 | * the remote receives only the retransmitted (regular) SYNs: either |
| 5650 | * the original SYN-data or the corresponding SYN-ACK is lost. | 5655 | * the original SYN-data or the corresponding SYN-ACK is lost. |
| 5651 | */ | 5656 | */ |
| 5652 | syn_drop = (cookie->len <= 0 && data && | 5657 | syn_drop = (cookie->len <= 0 && data && tp->total_retrans); |
| 5653 | inet_csk(sk)->icsk_retransmits); | ||
| 5654 | 5658 | ||
| 5655 | tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); | 5659 | tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); |
| 5656 | 5660 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 70b09ef2463b..eadb693eef55 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -496,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
| 496 | * errors returned from accept(). | 496 | * errors returned from accept(). |
| 497 | */ | 497 | */ |
| 498 | inet_csk_reqsk_queue_drop(sk, req, prev); | 498 | inet_csk_reqsk_queue_drop(sk, req, prev); |
| 499 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); | ||
| 499 | goto out; | 500 | goto out; |
| 500 | 501 | ||
| 501 | case TCP_SYN_SENT: | 502 | case TCP_SYN_SENT: |
| @@ -1500,8 +1501,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
| 1500 | * clogging syn queue with openreqs with exponentially increasing | 1501 | * clogging syn queue with openreqs with exponentially increasing |
| 1501 | * timeout. | 1502 | * timeout. |
| 1502 | */ | 1503 | */ |
| 1503 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) | 1504 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { |
| 1505 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); | ||
| 1504 | goto drop; | 1506 | goto drop; |
| 1507 | } | ||
| 1505 | 1508 | ||
| 1506 | req = inet_reqsk_alloc(&tcp_request_sock_ops); | 1509 | req = inet_reqsk_alloc(&tcp_request_sock_ops); |
| 1507 | if (!req) | 1510 | if (!req) |
| @@ -1666,6 +1669,7 @@ drop_and_release: | |||
| 1666 | drop_and_free: | 1669 | drop_and_free: |
| 1667 | reqsk_free(req); | 1670 | reqsk_free(req); |
| 1668 | drop: | 1671 | drop: |
| 1672 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); | ||
| 1669 | return 0; | 1673 | return 0; |
| 1670 | } | 1674 | } |
| 1671 | EXPORT_SYMBOL(tcp_v4_conn_request); | 1675 | EXPORT_SYMBOL(tcp_v4_conn_request); |
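For reference, LINUX_MIB_LISTENDROPS and LINUX_MIB_LISTENOVERFLOWS are the per-namespace counters exported as ListenDrops and ListenOverflows in the TcpExt block of /proc/net/netstat. A small user-space sketch (an illustration, not part of the patch) that prints the two counters the new NET_INC_STATS_BH() calls bump:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char names[4096], values[4096];
        FILE *f = fopen("/proc/net/netstat", "r");

        if (!f)
                return 1;
        /* The file holds pairs of lines: "TcpExt: <names...>" followed by
         * "TcpExt: <values...>", then further pairs such as IpExt. */
        while (fgets(names, sizeof(names), f) &&
               fgets(values, sizeof(values), f)) {
                char *np, *vp, *n, *v;

                if (strncmp(names, "TcpExt:", 7) != 0)
                        continue;
                n = strtok_r(names, " \n", &np);
                v = strtok_r(values, " \n", &vp);
                while (n && v) {
                        if (!strcmp(n, "ListenOverflows") ||
                            !strcmp(n, "ListenDrops"))
                                printf("%s = %s\n", n, v);
                        n = strtok_r(NULL, " \n", &np);
                        v = strtok_r(NULL, " \n", &vp);
                }
        }
        fclose(f);
        return 0;
}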
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 420e56326384..1b5d8cb9b123 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -1660,6 +1660,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev) | |||
| 1660 | if (dev->addr_len != IEEE802154_ADDR_LEN) | 1660 | if (dev->addr_len != IEEE802154_ADDR_LEN) |
| 1661 | return -1; | 1661 | return -1; |
| 1662 | memcpy(eui, dev->dev_addr, 8); | 1662 | memcpy(eui, dev->dev_addr, 8); |
| 1663 | eui[0] ^= 2; | ||
| 1663 | return 0; | 1664 | return 0; |
| 1664 | } | 1665 | } |
| 1665 | 1666 | ||
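The one-line addrconf change flips the universal/local bit when an IPv6 interface identifier is derived from an 8-byte IEEE 802.15.4 long address, following the modified EUI-64 format (RFC 4291, Appendix A). A stand-alone sketch of the transformation (hypothetical helper, not the kernel function):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Build a modified EUI-64 interface identifier from an 8-byte hardware
 * address by inverting the universal/local bit, as eui[0] ^= 2 now does. */
static void hwaddr_to_ifid(uint8_t ifid[8], const uint8_t hwaddr[8])
{
        memcpy(ifid, hwaddr, 8);
        ifid[0] ^= 0x02;
}

int main(void)
{
        const uint8_t lladdr[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
        uint8_t ifid[8];
        int i;

        hwaddr_to_ifid(ifid, lladdr);
        for (i = 0; i < 8; i++)         /* prints 02:11:22:33:44:55:66:77 */
                printf("%02x%c", ifid[i], i == 7 ? '\n' : ':');
        return 0;
}

With the flip, a link-local address formed from this identifier reads fe80::211:2233:4455:6677 rather than fe80::11:2233:4455:6677.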
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 8edf2601065a..7a778b9a7b85 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
| @@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
| 380 | if (skb->protocol == htons(ETH_P_IPV6)) { | 380 | if (skb->protocol == htons(ETH_P_IPV6)) { |
| 381 | sin->sin6_addr = ipv6_hdr(skb)->saddr; | 381 | sin->sin6_addr = ipv6_hdr(skb)->saddr; |
| 382 | if (np->rxopt.all) | 382 | if (np->rxopt.all) |
| 383 | datagram_recv_ctl(sk, msg, skb); | 383 | ip6_datagram_recv_ctl(sk, msg, skb); |
| 384 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) | 384 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) |
| 385 | sin->sin6_scope_id = IP6CB(skb)->iif; | 385 | sin->sin6_scope_id = IP6CB(skb)->iif; |
| 386 | } else { | 386 | } else { |
| @@ -468,7 +468,8 @@ out: | |||
| 468 | } | 468 | } |
| 469 | 469 | ||
| 470 | 470 | ||
| 471 | int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) | 471 | int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, |
| 472 | struct sk_buff *skb) | ||
| 472 | { | 473 | { |
| 473 | struct ipv6_pinfo *np = inet6_sk(sk); | 474 | struct ipv6_pinfo *np = inet6_sk(sk); |
| 474 | struct inet6_skb_parm *opt = IP6CB(skb); | 475 | struct inet6_skb_parm *opt = IP6CB(skb); |
| @@ -597,11 +598,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) | |||
| 597 | } | 598 | } |
| 598 | return 0; | 599 | return 0; |
| 599 | } | 600 | } |
| 601 | EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); | ||
| 600 | 602 | ||
| 601 | int datagram_send_ctl(struct net *net, struct sock *sk, | 603 | int ip6_datagram_send_ctl(struct net *net, struct sock *sk, |
| 602 | struct msghdr *msg, struct flowi6 *fl6, | 604 | struct msghdr *msg, struct flowi6 *fl6, |
| 603 | struct ipv6_txoptions *opt, | 605 | struct ipv6_txoptions *opt, |
| 604 | int *hlimit, int *tclass, int *dontfrag) | 606 | int *hlimit, int *tclass, int *dontfrag) |
| 605 | { | 607 | { |
| 606 | struct in6_pktinfo *src_info; | 608 | struct in6_pktinfo *src_info; |
| 607 | struct cmsghdr *cmsg; | 609 | struct cmsghdr *cmsg; |
| @@ -871,4 +873,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk, | |||
| 871 | exit_f: | 873 | exit_f: |
| 872 | return err; | 874 | return err; |
| 873 | } | 875 | } |
| 874 | EXPORT_SYMBOL_GPL(datagram_send_ctl); | 876 | EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl); |
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 29124b7a04c8..d6de4b447250 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c | |||
| @@ -365,8 +365,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, | |||
| 365 | msg.msg_control = (void*)(fl->opt+1); | 365 | msg.msg_control = (void*)(fl->opt+1); |
| 366 | memset(&flowi6, 0, sizeof(flowi6)); | 366 | memset(&flowi6, 0, sizeof(flowi6)); |
| 367 | 367 | ||
| 368 | err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, | 368 | err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, |
| 369 | &junk, &junk); | 369 | &junk, &junk, &junk); |
| 370 | if (err) | 370 | if (err) |
| 371 | goto done; | 371 | goto done; |
| 372 | err = -EINVAL; | 372 | err = -EINVAL; |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index c727e4712751..131dd097736d 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -960,7 +960,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb, | |||
| 960 | int ret; | 960 | int ret; |
| 961 | 961 | ||
| 962 | if (!ip6_tnl_xmit_ctl(t)) | 962 | if (!ip6_tnl_xmit_ctl(t)) |
| 963 | return -1; | 963 | goto tx_err; |
| 964 | 964 | ||
| 965 | switch (skb->protocol) { | 965 | switch (skb->protocol) { |
| 966 | case htons(ETH_P_IP): | 966 | case htons(ETH_P_IP): |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index ee94d31c9d4d..d1e2e8ef29c5 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
| @@ -476,8 +476,8 @@ sticky_done: | |||
| 476 | msg.msg_controllen = optlen; | 476 | msg.msg_controllen = optlen; |
| 477 | msg.msg_control = (void*)(opt+1); | 477 | msg.msg_control = (void*)(opt+1); |
| 478 | 478 | ||
| 479 | retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, | 479 | retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, |
| 480 | &junk); | 480 | &junk, &junk); |
| 481 | if (retv) | 481 | if (retv) |
| 482 | goto done; | 482 | goto done; |
| 483 | update: | 483 | update: |
| @@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
| 1002 | release_sock(sk); | 1002 | release_sock(sk); |
| 1003 | 1003 | ||
| 1004 | if (skb) { | 1004 | if (skb) { |
| 1005 | int err = datagram_recv_ctl(sk, &msg, skb); | 1005 | int err = ip6_datagram_recv_ctl(sk, &msg, skb); |
| 1006 | kfree_skb(skb); | 1006 | kfree_skb(skb); |
| 1007 | if (err) | 1007 | if (err) |
| 1008 | return err; | 1008 | return err; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 6cd29b1e8b92..70fa81449997 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
| @@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
| 507 | sock_recv_ts_and_drops(msg, sk, skb); | 507 | sock_recv_ts_and_drops(msg, sk, skb); |
| 508 | 508 | ||
| 509 | if (np->rxopt.all) | 509 | if (np->rxopt.all) |
| 510 | datagram_recv_ctl(sk, msg, skb); | 510 | ip6_datagram_recv_ctl(sk, msg, skb); |
| 511 | 511 | ||
| 512 | err = copied; | 512 | err = copied; |
| 513 | if (flags & MSG_TRUNC) | 513 | if (flags & MSG_TRUNC) |
| @@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
| 822 | memset(opt, 0, sizeof(struct ipv6_txoptions)); | 822 | memset(opt, 0, sizeof(struct ipv6_txoptions)); |
| 823 | opt->tot_len = sizeof(struct ipv6_txoptions); | 823 | opt->tot_len = sizeof(struct ipv6_txoptions); |
| 824 | 824 | ||
| 825 | err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, | 825 | err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, |
| 826 | &hlimit, &tclass, &dontfrag); | 826 | &hlimit, &tclass, &dontfrag); |
| 827 | if (err < 0) { | 827 | if (err < 0) { |
| 828 | fl6_sock_release(flowlabel); | 828 | fl6_sock_release(flowlabel); |
| 829 | return err; | 829 | return err; |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index e229a3bc345d..363d8b7772e8 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -928,7 +928,7 @@ restart: | |||
| 928 | dst_hold(&rt->dst); | 928 | dst_hold(&rt->dst); |
| 929 | read_unlock_bh(&table->tb6_lock); | 929 | read_unlock_bh(&table->tb6_lock); |
| 930 | 930 | ||
| 931 | if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP)) | 931 | if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL))) |
| 932 | nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); | 932 | nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); |
| 933 | else if (!(rt->dst.flags & DST_HOST)) | 933 | else if (!(rt->dst.flags & DST_HOST)) |
| 934 | nrt = rt6_alloc_clone(rt, &fl6->daddr); | 934 | nrt = rt6_alloc_clone(rt, &fl6->daddr); |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 93825dd3a7c0..4f43537197ef 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 423 | } | 423 | } |
| 424 | 424 | ||
| 425 | inet_csk_reqsk_queue_drop(sk, req, prev); | 425 | inet_csk_reqsk_queue_drop(sk, req, prev); |
| 426 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); | ||
| 426 | goto out; | 427 | goto out; |
| 427 | 428 | ||
| 428 | case TCP_SYN_SENT: | 429 | case TCP_SYN_SENT: |
| @@ -958,8 +959,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
| 958 | goto drop; | 959 | goto drop; |
| 959 | } | 960 | } |
| 960 | 961 | ||
| 961 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) | 962 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { |
| 963 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); | ||
| 962 | goto drop; | 964 | goto drop; |
| 965 | } | ||
| 963 | 966 | ||
| 964 | req = inet6_reqsk_alloc(&tcp6_request_sock_ops); | 967 | req = inet6_reqsk_alloc(&tcp6_request_sock_ops); |
| 965 | if (req == NULL) | 968 | if (req == NULL) |
| @@ -1108,6 +1111,7 @@ drop_and_release: | |||
| 1108 | drop_and_free: | 1111 | drop_and_free: |
| 1109 | reqsk_free(req); | 1112 | reqsk_free(req); |
| 1110 | drop: | 1113 | drop: |
| 1114 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); | ||
| 1111 | return 0; /* don't send reset */ | 1115 | return 0; /* don't send reset */ |
| 1112 | } | 1116 | } |
| 1113 | 1117 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index dfaa29b8b293..fb083295ff0b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -443,7 +443,7 @@ try_again: | |||
| 443 | ip_cmsg_recv(msg, skb); | 443 | ip_cmsg_recv(msg, skb); |
| 444 | } else { | 444 | } else { |
| 445 | if (np->rxopt.all) | 445 | if (np->rxopt.all) |
| 446 | datagram_recv_ctl(sk, msg, skb); | 446 | ip6_datagram_recv_ctl(sk, msg, skb); |
| 447 | } | 447 | } |
| 448 | 448 | ||
| 449 | err = copied; | 449 | err = copied; |
| @@ -1153,8 +1153,8 @@ do_udp_sendmsg: | |||
| 1153 | memset(opt, 0, sizeof(struct ipv6_txoptions)); | 1153 | memset(opt, 0, sizeof(struct ipv6_txoptions)); |
| 1154 | opt->tot_len = sizeof(*opt); | 1154 | opt->tot_len = sizeof(*opt); |
| 1155 | 1155 | ||
| 1156 | err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, | 1156 | err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, |
| 1157 | &hlimit, &tclass, &dontfrag); | 1157 | &hlimit, &tclass, &dontfrag); |
| 1158 | if (err < 0) { | 1158 | if (err < 0) { |
| 1159 | fl6_sock_release(flowlabel); | 1159 | fl6_sock_release(flowlabel); |
| 1160 | return err; | 1160 | return err; |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 1a9f3723c13c..2ac884d0e89b 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
| @@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id) | |||
| 168 | 168 | ||
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | /* Lookup the tunnel socket, possibly involving the fs code if the socket is | ||
| 172 | * owned by userspace. A struct sock returned from this function must be | ||
| 173 | * released using l2tp_tunnel_sock_put once you're done with it. | ||
| 174 | */ | ||
| 175 | struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) | ||
| 176 | { | ||
| 177 | int err = 0; | ||
| 178 | struct socket *sock = NULL; | ||
| 179 | struct sock *sk = NULL; | ||
| 180 | |||
| 181 | if (!tunnel) | ||
| 182 | goto out; | ||
| 183 | |||
| 184 | if (tunnel->fd >= 0) { | ||
| 185 | /* Socket is owned by userspace, who might be in the process | ||
| 186 | * of closing it. Look the socket up using the fd to ensure | ||
| 187 | * consistency. | ||
| 188 | */ | ||
| 189 | sock = sockfd_lookup(tunnel->fd, &err); | ||
| 190 | if (sock) | ||
| 191 | sk = sock->sk; | ||
| 192 | } else { | ||
| 193 | /* Socket is owned by kernelspace */ | ||
| 194 | sk = tunnel->sock; | ||
| 195 | } | ||
| 196 | |||
| 197 | out: | ||
| 198 | return sk; | ||
| 199 | } | ||
| 200 | EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup); | ||
| 201 | |||
| 202 | /* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup() */ | ||
| 203 | void l2tp_tunnel_sock_put(struct sock *sk) | ||
| 204 | { | ||
| 205 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | ||
| 206 | if (tunnel) { | ||
| 207 | if (tunnel->fd >= 0) { | ||
| 208 | /* Socket is owned by userspace */ | ||
| 209 | sockfd_put(sk->sk_socket); | ||
| 210 | } | ||
| 211 | sock_put(sk); | ||
| 212 | } | ||
| 213 | } | ||
| 214 | EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); | ||
| 215 | |||
| 171 | /* Lookup a session by id in the global session list | 216 | /* Lookup a session by id in the global session list |
| 172 | */ | 217 | */ |
| 173 | static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) | 218 | static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) |
| @@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len | |||
| 1123 | struct udphdr *uh; | 1168 | struct udphdr *uh; |
| 1124 | struct inet_sock *inet; | 1169 | struct inet_sock *inet; |
| 1125 | __wsum csum; | 1170 | __wsum csum; |
| 1126 | int old_headroom; | ||
| 1127 | int new_headroom; | ||
| 1128 | int headroom; | 1171 | int headroom; |
| 1129 | int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; | 1172 | int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; |
| 1130 | int udp_len; | 1173 | int udp_len; |
| @@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len | |||
| 1136 | */ | 1179 | */ |
| 1137 | headroom = NET_SKB_PAD + sizeof(struct iphdr) + | 1180 | headroom = NET_SKB_PAD + sizeof(struct iphdr) + |
| 1138 | uhlen + hdr_len; | 1181 | uhlen + hdr_len; |
| 1139 | old_headroom = skb_headroom(skb); | ||
| 1140 | if (skb_cow_head(skb, headroom)) { | 1182 | if (skb_cow_head(skb, headroom)) { |
| 1141 | kfree_skb(skb); | 1183 | kfree_skb(skb); |
| 1142 | return NET_XMIT_DROP; | 1184 | return NET_XMIT_DROP; |
| 1143 | } | 1185 | } |
| 1144 | 1186 | ||
| 1145 | new_headroom = skb_headroom(skb); | ||
| 1146 | skb_orphan(skb); | 1187 | skb_orphan(skb); |
| 1147 | skb->truesize += new_headroom - old_headroom; | ||
| 1148 | |||
| 1149 | /* Setup L2TP header */ | 1188 | /* Setup L2TP header */ |
| 1150 | session->build_header(session, __skb_push(skb, hdr_len)); | 1189 | session->build_header(session, __skb_push(skb, hdr_len)); |
| 1151 | 1190 | ||
| @@ -1607,6 +1646,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
| 1607 | tunnel->old_sk_destruct = sk->sk_destruct; | 1646 | tunnel->old_sk_destruct = sk->sk_destruct; |
| 1608 | sk->sk_destruct = &l2tp_tunnel_destruct; | 1647 | sk->sk_destruct = &l2tp_tunnel_destruct; |
| 1609 | tunnel->sock = sk; | 1648 | tunnel->sock = sk; |
| 1649 | tunnel->fd = fd; | ||
| 1610 | lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); | 1650 | lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); |
| 1611 | 1651 | ||
| 1612 | sk->sk_allocation = GFP_ATOMIC; | 1652 | sk->sk_allocation = GFP_ATOMIC; |
| @@ -1642,24 +1682,32 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | |||
| 1642 | */ | 1682 | */ |
| 1643 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) | 1683 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) |
| 1644 | { | 1684 | { |
| 1645 | int err = 0; | 1685 | int err = -EBADF; |
| 1646 | struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL; | 1686 | struct socket *sock = NULL; |
| 1687 | struct sock *sk = NULL; | ||
| 1688 | |||
| 1689 | sk = l2tp_tunnel_sock_lookup(tunnel); | ||
| 1690 | if (!sk) | ||
| 1691 | goto out; | ||
| 1692 | |||
| 1693 | sock = sk->sk_socket; | ||
| 1694 | BUG_ON(!sock); | ||
| 1647 | 1695 | ||
| 1648 | /* Force the tunnel socket to close. This will eventually | 1696 | /* Force the tunnel socket to close. This will eventually |
| 1649 | * cause the tunnel to be deleted via the normal socket close | 1697 | * cause the tunnel to be deleted via the normal socket close |
| 1650 | * mechanisms when userspace closes the tunnel socket. | 1698 | * mechanisms when userspace closes the tunnel socket. |
| 1651 | */ | 1699 | */ |
| 1652 | if (sock != NULL) { | 1700 | err = inet_shutdown(sock, 2); |
| 1653 | err = inet_shutdown(sock, 2); | ||
| 1654 | 1701 | ||
| 1655 | /* If the tunnel's socket was created by the kernel, | 1702 | /* If the tunnel's socket was created by the kernel, |
| 1656 | * close the socket here since the socket was not | 1703 | * close the socket here since the socket was not |
| 1657 | * created by userspace. | 1704 | * created by userspace. |
| 1658 | */ | 1705 | */ |
| 1659 | if (sock->file == NULL) | 1706 | if (sock->file == NULL) |
| 1660 | err = inet_release(sock); | 1707 | err = inet_release(sock); |
| 1661 | } | ||
| 1662 | 1708 | ||
| 1709 | l2tp_tunnel_sock_put(sk); | ||
| 1710 | out: | ||
| 1663 | return err; | 1711 | return err; |
| 1664 | } | 1712 | } |
| 1665 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | 1713 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); |
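The comment on l2tp_tunnel_sock_lookup() above spells out the contract: every socket it returns must be released with l2tp_tunnel_sock_put(). A kernel-context fragment (a hypothetical caller for illustration only, assuming the declarations now exported in l2tp_core.h) showing the pairing that l2tp_tunnel_delete() follows:

/* Hypothetical caller, not part of the patch. */
static int example_with_tunnel_sock(struct l2tp_tunnel *tunnel)
{
        struct sock *sk = l2tp_tunnel_sock_lookup(tunnel);

        if (!sk)
                return -EBADF;  /* no tunnel, or its fd was already closed */

        /* ... use sk while the reference pins the socket ... */

        l2tp_tunnel_sock_put(sk);
        return 0;
}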
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 56d583e083a7..e62204cad4fe 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
| @@ -188,7 +188,8 @@ struct l2tp_tunnel { | |||
| 188 | int (*recv_payload_hook)(struct sk_buff *skb); | 188 | int (*recv_payload_hook)(struct sk_buff *skb); |
| 189 | void (*old_sk_destruct)(struct sock *); | 189 | void (*old_sk_destruct)(struct sock *); |
| 190 | struct sock *sock; /* Parent socket */ | 190 | struct sock *sock; /* Parent socket */ |
| 191 | int fd; | 191 | int fd; /* Parent fd, if tunnel socket |
| 192 | * was created by userspace */ | ||
| 192 | 193 | ||
| 193 | uint8_t priv[0]; /* private data */ | 194 | uint8_t priv[0]; /* private data */ |
| 194 | }; | 195 | }; |
| @@ -228,6 +229,8 @@ out: | |||
| 228 | return tunnel; | 229 | return tunnel; |
| 229 | } | 230 | } |
| 230 | 231 | ||
| 232 | extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel); | ||
| 233 | extern void l2tp_tunnel_sock_put(struct sock *sk); | ||
| 231 | extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); | 234 | extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); |
| 232 | extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); | 235 | extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); |
| 233 | extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); | 236 | extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 927547171bc7..8ee4a86ae996 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
| @@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
| 554 | memset(opt, 0, sizeof(struct ipv6_txoptions)); | 554 | memset(opt, 0, sizeof(struct ipv6_txoptions)); |
| 555 | opt->tot_len = sizeof(struct ipv6_txoptions); | 555 | opt->tot_len = sizeof(struct ipv6_txoptions); |
| 556 | 556 | ||
| 557 | err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, | 557 | err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, |
| 558 | &hlimit, &tclass, &dontfrag); | 558 | &hlimit, &tclass, &dontfrag); |
| 559 | if (err < 0) { | 559 | if (err < 0) { |
| 560 | fl6_sock_release(flowlabel); | 560 | fl6_sock_release(flowlabel); |
| 561 | return err; | 561 | return err; |
| @@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
| 646 | struct msghdr *msg, size_t len, int noblock, | 646 | struct msghdr *msg, size_t len, int noblock, |
| 647 | int flags, int *addr_len) | 647 | int flags, int *addr_len) |
| 648 | { | 648 | { |
| 649 | struct inet_sock *inet = inet_sk(sk); | 649 | struct ipv6_pinfo *np = inet6_sk(sk); |
| 650 | struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; | 650 | struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; |
| 651 | size_t copied = 0; | 651 | size_t copied = 0; |
| 652 | int err = -EOPNOTSUPP; | 652 | int err = -EOPNOTSUPP; |
| @@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
| 688 | lsa->l2tp_scope_id = IP6CB(skb)->iif; | 688 | lsa->l2tp_scope_id = IP6CB(skb)->iif; |
| 689 | } | 689 | } |
| 690 | 690 | ||
| 691 | if (inet->cmsg_flags) | 691 | if (np->rxopt.all) |
| 692 | ip_cmsg_recv(msg, skb); | 692 | ip6_datagram_recv_ctl(sk, msg, skb); |
| 693 | 693 | ||
| 694 | if (flags & MSG_TRUNC) | 694 | if (flags & MSG_TRUNC) |
| 695 | copied = skb->len; | 695 | copied = skb->len; |
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 286366ef8930..716605c241f4 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
| @@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
| 388 | struct l2tp_session *session; | 388 | struct l2tp_session *session; |
| 389 | struct l2tp_tunnel *tunnel; | 389 | struct l2tp_tunnel *tunnel; |
| 390 | struct pppol2tp_session *ps; | 390 | struct pppol2tp_session *ps; |
| 391 | int old_headroom; | ||
| 392 | int new_headroom; | ||
| 393 | int uhlen, headroom; | 391 | int uhlen, headroom; |
| 394 | 392 | ||
| 395 | if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) | 393 | if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) |
| @@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
| 408 | if (tunnel == NULL) | 406 | if (tunnel == NULL) |
| 409 | goto abort_put_sess; | 407 | goto abort_put_sess; |
| 410 | 408 | ||
| 411 | old_headroom = skb_headroom(skb); | ||
| 412 | uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; | 409 | uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; |
| 413 | headroom = NET_SKB_PAD + | 410 | headroom = NET_SKB_PAD + |
| 414 | sizeof(struct iphdr) + /* IP header */ | 411 | sizeof(struct iphdr) + /* IP header */ |
| @@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
| 418 | if (skb_cow_head(skb, headroom)) | 415 | if (skb_cow_head(skb, headroom)) |
| 419 | goto abort_put_sess_tun; | 416 | goto abort_put_sess_tun; |
| 420 | 417 | ||
| 421 | new_headroom = skb_headroom(skb); | ||
| 422 | skb->truesize += new_headroom - old_headroom; | ||
| 423 | |||
| 424 | /* Setup PPP header */ | 418 | /* Setup PPP header */ |
| 425 | __skb_push(skb, sizeof(ppph)); | 419 | __skb_push(skb, sizeof(ppph)); |
| 426 | skb->data[0] = ppph[0]; | 420 | skb->data[0] = ppph[0]; |
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index a9327e2e48ce..670cbc3518de 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c | |||
| @@ -35,10 +35,11 @@ | |||
| 35 | /* Must be called with rcu_read_lock. */ | 35 | /* Must be called with rcu_read_lock. */ |
| 36 | static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) | 36 | static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) |
| 37 | { | 37 | { |
| 38 | if (unlikely(!vport)) { | 38 | if (unlikely(!vport)) |
| 39 | kfree_skb(skb); | 39 | goto error; |
| 40 | return; | 40 | |
| 41 | } | 41 | if (unlikely(skb_warn_if_lro(skb))) |
| 42 | goto error; | ||
| 42 | 43 | ||
| 43 | /* Make our own copy of the packet. Otherwise we will mangle the | 44 | /* Make our own copy of the packet. Otherwise we will mangle the |
| 44 | * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). | 45 | * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). |
| @@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) | |||
| 50 | 51 | ||
| 51 | skb_push(skb, ETH_HLEN); | 52 | skb_push(skb, ETH_HLEN); |
| 52 | ovs_vport_receive(vport, skb); | 53 | ovs_vport_receive(vport, skb); |
| 54 | return; | ||
| 55 | |||
| 56 | error: | ||
| 57 | kfree_skb(skb); | ||
| 53 | } | 58 | } |
| 54 | 59 | ||
| 55 | /* Called with rcu_read_lock and bottom-halves disabled. */ | 60 | /* Called with rcu_read_lock and bottom-halves disabled. */ |
| @@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb) | |||
| 169 | goto error; | 174 | goto error; |
| 170 | } | 175 | } |
| 171 | 176 | ||
| 172 | if (unlikely(skb_warn_if_lro(skb))) | ||
| 173 | goto error; | ||
| 174 | |||
| 175 | skb->dev = netdev_vport->dev; | 177 | skb->dev = netdev_vport->dev; |
| 176 | len = skb->len; | 178 | len = skb->len; |
| 177 | dev_queue_xmit(skb); | 179 | dev_queue_xmit(skb); |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index e639645e8fec..c111bd0e083a 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock) | |||
| 2361 | 2361 | ||
| 2362 | packet_flush_mclist(sk); | 2362 | packet_flush_mclist(sk); |
| 2363 | 2363 | ||
| 2364 | memset(&req_u, 0, sizeof(req_u)); | 2364 | if (po->rx_ring.pg_vec) { |
| 2365 | 2365 | memset(&req_u, 0, sizeof(req_u)); | |
| 2366 | if (po->rx_ring.pg_vec) | ||
| 2367 | packet_set_ring(sk, &req_u, 1, 0); | 2366 | packet_set_ring(sk, &req_u, 1, 0); |
| 2367 | } | ||
| 2368 | 2368 | ||
| 2369 | if (po->tx_ring.pg_vec) | 2369 | if (po->tx_ring.pg_vec) { |
| 2370 | memset(&req_u, 0, sizeof(req_u)); | ||
| 2370 | packet_set_ring(sk, &req_u, 1, 1); | 2371 | packet_set_ring(sk, &req_u, 1, 1); |
| 2372 | } | ||
| 2371 | 2373 | ||
| 2372 | fanout_release(sk); | 2374 | fanout_release(sk); |
| 2373 | 2375 | ||
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 298c0ddfb57e..3d2acc7a9c80 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
| @@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 438 | if (q->rate) { | 438 | if (q->rate) { |
| 439 | struct sk_buff_head *list = &sch->q; | 439 | struct sk_buff_head *list = &sch->q; |
| 440 | 440 | ||
| 441 | delay += packet_len_2_sched_time(skb->len, q); | ||
| 442 | |||
| 443 | if (!skb_queue_empty(list)) { | 441 | if (!skb_queue_empty(list)) { |
| 444 | /* | 442 | /* |
| 445 | * Last packet in queue is reference point (now). | 443 | * Last packet in queue is reference point (now), |
| 446 | * First packet in queue is already in flight, | 444 | * calculate this time bonus and subtract |
| 447 | * calculate this time bonus and substract | ||
| 448 | * from delay. | 445 | * from delay. |
| 449 | */ | 446 | */ |
| 450 | delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; | 447 | delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now; |
| 448 | delay = max_t(psched_tdiff_t, 0, delay); | ||
| 451 | now = netem_skb_cb(skb_peek_tail(list))->time_to_send; | 449 | now = netem_skb_cb(skb_peek_tail(list))->time_to_send; |
| 452 | } | 450 | } |
| 451 | |||
| 452 | delay += packet_len_2_sched_time(skb->len, q); | ||
| 453 | } | 453 | } |
| 454 | 454 | ||
| 455 | cb->time_to_send = now + delay; | 455 | cb->time_to_send = now + delay; |
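A worked example of the reordered rate-limit arithmetic in the netem hunk above, with assumed numbers: suppose the wall clock is at 0 ms, the configured latency gives delay = 2 ms, the last queued packet is due to go out at 5 ms, and the new packet needs 1 ms at the configured rate. The patched code computes delay = max(0, 2 - 5) = 0, resets now to 5 ms, then adds the 1 ms serialization time, so time_to_send = 5 + 0 + 1 = 6 ms, exactly one transmission time behind the packet ahead of it instead of before it. With a larger latency, say 10 ms, the same steps give delay = max(0, 10 - 5) + 1 = 6 ms and time_to_send = 5 + 6 = 11 ms, so the packet still experiences its full latency plus its own transmission time.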
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 159b9bc5d633..d8420ae614dc 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
| @@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key) | |||
| 71 | return; | 71 | return; |
| 72 | 72 | ||
| 73 | if (atomic_dec_and_test(&key->refcnt)) { | 73 | if (atomic_dec_and_test(&key->refcnt)) { |
| 74 | kfree(key); | 74 | kzfree(key); |
| 75 | SCTP_DBG_OBJCNT_DEC(keys); | 75 | SCTP_DBG_OBJCNT_DEC(keys); |
| 76 | } | 76 | } |
| 77 | } | 77 | } |
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 17a001bac2cc..1a9c5fb77310 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
| @@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep) | |||
| 249 | /* Final destructor for endpoint. */ | 249 | /* Final destructor for endpoint. */ |
| 250 | static void sctp_endpoint_destroy(struct sctp_endpoint *ep) | 250 | static void sctp_endpoint_destroy(struct sctp_endpoint *ep) |
| 251 | { | 251 | { |
| 252 | int i; | ||
| 253 | |||
| 252 | SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); | 254 | SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); |
| 253 | 255 | ||
| 254 | /* Free up the HMAC transform. */ | 256 | /* Free up the HMAC transform. */ |
| @@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) | |||
| 271 | sctp_inq_free(&ep->base.inqueue); | 273 | sctp_inq_free(&ep->base.inqueue); |
| 272 | sctp_bind_addr_free(&ep->base.bind_addr); | 274 | sctp_bind_addr_free(&ep->base.bind_addr); |
| 273 | 275 | ||
| 276 | for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i) | ||
| 277 | memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE); | ||
| 278 | |||
| 274 | /* Remove and free the port */ | 279 | /* Remove and free the port */ |
| 275 | if (sctp_sk(ep->base.sk)->bind_hash) | 280 | if (sctp_sk(ep->base.sk)->bind_hash) |
| 276 | sctp_put_port(ep->base.sk); | 281 | sctp_put_port(ep->base.sk); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9e65758cb038..cedd9bf67b8c 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk, | |||
| 3390 | 3390 | ||
| 3391 | ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); | 3391 | ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); |
| 3392 | out: | 3392 | out: |
| 3393 | kfree(authkey); | 3393 | kzfree(authkey); |
| 3394 | return ret; | 3394 | return ret; |
| 3395 | } | 3395 | } |
| 3396 | 3396 | ||
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 0a148c9d2a5c..0f679df7d072 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
| @@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp, | |||
| 465 | } | 465 | } |
| 466 | 466 | ||
| 467 | /* | 467 | /* |
| 468 | * See net/ipv6/datagram.c : datagram_recv_ctl | 468 | * See net/ipv6/datagram.c : ip6_datagram_recv_ctl |
| 469 | */ | 469 | */ |
| 470 | static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, | 470 | static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, |
| 471 | struct cmsghdr *cmh) | 471 | struct cmsghdr *cmh) |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 01592d7d4789..45f1618c8e23 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
| @@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info, | |||
| 1358 | &iwe, IW_EV_UINT_LEN); | 1358 | &iwe, IW_EV_UINT_LEN); |
| 1359 | } | 1359 | } |
| 1360 | 1360 | ||
| 1361 | buf = kmalloc(30, GFP_ATOMIC); | 1361 | buf = kmalloc(31, GFP_ATOMIC); |
| 1362 | if (buf) { | 1362 | if (buf) { |
| 1363 | memset(&iwe, 0, sizeof(iwe)); | 1363 | memset(&iwe, 0, sizeof(iwe)); |
| 1364 | iwe.cmd = IWEVCUSTOM; | 1364 | iwe.cmd = IWEVCUSTOM; |
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig index a210c8d7b4bc..3b98159d9645 100644 --- a/sound/soc/fsl/Kconfig +++ b/sound/soc/fsl/Kconfig | |||
| @@ -108,13 +108,18 @@ if SND_IMX_SOC | |||
| 108 | config SND_SOC_IMX_SSI | 108 | config SND_SOC_IMX_SSI |
| 109 | tristate | 109 | tristate |
| 110 | 110 | ||
| 111 | config SND_SOC_IMX_PCM_FIQ | 111 | config SND_SOC_IMX_PCM |
| 112 | tristate | 112 | tristate |
| 113 | |||
| 114 | config SND_SOC_IMX_PCM_FIQ | ||
| 115 | bool | ||
| 113 | select FIQ | 116 | select FIQ |
| 117 | select SND_SOC_IMX_PCM | ||
| 114 | 118 | ||
| 115 | config SND_SOC_IMX_PCM_DMA | 119 | config SND_SOC_IMX_PCM_DMA |
| 116 | tristate | 120 | bool |
| 117 | select SND_SOC_DMAENGINE_PCM | 121 | select SND_SOC_DMAENGINE_PCM |
| 122 | select SND_SOC_IMX_PCM | ||
| 118 | 123 | ||
| 119 | config SND_SOC_IMX_AUDMUX | 124 | config SND_SOC_IMX_AUDMUX |
| 120 | tristate | 125 | tristate |
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile index ec1457915d7c..afd34794db53 100644 --- a/sound/soc/fsl/Makefile +++ b/sound/soc/fsl/Makefile | |||
| @@ -41,10 +41,7 @@ endif | |||
| 41 | obj-$(CONFIG_SND_SOC_IMX_SSI) += snd-soc-imx-ssi.o | 41 | obj-$(CONFIG_SND_SOC_IMX_SSI) += snd-soc-imx-ssi.o |
| 42 | obj-$(CONFIG_SND_SOC_IMX_AUDMUX) += snd-soc-imx-audmux.o | 42 | obj-$(CONFIG_SND_SOC_IMX_AUDMUX) += snd-soc-imx-audmux.o |
| 43 | 43 | ||
| 44 | obj-$(CONFIG_SND_SOC_IMX_PCM_FIQ) += snd-soc-imx-pcm-fiq.o | 44 | obj-$(CONFIG_SND_SOC_IMX_PCM) += snd-soc-imx-pcm.o |
| 45 | snd-soc-imx-pcm-fiq-y := imx-pcm-fiq.o imx-pcm.o | ||
| 46 | obj-$(CONFIG_SND_SOC_IMX_PCM_DMA) += snd-soc-imx-pcm-dma.o | ||
| 47 | snd-soc-imx-pcm-dma-y := imx-pcm-dma.o imx-pcm.o | ||
| 48 | 45 | ||
| 49 | # i.MX Machine Support | 46 | # i.MX Machine Support |
| 50 | snd-soc-eukrea-tlv320-objs := eukrea-tlv320.o | 47 | snd-soc-eukrea-tlv320-objs := eukrea-tlv320.o |
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c index bf363d8d044a..500f8ce55d78 100644 --- a/sound/soc/fsl/imx-pcm-dma.c +++ b/sound/soc/fsl/imx-pcm-dma.c | |||
| @@ -154,26 +154,7 @@ static struct snd_soc_platform_driver imx_soc_platform_mx2 = { | |||
| 154 | .pcm_free = imx_pcm_free, | 154 | .pcm_free = imx_pcm_free, |
| 155 | }; | 155 | }; |
| 156 | 156 | ||
| 157 | static int imx_soc_platform_probe(struct platform_device *pdev) | 157 | int imx_pcm_dma_init(struct platform_device *pdev) |
| 158 | { | 158 | { |
| 159 | return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2); | 159 | return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2); |
| 160 | } | 160 | } |
| 161 | |||
| 162 | static int imx_soc_platform_remove(struct platform_device *pdev) | ||
| 163 | { | ||
| 164 | snd_soc_unregister_platform(&pdev->dev); | ||
| 165 | return 0; | ||
| 166 | } | ||
| 167 | |||
| 168 | static struct platform_driver imx_pcm_driver = { | ||
| 169 | .driver = { | ||
| 170 | .name = "imx-pcm-audio", | ||
| 171 | .owner = THIS_MODULE, | ||
| 172 | }, | ||
| 173 | .probe = imx_soc_platform_probe, | ||
| 174 | .remove = imx_soc_platform_remove, | ||
| 175 | }; | ||
| 176 | |||
| 177 | module_platform_driver(imx_pcm_driver); | ||
| 178 | MODULE_LICENSE("GPL"); | ||
| 179 | MODULE_ALIAS("platform:imx-pcm-audio"); | ||
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c index 5ec362ae4d01..920f945cb2f4 100644 --- a/sound/soc/fsl/imx-pcm-fiq.c +++ b/sound/soc/fsl/imx-pcm-fiq.c | |||
| @@ -281,7 +281,7 @@ static struct snd_soc_platform_driver imx_soc_platform_fiq = { | |||
| 281 | .pcm_free = imx_pcm_fiq_free, | 281 | .pcm_free = imx_pcm_fiq_free, |
| 282 | }; | 282 | }; |
| 283 | 283 | ||
| 284 | static int imx_soc_platform_probe(struct platform_device *pdev) | 284 | int imx_pcm_fiq_init(struct platform_device *pdev) |
| 285 | { | 285 | { |
| 286 | struct imx_ssi *ssi = platform_get_drvdata(pdev); | 286 | struct imx_ssi *ssi = platform_get_drvdata(pdev); |
| 287 | int ret; | 287 | int ret; |
| @@ -314,23 +314,3 @@ failed_register: | |||
| 314 | 314 | ||
| 315 | return ret; | 315 | return ret; |
| 316 | } | 316 | } |
| 317 | |||
| 318 | static int imx_soc_platform_remove(struct platform_device *pdev) | ||
| 319 | { | ||
| 320 | snd_soc_unregister_platform(&pdev->dev); | ||
| 321 | return 0; | ||
| 322 | } | ||
| 323 | |||
| 324 | static struct platform_driver imx_pcm_driver = { | ||
| 325 | .driver = { | ||
| 326 | .name = "imx-fiq-pcm-audio", | ||
| 327 | .owner = THIS_MODULE, | ||
| 328 | }, | ||
| 329 | |||
| 330 | .probe = imx_soc_platform_probe, | ||
| 331 | .remove = imx_soc_platform_remove, | ||
| 332 | }; | ||
| 333 | |||
| 334 | module_platform_driver(imx_pcm_driver); | ||
| 335 | |||
| 336 | MODULE_LICENSE("GPL"); | ||
diff --git a/sound/soc/fsl/imx-pcm.c b/sound/soc/fsl/imx-pcm.c index 0c9f188ddc68..0d0625bfcb65 100644 --- a/sound/soc/fsl/imx-pcm.c +++ b/sound/soc/fsl/imx-pcm.c | |||
| @@ -31,6 +31,7 @@ int snd_imx_pcm_mmap(struct snd_pcm_substream *substream, | |||
| 31 | runtime->dma_bytes); | 31 | runtime->dma_bytes); |
| 32 | return ret; | 32 | return ret; |
| 33 | } | 33 | } |
| 34 | EXPORT_SYMBOL_GPL(snd_imx_pcm_mmap); | ||
| 34 | 35 | ||
| 35 | static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) | 36 | static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) |
| 36 | { | 37 | { |
| @@ -79,6 +80,7 @@ int imx_pcm_new(struct snd_soc_pcm_runtime *rtd) | |||
| 79 | out: | 80 | out: |
| 80 | return ret; | 81 | return ret; |
| 81 | } | 82 | } |
| 83 | EXPORT_SYMBOL_GPL(imx_pcm_new); | ||
| 82 | 84 | ||
| 83 | void imx_pcm_free(struct snd_pcm *pcm) | 85 | void imx_pcm_free(struct snd_pcm *pcm) |
| 84 | { | 86 | { |
| @@ -100,6 +102,39 @@ void imx_pcm_free(struct snd_pcm *pcm) | |||
| 100 | buf->area = NULL; | 102 | buf->area = NULL; |
| 101 | } | 103 | } |
| 102 | } | 104 | } |
| 105 | EXPORT_SYMBOL_GPL(imx_pcm_free); | ||
| 106 | |||
| 107 | static int imx_pcm_probe(struct platform_device *pdev) | ||
| 108 | { | ||
| 109 | if (strcmp(pdev->id_entry->name, "imx-fiq-pcm-audio") == 0) | ||
| 110 | return imx_pcm_fiq_init(pdev); | ||
| 111 | |||
| 112 | return imx_pcm_dma_init(pdev); | ||
| 113 | } | ||
| 114 | |||
| 115 | static int imx_pcm_remove(struct platform_device *pdev) | ||
| 116 | { | ||
| 117 | snd_soc_unregister_platform(&pdev->dev); | ||
| 118 | return 0; | ||
| 119 | } | ||
| 120 | |||
| 121 | static struct platform_device_id imx_pcm_devtype[] = { | ||
| 122 | { .name = "imx-pcm-audio", }, | ||
| 123 | { .name = "imx-fiq-pcm-audio", }, | ||
| 124 | { /* sentinel */ } | ||
| 125 | }; | ||
| 126 | MODULE_DEVICE_TABLE(platform, imx_pcm_devtype); | ||
| 127 | |||
| 128 | static struct platform_driver imx_pcm_driver = { | ||
| 129 | .driver = { | ||
| 130 | .name = "imx-pcm", | ||
| 131 | .owner = THIS_MODULE, | ||
| 132 | }, | ||
| 133 | .id_table = imx_pcm_devtype, | ||
| 134 | .probe = imx_pcm_probe, | ||
| 135 | .remove = imx_pcm_remove, | ||
| 136 | }; | ||
| 137 | module_platform_driver(imx_pcm_driver); | ||
| 103 | 138 | ||
| 104 | MODULE_DESCRIPTION("Freescale i.MX PCM driver"); | 139 | MODULE_DESCRIPTION("Freescale i.MX PCM driver"); |
| 105 | MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); | 140 | MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); |
diff --git a/sound/soc/fsl/imx-pcm.h b/sound/soc/fsl/imx-pcm.h index 83c0ed7d55c9..5ae13a13a353 100644 --- a/sound/soc/fsl/imx-pcm.h +++ b/sound/soc/fsl/imx-pcm.h | |||
| @@ -30,4 +30,22 @@ int snd_imx_pcm_mmap(struct snd_pcm_substream *substream, | |||
| 30 | int imx_pcm_new(struct snd_soc_pcm_runtime *rtd); | 30 | int imx_pcm_new(struct snd_soc_pcm_runtime *rtd); |
| 31 | void imx_pcm_free(struct snd_pcm *pcm); | 31 | void imx_pcm_free(struct snd_pcm *pcm); |
| 32 | 32 | ||
| 33 | #ifdef CONFIG_SND_SOC_IMX_PCM_DMA | ||
| 34 | int imx_pcm_dma_init(struct platform_device *pdev); | ||
| 35 | #else | ||
| 36 | static inline int imx_pcm_dma_init(struct platform_device *pdev) | ||
| 37 | { | ||
| 38 | return -ENODEV; | ||
| 39 | } | ||
| 40 | #endif | ||
| 41 | |||
| 42 | #ifdef CONFIG_SND_SOC_IMX_PCM_FIQ | ||
| 43 | int imx_pcm_fiq_init(struct platform_device *pdev); | ||
| 44 | #else | ||
| 45 | static inline int imx_pcm_fiq_init(struct platform_device *pdev) | ||
| 46 | { | ||
| 47 | return -ENODEV; | ||
| 48 | } | ||
| 49 | #endif | ||
| 50 | |||
| 33 | #endif /* _IMX_PCM_H */ | 51 | #endif /* _IMX_PCM_H */ |
