 36 files changed, 259 insertions(+), 151 deletions(-)
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 4f798aa671dd..3817a3e2146c 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -24,6 +24,7 @@ config MICROBLAZE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
+	select NO_BOOTMEM
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_OPROFILE
diff --git a/arch/microblaze/Kconfig.platform b/arch/microblaze/Kconfig.platform
index 6996f397c16c..f7f1739c11b9 100644
--- a/arch/microblaze/Kconfig.platform
+++ b/arch/microblaze/Kconfig.platform
@@ -8,7 +8,6 @@ menu "Platform options"
 
 config OPT_LIB_FUNCTION
 	bool "Optimalized lib function"
-	depends on CPU_LITTLE_ENDIAN
 	default y
 	help
 	  Allows turn on optimalized library function (memcpy and memmove).
@@ -21,6 +20,7 @@ config OPT_LIB_FUNCTION
 config OPT_LIB_ASM
 	bool "Optimalized lib function ASM"
 	depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
+	depends on CPU_BIG_ENDIAN
 	default n
 	help
 	  Allows turn on optimalized library function (memcpy and memmove).
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index be84a4d3917f..7c968c1d1729 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -44,7 +44,6 @@ void machine_shutdown(void);
 void machine_halt(void);
 void machine_power_off(void);
 
-extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
 # endif /* __ASSEMBLY__ */
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index 62021d7e249e..fdc48bb065d8 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -29,10 +29,6 @@
  * between mem locations with size of xfer spec'd in bytes
  */
 
-#ifdef __MICROBLAZEEL__
-#error Microblaze LE not support ASM optimized lib func. Disable OPT_LIB_ASM.
-#endif
-
 #include <linux/linkage.h>
 	.text
 	.globl	memcpy
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 434639f9a3a6..df6de7ccdc2e 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -32,9 +32,6 @@ int mem_init_done;
 #ifndef CONFIG_MMU
 unsigned int __page_offset;
 EXPORT_SYMBOL(__page_offset);
-
-#else
-static int init_bootmem_done;
 #endif /* CONFIG_MMU */
 
 char *klimit = _end;
@@ -117,7 +114,6 @@ static void __init paging_init(void)
 
 void __init setup_memory(void)
 {
-	unsigned long map_size;
 	struct memblock_region *reg;
 
 #ifndef CONFIG_MMU
@@ -174,17 +170,6 @@ void __init setup_memory(void)
 	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
 	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
 
-	/*
-	 * Find an area to use for the bootmem bitmap.
-	 * We look for the first area which is at least
-	 * 128kB in length (128kB is enough for a bitmap
-	 * for 4GB of memory, using 4kB pages), plus 1 page
-	 * (in case the address isn't page-aligned).
-	 */
-	map_size = init_bootmem_node(NODE_DATA(0),
-		PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
-	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
-
 	/* Add active regions with valid PFNs */
 	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
@@ -196,32 +181,9 @@ void __init setup_memory(void)
 				  &memblock.memory, 0);
 	}
 
-	/* free bootmem is whole main memory */
-	free_bootmem_with_active_regions(0, max_low_pfn);
-
-	/* reserve allocate blocks */
-	for_each_memblock(reserved, reg) {
-		unsigned long top = reg->base + reg->size - 1;
-
-		pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
-			 (u32) reg->base, (u32) reg->size, top,
-			 memory_start + lowmem_size - 1);
-
-		if (top <= (memory_start + lowmem_size - 1)) {
-			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
-		} else if (reg->base < (memory_start + lowmem_size - 1)) {
-			unsigned long trunc_size = memory_start + lowmem_size -
-				reg->base;
-			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
-		}
-	}
-
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
 
-#ifdef CONFIG_MMU
-	init_bootmem_done = 1;
-#endif
 	paging_init();
 }
 
@@ -398,30 +360,16 @@ asmlinkage void __init mmu_init(void)
 /* This is only called until mem_init is done. */
 void __init *early_get_page(void)
 {
-	void *p;
-	if (init_bootmem_done) {
-		p = alloc_bootmem_pages(PAGE_SIZE);
-	} else {
-		/*
-		 * Mem start + kernel_tlb -> here is limit
-		 * because of mem mapping from head.S
-		 */
-		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-				memory_start + kernel_tlb));
-	}
-	return p;
+	/*
+	 * Mem start + kernel_tlb -> here is limit
+	 * because of mem mapping from head.S
+	 */
+	return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+				memory_start + kernel_tlb));
 }
 
 #endif /* CONFIG_MMU */
 
-void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask)
-{
-	if (mem_init_done)
-		return kmalloc(size, mask);
-	else
-		return alloc_bootmem(size);
-}
-
 void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	void *p;
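Note on the pattern above: the removed alloc_maybe_bootmem() and the surviving zalloc_maybe_bootmem() both pick an allocator by boot phase. A minimal sketch of that idea in the new bootmem-free world, assuming a memblock-backed early path; the helper name and its early branch are illustrative, not the literal remaining body:

void * __ref zalloc_maybe_bootmem_sketch(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done) {
		/* slab is up: ordinary zeroed allocation */
		p = kzalloc(size, mask);
	} else {
		/* early boot: carve the memory out of memblock, then zero it */
		p = __va(memblock_alloc_base(size, SMP_CACHE_BYTES,
					     MEMBLOCK_ALLOC_ACCESSIBLE));
		memset(p, 0, size);
	}
	return p;
}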
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 79089778725b..e3b45546d589 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -543,7 +543,8 @@ void flush_cache_mm(struct mm_struct *mm)
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
-		flush_tlb_all();
+		if (mm->context)
+			flush_tlb_all();
 		flush_cache_all();
 		return;
 	}
@@ -571,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm)
 			pfn = pte_pfn(*ptep);
 			if (!pfn_valid(pfn))
 				continue;
+			if (unlikely(mm->context))
+				flush_tlb_page(vma, addr);
 			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
 		}
 	}
@@ -579,26 +582,46 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
+	pgd_t *pgd;
+	unsigned long addr;
+
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    end - start >= parisc_cache_flush_threshold) {
-		flush_tlb_range(vma, start, end);
+		if (vma->vm_mm->context)
+			flush_tlb_range(vma, start, end);
 		flush_cache_all();
 		return;
 	}
 
-	flush_user_dcache_range_asm(start, end);
-	if (vma->vm_flags & VM_EXEC)
-		flush_user_icache_range_asm(start, end);
-	flush_tlb_range(vma, start, end);
+	if (vma->vm_mm->context == mfsp(3)) {
+		flush_user_dcache_range_asm(start, end);
+		if (vma->vm_flags & VM_EXEC)
+			flush_user_icache_range_asm(start, end);
+		flush_tlb_range(vma, start, end);
+		return;
+	}
+
+	pgd = vma->vm_mm->pgd;
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+		unsigned long pfn;
+		pte_t *ptep = get_ptep(pgd, addr);
+		if (!ptep)
+			continue;
+		pfn = pte_pfn(*ptep);
+		if (pfn_valid(pfn)) {
+			if (unlikely(vma->vm_mm->context))
+				flush_tlb_page(vma, addr);
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
+	}
 }
 
 void
 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
-	BUG_ON(!vma->vm_mm->context);
-
 	if (pfn_valid(pfn)) {
-		flush_tlb_page(vma, vmaddr);
+		if (likely(vma->vm_mm->context))
+			flush_tlb_page(vma, vmaddr);
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
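Each hunk above adds the same guard before a TLB purge; stated once in isolation: a parisc mm whose space id has already been released (mm->context == 0) owns no TLB entries, so only the cache writeback remains useful. A hypothetical helper, not part of the patch, expressing that guard:

static inline void flush_tlb_page_checked(struct vm_area_struct *vma,
					  unsigned long addr)
{
	/* no space id: nothing of ours can be in the TLB */
	if (vma->vm_mm->context)
		flush_tlb_page(vma, addr);
}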
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 847ddffbf38a..b5cfab711651 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -163,13 +163,10 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 	pte_unmap(pte);
 }
 
-void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-		pmd_t *pmdp, pmd_t pmd)
-{
-	pmd_t orig = *pmdp;
-
-	*pmdp = pmd;
 
+static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
+			   pmd_t orig, pmd_t pmd)
+{
 	if (mm == &init_mm)
 		return;
 
@@ -219,6 +216,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	}
 }
 
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t orig = *pmdp;
+
+	*pmdp = pmd;
+	__set_pmd_acct(mm, addr, orig, pmd);
+}
+
 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
 {
@@ -227,6 +233,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 	do {
 		old = *pmdp;
 	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
+	__set_pmd_acct(vma->vm_mm, address, old, pmd);
 
 	return old;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 74d2efaec52f..7a073ac5f9c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
 		/* don't do anything if sink is not display port, i.e.,
 		 * passive dp->(dvi|hdmi) adaptor
 		 */
-		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-			int saved_dpms = connector->dpms;
-			/* Only turn off the display if it's physically disconnected */
-			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
-				/* Don't try to start link training before we
-				 * have the dpcd */
-				if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
-					return;
-
-				/* set it to OFF so that drm_helper_connector_dpms()
-				 * won't return immediately since the current state
-				 * is ON at this point.
-				 */
-				connector->dpms = DRM_MODE_DPMS_OFF;
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-			}
-			connector->dpms = saved_dpms;
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+		    amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+		    amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+			/* Don't start link training before we have the DPCD */
+			if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+				return;
+
+			/* Turn the connector off and back on immediately, which
+			 * will trigger link training
+			 */
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e48b4ec88c8c..ca6c931dabfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 
 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		amdgpu_mn_unregister(robj);
 		amdgpu_bo_unref(&robj);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 54f06c959340..2264c5c97009 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -352,6 +352,7 @@ struct amdgpu_mode_info {
 	u16 firmware_flags;
 	/* pointer to backlight encoder */
 	struct amdgpu_encoder *bl_encoder;
+	u8 bl_level; /* saved backlight level */
 	struct amdgpu_audio audio; /* audio stuff */
 	int num_crtc; /* number of crtcs */
 	int num_hpd; /* number of hpd pins */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5c4c3e0d527b..1220322c1680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -56,6 +56,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
 	amdgpu_bo_kunmap(bo);
 
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
 	if (!list_empty(&bo->shadow_list)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 2af26d2da127..d702fb8e3427 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -34,7 +34,7 @@
 #include <linux/backlight.h>
 #include "bif/bif_4_1_d.h"
 
-static u8
+u8
 amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 {
 	u8 backlight_level;
@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 	return backlight_level;
 }
 
-static void
+void
 amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
 						   u8 backlight_level)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
index 2bdec40515ce..f77cbdef679e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
@@ -25,6 +25,11 @@
 #define __ATOMBIOS_ENCODER_H__
 
 u8
+amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
+void
+amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
+						   u8 backlight_level);
+u8
 amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
 void
 amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index f34bc68aadfb..022f303463fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2921,6 +2921,11 @@ static int dce_v10_0_hw_fini(void *handle)
 
 static int dce_v10_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v10_0_hw_fini(handle);
 }
 
@@ -2929,6 +2934,9 @@ static int dce_v10_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v10_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 26378bd6aba4..800a9f36ab4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3047,6 +3047,11 @@ static int dce_v11_0_hw_fini(void *handle)
 
 static int dce_v11_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v11_0_hw_fini(handle);
 }
 
@@ -3055,6 +3060,9 @@ static int dce_v11_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v11_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index a712f4b285f6..b8368f69ce1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2787,6 +2787,11 @@ static int dce_v6_0_hw_fini(void *handle)
 
 static int dce_v6_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v6_0_hw_fini(handle);
 }
 
@@ -2795,6 +2800,9 @@ static int dce_v6_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v6_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c008dc030687..012e0a9ae0ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2819,6 +2819,11 @@ static int dce_v8_0_hw_fini(void *handle)
 
 static int dce_v8_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v8_0_hw_fini(handle);
 }
 
@@ -2827,6 +2832,9 @@ static int dce_v8_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v8_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index c8454ac43fae..db6b94dda5df 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -471,6 +471,7 @@ struct parser_exec_state {
 	 * used when ret from 2nd level batch buffer
 	 */
 	int saved_buf_addr_type;
+	bool is_ctx_wa;
 
 	struct cmd_info *info;
 
@@ -1715,6 +1716,11 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	bb->accessing = true;
 	bb->bb_start_cmd_va = s->ip_va;
 
+	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
+		bb->bb_offset = s->ip_va - s->rb_va;
+	else
+		bb->bb_offset = 0;
+
 	/*
 	 * ip_va saves the virtual address of the shadow batch buffer, while
 	 * ip_gma saves the graphics address of the original batch buffer.
@@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	s.ring_tail = gma_tail;
 	s.rb_va = workload->shadow_ring_buffer_va;
 	s.workload = workload;
+	s.is_ctx_wa = false;
 
 	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
 		gma_head == gma_tail)
@@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	s.ring_tail = gma_tail;
 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
 	s.workload = workload;
+	s.is_ctx_wa = true;
 
 	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
 		ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 256f1bb522b7..152df3d0291e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -394,9 +394,11 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 	 * performace for batch mmio read/write, so we need
 	 * handle forcewake mannually.
 	 */
+	intel_runtime_pm_get(dev_priv);
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 	switch_mmio(pre, next, ring_id);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_runtime_pm_put(dev_priv);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b55b3580ca1d..d74d6f05c62c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,6 +52,54 @@ static void set_context_pdp_root_pointer(
 		pdp_pair[i].val = pdp[7 - i];
 }
 
+/*
+ * when populating shadow ctx from guest, we should not overrride oa related
+ * registers, so that they will not be overlapped by guest oa configs. Thus
+ * made it possible to capture oa data from host for both host and guests.
+ */
+static void sr_oa_regs(struct intel_vgpu_workload *workload,
+		u32 *reg_state, bool save)
+{
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
+	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+	int i = 0;
+	u32 flex_mmio[] = {
+		i915_mmio_reg_offset(EU_PERF_CNTL0),
+		i915_mmio_reg_offset(EU_PERF_CNTL1),
+		i915_mmio_reg_offset(EU_PERF_CNTL2),
+		i915_mmio_reg_offset(EU_PERF_CNTL3),
+		i915_mmio_reg_offset(EU_PERF_CNTL4),
+		i915_mmio_reg_offset(EU_PERF_CNTL5),
+		i915_mmio_reg_offset(EU_PERF_CNTL6),
+	};
+
+	if (!workload || !reg_state || workload->ring_id != RCS)
+		return;
+
+	if (save) {
+		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+
+			workload->flex_mmio[i] = reg_state[state_offset + 1];
+		}
+	} else {
+		reg_state[ctx_oactxctrl] =
+			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
+		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+			u32 mmio = flex_mmio[i];
+
+			reg_state[state_offset] = mmio;
+			reg_state[state_offset + 1] = workload->flex_mmio[i];
+		}
+	}
+}
+
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -98,6 +146,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	shadow_ring_context = kmap(page);
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
 		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
@@ -122,6 +171,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			sizeof(*shadow_ring_context),
 			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
 	kunmap(page);
 	return 0;
 }
@@ -376,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			goto err;
 		}
 
+		/* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va
+		 * is only updated into ring_scan_buffer, not real ring address
+		 * allocated in later copy_workload_to_ring_buffer. pls be noted
+		 * shadow_ring_buffer_va is now pointed to real ring buffer va
+		 * in copy_workload_to_ring_buffer.
+		 */
+
+		if (bb->bb_offset)
+			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+				+ bb->bb_offset;
+
 		/* relocate shadow batch buffer */
 		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
 		if (gmadr_bytes == 8)
@@ -1044,10 +1105,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
-	s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
-			sizeof(struct intel_vgpu_workload), 0,
-			SLAB_HWCACHE_ALIGN,
-			NULL);
+	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
+			sizeof(struct intel_vgpu_workload), 0,
+			SLAB_HWCACHE_ALIGN,
+			offsetof(struct intel_vgpu_workload, rb_tail),
+			sizeof_field(struct intel_vgpu_workload, rb_tail),
+			NULL);
 
 	if (!s->workloads) {
 		ret = -ENOMEM;
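The final hunk above swaps kmem_cache_create() for kmem_cache_create_usercopy(), whose two extra arguments whitelist the sole region of each object that may cross the user/kernel boundary when hardened usercopy checking is enabled. A sketch of the API shape on a made-up struct (example_obj and its fields are illustrative, not GVT code):

struct example_obj {
	u64 kernel_only_state;	/* never exposed to user space */
	char user_visible[64];	/* the whitelisted window below */
};

static struct kmem_cache *example_cache;

static int __init example_cache_init(void)
{
	example_cache = kmem_cache_create_usercopy("example_obj",
			sizeof(struct example_obj), 0,
			SLAB_HWCACHE_ALIGN,
			offsetof(struct example_obj, user_visible),
			sizeof_field(struct example_obj, user_visible),
			NULL);
	return example_cache ? 0 : -ENOMEM;
}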
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index ff175a98b19e..a79a4f60637e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -110,6 +110,10 @@ struct intel_vgpu_workload {
 	/* shadow batch buffer */
 	struct list_head shadow_bb;
 	struct intel_shadow_wa_ctx wa_ctx;
+
+	/* oa registers */
+	u32 oactxctrl;
+	u32 flex_mmio[7];
 };
 
 struct intel_vgpu_shadow_bb {
@@ -120,6 +124,7 @@ struct intel_vgpu_shadow_bb {
 	u32 *bb_start_cmd_va;
 	unsigned int clflush;
 	bool accessing;
+	unsigned long bb_offset;
 };
 
 #define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 66ee9d888d16..6ff5d655c202 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -434,20 +434,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 
+		/*
+		 * If both shared fences and an exclusive fence exist,
+		 * then by construction the shared fences must be later
+		 * than the exclusive fence. If we successfully wait for
+		 * all the shared fences, we know that the exclusive fence
+		 * must all be signaled. If all the shared fences are
+		 * signaled, we can prune the array and recover the
+		 * floating references on the fences/requests.
+		 */
 		prune_fences = count && timeout >= 0;
 	} else {
 		excl = reservation_object_get_excl_rcu(resv);
 	}
 
-	if (excl && timeout >= 0) {
+	if (excl && timeout >= 0)
 		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
 						     rps_client);
-		prune_fences = timeout >= 0;
-	}
 
 	dma_fence_put(excl);
 
-	/* Oportunistically prune the fences iff we know they have *all* been
+	/*
+	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled and that the reservation object has not been changed (i.e.
 	 * no new fences have been added).
 	 */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index b33d2158c234..e5e6f6bb2b05 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -304,8 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 val;
+	bool boost = false;
 	ssize_t ret;
+	u32 val;
 
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)
@@ -317,8 +318,13 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 		return -EINVAL;
 
 	mutex_lock(&dev_priv->pcu_lock);
-	rps->boost_freq = val;
+	if (val != rps->boost_freq) {
+		rps->boost_freq = val;
+		boost = atomic_read(&rps->num_waiters);
+	}
 	mutex_unlock(&dev_priv->pcu_lock);
+	if (boost)
+		schedule_work(&rps->work);
 
 	return count;
 }
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 35c5299feab6..a29868cd30c7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -620,19 +620,15 @@ static int
 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+	int backlight_controller = dev_priv->vbt.backlight.controller;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	/* We should never land here with regular DP ports */
 	WARN_ON(!intel_dp_is_edp(intel_dp));
 
-	/*
-	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
-	 * mapping needs to be retrieved from VBT, for now just hard-code to
-	 * use instance #0 always.
-	 */
 	if (!intel_dp->pps_reset)
-		return 0;
+		return backlight_controller;
 
 	intel_dp->pps_reset = false;
 
@@ -642,7 +638,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 	 */
 	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
 
-	return 0;
+	return backlight_controller;
 }
 
 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 380f340204e8..debbbf0fd4bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div = 1025;
 	u32 val;
 
@@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div = 1025;
 	u32 val = (bd->props.brightness * div) / 100;
 
@@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div, val;
 
 	div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div, val;
 
 	div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector)
 		return -ENODEV;
 	}
 
-	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
+	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))
 		return 0;
 
 	if (drm->client.device.info.chipset <= 0xa0 ||
@@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev)
 	struct nvif_device *device = &drm->client.device;
 	struct drm_connector *connector;
 
+	INIT_LIST_HEAD(&drm->bl_connectors);
+
 	if (apple_gmux_present()) {
 		NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
 		return 0;
 	}
 
-	INIT_LIST_HEAD(&drm->bl_connectors);
-
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
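All six nouveau hunks derive the output-resource index from the DCB mask instead of the cached nv_encoder->or. The kernel's ffs() returns the 1-based position of the least significant set bit (0 when no bit is set), so ffs(mask) - 1 yields the 0-based index the NV50_PDISP_SOR_PWM_* macros expect. A hypothetical one-liner making the conversion explicit:

static inline int or_index_from_dcb_mask(u32 dcb_or_mask)
{
	return ffs(dcb_or_mask) - 1;	/* e.g. 0x04 -> 2; returns -1 if no bit set */
}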
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 93946dcee319..1c12e58f44c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
 
 		tail = this->addr + this->size;
 		if (vmm->func->page_block && next && next->page != p)
-			tail = ALIGN_DOWN(addr, vmm->func->page_block);
+			tail = ALIGN_DOWN(tail, vmm->func->page_block);
 
 		if (addr <= tail && tail - addr >= size) {
 			rb_erase(&this->tree, &vmm->free);
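The one-word fix above matters because ALIGN_DOWN(x, a) rounds x down to a multiple of a, and the clipped limit has to come from the end of the candidate range, not its start. A worked example with made-up values, assuming a page_block of 0x10000:

unsigned long addr = 0x21000, tail = 0x38000, page_block = 0x10000;

unsigned long bad  = ALIGN_DOWN(addr, page_block);	/* 0x20000: below addr, the range collapses */
unsigned long good = ALIGN_DOWN(tail, page_block);	/* 0x30000: range end clipped to the boundary */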
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a9962ffba720..27d8e7dd2d06 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
 
 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		radeon_mn_unregister(robj);
 		radeon_bo_unref(&robj);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 15404af9d740..31f5ad605e59 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
 	WARN_ON_ONCE(!list_empty(&bo->va));
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	kfree(bo);
 }
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f94b2d8c744a..26484648d090 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1519,6 +1519,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
 		if (!node)
 			break;
 		bytenr = node->val;
+		shared.share_count = 0;
 		cond_resched();
 	}
 
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index dec0907dfb8a..fcfc20de2df3 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1370,6 +1370,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
 		stripe_start = stripe->physical;
 		if (physical >= stripe_start &&
 		    physical < stripe_start + rbio->stripe_len &&
+		    stripe->dev->bdev &&
 		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
 		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
 			return i;
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index d11c70bff5a9..a8bafed931f4 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -423,7 +423,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
 {
 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 
-	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->nodesize);
+	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->nodesize);
 }
 
 BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
@@ -433,7 +433,8 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
 {
 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 
-	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			fs_info->super_copy->sectorsize);
 }
 
 BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
@@ -443,7 +444,8 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
 {
 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 
-	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			fs_info->super_copy->sectorsize);
 }
 
 BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 9220f004001c..04f07144b45c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1722,23 +1722,19 @@ static void update_super_roots(struct btrfs_fs_info *fs_info)
 
 	super = fs_info->super_copy;
 
-	/* update latest btrfs_super_block::chunk_root refs */
 	root_item = &fs_info->chunk_root->root_item;
-	btrfs_set_super_chunk_root(super, root_item->bytenr);
-	btrfs_set_super_chunk_root_generation(super, root_item->generation);
-	btrfs_set_super_chunk_root_level(super, root_item->level);
+	super->chunk_root = root_item->bytenr;
+	super->chunk_root_generation = root_item->generation;
+	super->chunk_root_level = root_item->level;
 
-	/* update latest btrfs_super_block::root refs */
 	root_item = &fs_info->tree_root->root_item;
-	btrfs_set_super_root(super, root_item->bytenr);
-	btrfs_set_super_generation(super, root_item->generation);
-	btrfs_set_super_root_level(super, root_item->level);
-
+	super->root = root_item->bytenr;
+	super->generation = root_item->generation;
+	super->root_level = root_item->level;
 	if (btrfs_test_opt(fs_info, SPACE_CACHE))
-		btrfs_set_super_cache_generation(super, root_item->generation);
+		super->cache_generation = root_item->generation;
 	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
-		btrfs_set_super_uuid_tree_generation(super,
-						     root_item->generation);
+		super->uuid_tree_generation = root_item->generation;
 }
 
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
diff --git a/fs/dcache.c b/fs/dcache.c
index 7c38f39958bc..8945e6cabd93 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -647,11 +647,16 @@ again:
 		spin_unlock(&parent->d_lock);
 		goto again;
 	}
-	rcu_read_unlock();
-	if (parent != dentry)
+	if (parent != dentry) {
 		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-	else
+		if (unlikely(dentry->d_lockref.count < 0)) {
+			spin_unlock(&parent->d_lock);
+			parent = NULL;
+		}
+	} else {
 		parent = NULL;
+	}
+	rcu_read_unlock();
 	return parent;
 }
 
@@ -2474,7 +2479,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
 
 retry:
 	rcu_read_lock();
-	seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
+	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
 	r_seq = read_seqbegin(&rename_lock);
 	dentry = __d_lookup_rcu(parent, name, &d_seq);
 	if (unlikely(dentry)) {
@@ -2495,8 +2500,14 @@ retry:
 		rcu_read_unlock();
 		goto retry;
 	}
+
+	if (unlikely(seq & 1)) {
+		rcu_read_unlock();
+		goto retry;
+	}
+
 	hlist_bl_lock(b);
-	if (unlikely(parent->d_inode->i_dir_seq != seq)) {
+	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
 		hlist_bl_unlock(b);
 		rcu_read_unlock();
 		goto retry;
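Both d_alloc_parallel() hunks restore the full i_dir_seq protocol: the low bit marks an in-flight directory operation, so rather than masking it off at load time the reader must back off while the value is odd, then re-check the unmasked value before committing. A generic sketch of that reader side; the struct and field names are illustrative, not the dcache internals:

struct dir_seq_holder {
	unsigned int i_dir_seq;	/* even: stable, odd: update in flight */
};

static bool lookup_is_stable(struct dir_seq_holder *dir)
{
	unsigned int seq = smp_load_acquire(&dir->i_dir_seq);

	if (seq & 1)		/* writer mid-update: caller should retry */
		return false;
	/* ... speculative RCU lookup would happen here ... */
	return READ_ONCE(dir->i_dir_seq) == seq;	/* false => retry */
}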
diff --git a/fs/namei.c b/fs/namei.c
index 921ae32dbc80..cafa365eeb70 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -559,9 +559,10 @@ static int __nd_alloc_stack(struct nameidata *nd)
 static bool path_connected(const struct path *path)
 {
 	struct vfsmount *mnt = path->mnt;
+	struct super_block *sb = mnt->mnt_sb;
 
-	/* Only bind mounts can have disconnected paths */
-	if (mnt->mnt_root == mnt->mnt_sb->s_root)
+	/* Bind mounts and multi-root filesystems can have disconnected paths */
+	if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root))
 		return true;
 
 	return is_subdir(path->dentry, mnt->mnt_root);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 29bacdc56f6a..5e470e233c83 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2631,6 +2631,8 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
 		/* initial superblock/root creation */
 		mount_info->fill_super(s, mount_info);
 		nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
+		if (!(server->flags & NFS_MOUNT_UNSHARED))
+			s->s_iflags |= SB_I_MULTIROOT;
 	}
 
 	mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 79c413985305..c6baf767619e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1317,6 +1317,7 @@ extern int send_sigurg(struct fown_struct *fown);
 #define SB_I_CGROUPWB	0x00000001	/* cgroup-aware writeback enabled */
 #define SB_I_NOEXEC	0x00000002	/* Ignore executables on this fs */
 #define SB_I_NODEV	0x00000004	/* Ignore devices on this fs */
+#define SB_I_MULTIROOT	0x00000008	/* Multiple roots to the dentry tree */
 
 /* sb->s_iflags to limit user namespace mounts */
 #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */