author	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-22 11:36:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-22 11:36:16 -0400
commit	54dee406374ce8adb352c48e175176247cb8db7c (patch)
tree	5a935f0e3bdbd28d869f5cd319891023e1152a3a
parent	651bae980e3f3e6acf0d297ced08f9d7af71a8c9 (diff)
parent	7a0a93c51799edc45ee57c6cc1679aa94f1e03d5 (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:

 - Fix SPE probe failure when backing auxbuf with high-order pages

 - Fix handling of DMA allocations from outside of the vmalloc area

 - Fix generation of build-id ELF section for vDSO object

 - Disable huge I/O mappings if kernel page table dumping is enabled

 - A few other minor fixes (comments, kconfig etc)

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: vdso: Explicitly add build-id option
  arm64/mm: Inhibit huge-vmap with ptdump
  arm64: Print physical address of page table base in show_pte()
  arm64: don't trash config with compat symbol if COMPAT is disabled
  arm64: assembler: Update comment above cond_yield_neon() macro
  drivers/perf: arm_spe: Don't error on high-order pages for aux buf
  arm64/iommu: handle non-remapped addresses in ->mmap and ->get_sgtable
-rw-r--r--	arch/arm64/Kconfig	2
-rw-r--r--	arch/arm64/include/asm/assembler.h	11
-rw-r--r--	arch/arm64/kernel/vdso/Makefile	4
-rw-r--r--	arch/arm64/mm/dma-mapping.c	10
-rw-r--r--	arch/arm64/mm/fault.c	5
-rw-r--r--	arch/arm64/mm/mmu.c	11
-rw-r--r--	drivers/perf/arm_spe_pmu.c	10
7 files changed, 30 insertions, 23 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 76f6e4765f49..ca9c175fb949 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -69,7 +69,7 @@ config ARM64
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
 	select ARCH_SUPPORTS_NUMA_BALANCING
-	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARM_AMBA
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 039fbd822ec6..92b6b7cf67dd 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -718,12 +718,11 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
  * the output section, any use of such directives is undefined.
  *
  * The yield itself consists of the following:
- * - Check whether the preempt count is exactly 1, in which case disabling
- *   preemption once will make the task preemptible. If this is not the case,
- *   yielding is pointless.
- * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
- *   kernel mode NEON (which will trigger a reschedule), and branch to the
- *   yield fixup code.
+ * - Check whether the preempt count is exactly 1 and a reschedule is also
+ *   needed. If so, calling of preempt_enable() in kernel_neon_end() will
+ *   trigger a reschedule. If it is not the case, yielding is pointless.
+ * - Disable and re-enable kernel mode NEON, and branch to the yield fixup
+ *   code.
  *
  * This macro sequence may clobber all CPU state that is not guaranteed by the
  * AAPCS to be preserved across an ordinary function call.
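
A sketch of the condition the rewritten comment describes, expressed as C for clarity. This is illustrative only and assumes the generic preempt_count()/test_thread_flag() helpers; the real check is performed in assembly inside the cond_yield_neon macro, and the helper name below is hypothetical.

#include <linux/preempt.h>
#include <linux/thread_info.h>

/*
 * Hypothetical helper, not part of the patch: yielding is only worthwhile
 * when a reschedule is pending and the preempt count is exactly 1, i.e.
 * the preempt_enable() in kernel_neon_end() is the only thing keeping
 * preemption disabled and will therefore trigger the reschedule.
 */
static inline bool neon_yield_is_worthwhile(void)
{
	return preempt_count() == 1 && test_thread_flag(TIF_NEED_RESCHED);
}
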
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 744b9dbaba03..fa230ff09aa1 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -12,8 +12,8 @@ obj-vdso := gettimeofday.o note.o sigreturn.o
 targets := $(obj-vdso) vdso.so vdso.so.dbg
 obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
 
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \
-		$(call ld-option, --hash-style=sysv) -n -T
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
+		--build-id -n -T
 
 # Disable gcov profiling for VDSO code
 GCOV_PROFILE := n
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 78c0a72f822c..674860e3e478 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -249,6 +249,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
+	if (!is_vmalloc_addr(cpu_addr)) {
+		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+		return __swiotlb_mmap_pfn(vma, pfn, size);
+	}
+
 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
 		/*
 		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
@@ -272,6 +277,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
+	if (!is_vmalloc_addr(cpu_addr)) {
+		struct page *page = virt_to_page(cpu_addr);
+		return __swiotlb_get_sgtable_page(sgt, page, size);
+	}
+
 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
 		/*
 		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
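
Both hunks add the same early dispatch: a cpu_addr that was never remapped lives in the kernel linear map, so virt_to_page() is valid on it and no vm_struct lookup is needed. A minimal sketch of that distinction, with a hypothetical helper name that is not part of the patch:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Illustration only: resolve a DMA cpu_addr to its backing struct page. */
static struct page *cpu_addr_to_page(void *cpu_addr)
{
	if (!is_vmalloc_addr(cpu_addr))
		return virt_to_page(cpu_addr);	/* linear map, non-remapped */

	return vmalloc_to_page(cpu_addr);	/* vmalloc/vmap remapping */
}
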
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0cb0e09995e1..dda234bcc020 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -171,9 +171,10 @@ static void show_pte(unsigned long addr)
 		return;
 	}
 
-	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
+	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp=%016lx\n",
 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-		 mm == &init_mm ? VA_BITS : (int) vabits_user, mm->pgd);
+		 mm == &init_mm ? VA_BITS : (int)vabits_user,
+		 (unsigned long)virt_to_phys(mm->pgd));
 	pgdp = pgd_offset(mm, addr);
 	pgd = READ_ONCE(*pgdp);
 	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a170c6369a68..a1bfc4413982 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -955,13 +955,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 
 int __init arch_ioremap_pud_supported(void)
 {
-	/* only 4k granule supports level 1 block mappings */
-	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+	/*
+	 * Only 4k granule supports level 1 block mappings.
+	 * SW table walks can't handle removal of intermediate entries.
+	 */
+	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+	       !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
 }
 
 int __init arch_ioremap_pmd_supported(void)
 {
-	return 1;
+	/* See arch_ioremap_pud_supported() */
+	return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
 }
 
 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
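
IS_ENABLED() folds to a compile-time 0 or 1, so with CONFIG_ARM64_PTDUMP_DEBUGFS=y both helpers now return 0 and huge I/O mappings are never attempted. A simplified sketch of how a caller could consult the hook before installing a block mapping; the wrapper below is hypothetical and not the actual generic ioremap code:

/*
 * Hypothetical caller: only install a PUD block mapping when the
 * architecture reports it is safe; returning 0 lets the caller fall back
 * to PMD- or PTE-sized mappings instead.
 */
static int try_ioremap_pud(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	if (!arch_ioremap_pud_supported())
		return 0;

	return pud_set_huge(pudp, phys, prot);
}
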
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 7cb766dafe85..e120f933412a 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -855,16 +855,8 @@ static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
 	if (!pglist)
 		goto out_free_buf;
 
-	for (i = 0; i < nr_pages; ++i) {
-		struct page *page = virt_to_page(pages[i]);
-
-		if (PagePrivate(page)) {
-			pr_warn("unexpected high-order page for auxbuf!");
-			goto out_free_pglist;
-		}
-
-		pglist[i] = virt_to_page(pages[i]);
-	}
+	for (i = 0; i < nr_pages; ++i)
+		pglist[i] = virt_to_page(pages[i]);
 
 	buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
 	if (!buf->base)