diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-07-21 19:46:53 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-07-21 19:46:53 -0400 |
commit | ffb48e7924768d760bcd63212c8530c010059215 (patch) | |
tree | d16508564169a50a3ccc6b52f8f264245fa7e51b | |
parent | 55b636b419a269e167f7d6a9265e5e316a89cd5f (diff) | |
parent | b03897cf318dfc47de33a7ecbc7655584266f034 (diff) |
Merge tag 'powerpc-4.18-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:
"Two regression fixes, one for xmon disassembly formatting and the
other to fix the E500 build.
Two commits to fix a potential security issue in the VFIO code under
obscure circumstances.
And finally a fix to the Power9 idle code to restore SPRG3, which is
user visible and used for sched_getcpu().
Thanks to: Alexey Kardashevskiy, David Gibson, Gautham R. Shenoy,
James Clarke"
* tag 'powerpc-4.18-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/powernv: Fix save/restore of SPRG3 on entry/exit from stop (idle)
powerpc/Makefile: Assemble with -me500 when building for E500
KVM: PPC: Check if IOMMU page is contained in the pinned physical page
vfio/spapr: Use IOMMU pageshift rather than pagesize
powerpc/xmon: Fix disassembly since printf changes
-rw-r--r-- | arch/powerpc/Makefile | 1 | ||||
-rw-r--r-- | arch/powerpc/include/asm/mmu_context.h | 4 | ||||
-rw-r--r-- | arch/powerpc/kernel/idle_book3s.S | 2 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_64_vio.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_64_vio_hv.c | 6 | ||||
-rw-r--r-- | arch/powerpc/mm/mmu_context_iommu.c | 37 | ||||
-rw-r--r-- | arch/powerpc/xmon/xmon.c | 4 | ||||
-rw-r--r-- | drivers/vfio/vfio_iommu_spapr_tce.c | 10 |
8 files changed, 52 insertions, 14 deletions
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 2ea575cb3401..fb96206de317 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile | |||
@@ -243,6 +243,7 @@ endif | |||
243 | cpu-as-$(CONFIG_4xx) += -Wa,-m405 | 243 | cpu-as-$(CONFIG_4xx) += -Wa,-m405 |
244 | cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) | 244 | cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) |
245 | cpu-as-$(CONFIG_E200) += -Wa,-me200 | 245 | cpu-as-$(CONFIG_E200) += -Wa,-me200 |
246 | cpu-as-$(CONFIG_E500) += -Wa,-me500 | ||
246 | cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 | 247 | cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 |
247 | cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc) | 248 | cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc) |
248 | 249 | ||
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 896efa559996..79d570cbf332 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h | |||
@@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm( | |||
35 | extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, | 35 | extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, |
36 | unsigned long ua, unsigned long entries); | 36 | unsigned long ua, unsigned long entries); |
37 | extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, | 37 | extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, |
38 | unsigned long ua, unsigned long *hpa); | 38 | unsigned long ua, unsigned int pageshift, unsigned long *hpa); |
39 | extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, | 39 | extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, |
40 | unsigned long ua, unsigned long *hpa); | 40 | unsigned long ua, unsigned int pageshift, unsigned long *hpa); |
41 | extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); | 41 | extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); |
42 | extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); | 42 | extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); |
43 | #endif | 43 | #endif |
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index e734f6e45abc..689306118b48 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S | |||
@@ -144,7 +144,9 @@ power9_restore_additional_sprs: | |||
144 | mtspr SPRN_MMCR1, r4 | 144 | mtspr SPRN_MMCR1, r4 |
145 | 145 | ||
146 | ld r3, STOP_MMCR2(r13) | 146 | ld r3, STOP_MMCR2(r13) |
147 | ld r4, PACA_SPRG_VDSO(r13) | ||
147 | mtspr SPRN_MMCR2, r3 | 148 | mtspr SPRN_MMCR2, r3 |
149 | mtspr SPRN_SPRG3, r4 | ||
148 | blr | 150 | blr |
149 | 151 | ||
150 | /* | 152 | /* |
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index d066e37551ec..8c456fa691a5 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c | |||
@@ -449,7 +449,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, | |||
449 | /* This only handles v2 IOMMU type, v1 is handled via ioctl() */ | 449 | /* This only handles v2 IOMMU type, v1 is handled via ioctl() */ |
450 | return H_TOO_HARD; | 450 | return H_TOO_HARD; |
451 | 451 | ||
452 | if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa))) | 452 | if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) |
453 | return H_HARDWARE; | 453 | return H_HARDWARE; |
454 | 454 | ||
455 | if (mm_iommu_mapped_inc(mem)) | 455 | if (mm_iommu_mapped_inc(mem)) |
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 925fc316a104..5b298f5a1a14 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c | |||
@@ -279,7 +279,8 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, | |||
279 | if (!mem) | 279 | if (!mem) |
280 | return H_TOO_HARD; | 280 | return H_TOO_HARD; |
281 | 281 | ||
282 | if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa))) | 282 | if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift, |
283 | &hpa))) | ||
283 | return H_HARDWARE; | 284 | return H_HARDWARE; |
284 | 285 | ||
285 | pua = (void *) vmalloc_to_phys(pua); | 286 | pua = (void *) vmalloc_to_phys(pua); |
@@ -469,7 +470,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, | |||
469 | 470 | ||
470 | mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); | 471 | mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); |
471 | if (mem) | 472 | if (mem) |
472 | prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0; | 473 | prereg = mm_iommu_ua_to_hpa_rm(mem, ua, |
474 | IOMMU_PAGE_SHIFT_4K, &tces) == 0; | ||
473 | } | 475 | } |
474 | 476 | ||
475 | if (!prereg) { | 477 | if (!prereg) { |
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index abb43646927a..a4ca57612558 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/hugetlb.h> | 19 | #include <linux/hugetlb.h> |
20 | #include <linux/swap.h> | 20 | #include <linux/swap.h> |
21 | #include <asm/mmu_context.h> | 21 | #include <asm/mmu_context.h> |
22 | #include <asm/pte-walk.h> | ||
22 | 23 | ||
23 | static DEFINE_MUTEX(mem_list_mutex); | 24 | static DEFINE_MUTEX(mem_list_mutex); |
24 | 25 | ||
@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t { | |||
27 | struct rcu_head rcu; | 28 | struct rcu_head rcu; |
28 | unsigned long used; | 29 | unsigned long used; |
29 | atomic64_t mapped; | 30 | atomic64_t mapped; |
31 | unsigned int pageshift; | ||
30 | u64 ua; /* userspace address */ | 32 | u64 ua; /* userspace address */ |
31 | u64 entries; /* number of entries in hpas[] */ | 33 | u64 entries; /* number of entries in hpas[] */ |
32 | u64 *hpas; /* vmalloc'ed */ | 34 | u64 *hpas; /* vmalloc'ed */ |
@@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, | |||
125 | { | 127 | { |
126 | struct mm_iommu_table_group_mem_t *mem; | 128 | struct mm_iommu_table_group_mem_t *mem; |
127 | long i, j, ret = 0, locked_entries = 0; | 129 | long i, j, ret = 0, locked_entries = 0; |
130 | unsigned int pageshift; | ||
131 | unsigned long flags; | ||
128 | struct page *page = NULL; | 132 | struct page *page = NULL; |
129 | 133 | ||
130 | mutex_lock(&mem_list_mutex); | 134 | mutex_lock(&mem_list_mutex); |
@@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, | |||
159 | goto unlock_exit; | 163 | goto unlock_exit; |
160 | } | 164 | } |
161 | 165 | ||
166 | /* | ||
167 | * For a starting point for a maximum page size calculation | ||
168 | * we use @ua and @entries natural alignment to allow IOMMU pages | ||
169 | * smaller than huge pages but still bigger than PAGE_SIZE. | ||
170 | */ | ||
171 | mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); | ||
162 | mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); | 172 | mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); |
163 | if (!mem->hpas) { | 173 | if (!mem->hpas) { |
164 | kfree(mem); | 174 | kfree(mem); |
@@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, | |||
199 | } | 209 | } |
200 | } | 210 | } |
201 | populate: | 211 | populate: |
212 | pageshift = PAGE_SHIFT; | ||
213 | if (PageCompound(page)) { | ||
214 | pte_t *pte; | ||
215 | struct page *head = compound_head(page); | ||
216 | unsigned int compshift = compound_order(head); | ||
217 | |||
218 | local_irq_save(flags); /* disables as well */ | ||
219 | pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift); | ||
220 | local_irq_restore(flags); | ||
221 | |||
222 | /* Double check it is still the same pinned page */ | ||
223 | if (pte && pte_page(*pte) == head && | ||
224 | pageshift == compshift) | ||
225 | pageshift = max_t(unsigned int, pageshift, | ||
226 | PAGE_SHIFT); | ||
227 | } | ||
228 | mem->pageshift = min(mem->pageshift, pageshift); | ||
202 | mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; | 229 | mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; |
203 | } | 230 | } |
204 | 231 | ||
@@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, | |||
349 | EXPORT_SYMBOL_GPL(mm_iommu_find); | 376 | EXPORT_SYMBOL_GPL(mm_iommu_find); |
350 | 377 | ||
351 | long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, | 378 | long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, |
352 | unsigned long ua, unsigned long *hpa) | 379 | unsigned long ua, unsigned int pageshift, unsigned long *hpa) |
353 | { | 380 | { |
354 | const long entry = (ua - mem->ua) >> PAGE_SHIFT; | 381 | const long entry = (ua - mem->ua) >> PAGE_SHIFT; |
355 | u64 *va = &mem->hpas[entry]; | 382 | u64 *va = &mem->hpas[entry]; |
@@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, | |||
357 | if (entry >= mem->entries) | 384 | if (entry >= mem->entries) |
358 | return -EFAULT; | 385 | return -EFAULT; |
359 | 386 | ||
387 | if (pageshift > mem->pageshift) | ||
388 | return -EFAULT; | ||
389 | |||
360 | *hpa = *va | (ua & ~PAGE_MASK); | 390 | *hpa = *va | (ua & ~PAGE_MASK); |
361 | 391 | ||
362 | return 0; | 392 | return 0; |
@@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, | |||
364 | EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); | 394 | EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); |
365 | 395 | ||
366 | long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, | 396 | long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, |
367 | unsigned long ua, unsigned long *hpa) | 397 | unsigned long ua, unsigned int pageshift, unsigned long *hpa) |
368 | { | 398 | { |
369 | const long entry = (ua - mem->ua) >> PAGE_SHIFT; | 399 | const long entry = (ua - mem->ua) >> PAGE_SHIFT; |
370 | void *va = &mem->hpas[entry]; | 400 | void *va = &mem->hpas[entry]; |
@@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, | |||
373 | if (entry >= mem->entries) | 403 | if (entry >= mem->entries) |
374 | return -EFAULT; | 404 | return -EFAULT; |
375 | 405 | ||
406 | if (pageshift > mem->pageshift) | ||
407 | return -EFAULT; | ||
408 | |||
376 | pa = (void *) vmalloc_to_phys(va); | 409 | pa = (void *) vmalloc_to_phys(va); |
377 | if (!pa) | 410 | if (!pa) |
378 | return -EFAULT; | 411 | return -EFAULT; |
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 47166ad2a669..196978733e64 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c | |||
@@ -2734,7 +2734,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, | |||
2734 | { | 2734 | { |
2735 | int nr, dotted; | 2735 | int nr, dotted; |
2736 | unsigned long first_adr; | 2736 | unsigned long first_adr; |
2737 | unsigned long inst, last_inst = 0; | 2737 | unsigned int inst, last_inst = 0; |
2738 | unsigned char val[4]; | 2738 | unsigned char val[4]; |
2739 | 2739 | ||
2740 | dotted = 0; | 2740 | dotted = 0; |
@@ -2758,7 +2758,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, | |||
2758 | dotted = 0; | 2758 | dotted = 0; |
2759 | last_inst = inst; | 2759 | last_inst = inst; |
2760 | if (praddr) | 2760 | if (praddr) |
2761 | printf(REG" %.8lx", adr, inst); | 2761 | printf(REG" %.8x", adr, inst); |
2762 | printf("\t"); | 2762 | printf("\t"); |
2763 | dump_func(inst, adr); | 2763 | dump_func(inst, adr); |
2764 | printf("\n"); | 2764 | printf("\n"); |
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 759a5bdd40e1..7cd63b0c1a46 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c | |||
@@ -457,17 +457,17 @@ static void tce_iommu_unuse_page(struct tce_container *container, | |||
457 | } | 457 | } |
458 | 458 | ||
459 | static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, | 459 | static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, |
460 | unsigned long tce, unsigned long size, | 460 | unsigned long tce, unsigned long shift, |
461 | unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) | 461 | unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) |
462 | { | 462 | { |
463 | long ret = 0; | 463 | long ret = 0; |
464 | struct mm_iommu_table_group_mem_t *mem; | 464 | struct mm_iommu_table_group_mem_t *mem; |
465 | 465 | ||
466 | mem = mm_iommu_lookup(container->mm, tce, size); | 466 | mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift); |
467 | if (!mem) | 467 | if (!mem) |
468 | return -EINVAL; | 468 | return -EINVAL; |
469 | 469 | ||
470 | ret = mm_iommu_ua_to_hpa(mem, tce, phpa); | 470 | ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa); |
471 | if (ret) | 471 | if (ret) |
472 | return -EINVAL; | 472 | return -EINVAL; |
473 | 473 | ||
@@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container, | |||
487 | if (!pua) | 487 | if (!pua) |
488 | return; | 488 | return; |
489 | 489 | ||
490 | ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl), | 490 | ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift, |
491 | &hpa, &mem); | 491 | &hpa, &mem); |
492 | if (ret) | 492 | if (ret) |
493 | pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", | 493 | pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", |
@@ -611,7 +611,7 @@ static long tce_iommu_build_v2(struct tce_container *container, | |||
611 | entry + i); | 611 | entry + i); |
612 | 612 | ||
613 | ret = tce_iommu_prereg_ua_to_hpa(container, | 613 | ret = tce_iommu_prereg_ua_to_hpa(container, |
614 | tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); | 614 | tce, tbl->it_page_shift, &hpa, &mem); |
615 | if (ret) | 615 | if (ret) |
616 | break; | 616 | break; |
617 | 617 | ||