path: root/arch/powerpc/mm/book3s64/iommu_api.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-17 11:58:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-17 11:58:04 -0400
commit	57a8ec387e1441ea5e1232bc0749fb99a8cba7e7 (patch)
tree	b5fb03fc6bc5754de8b5b1f8b0e4f36d67c8315c /arch/powerpc/mm/book3s64/iommu_api.c
parent	0a8ad0ffa4d80a544f6cbff703bf6394339afcdf (diff)
parent	43e11fa2d1d3b6e35629fa556eb7d571edba2010 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:

"VM:
 - z3fold fixes and enhancements by Henry Burns and Vitaly Wool
 - more accurate reclaimed slab caches calculations by Yafang Shao
 - fix MAP_UNINITIALIZED UAPI symbol to not depend on config, by Christoph Hellwig
 - !CONFIG_MMU fixes by Christoph Hellwig
 - new novmcoredd parameter to omit device dumps from vmcore, by Kairui Song
 - new test_meminit module for testing heap and pagealloc initialization, by Alexander Potapenko
 - ioremap improvements for huge mappings, by Anshuman Khandual
 - generalize kprobe page fault handling, by Anshuman Khandual
 - device-dax hotplug fixes and improvements, by Pavel Tatashin
 - enable synchronous DAX fault on powerpc, by Aneesh Kumar K.V
 - add pte_devmap() support for arm64, by Robin Murphy
 - unify locked_vm accounting with a helper, by Daniel Jordan
 - several misc fixes

core/lib:
 - new typeof_member() macro including some users, by Alexey Dobriyan
 - make BIT() and GENMASK() available in asm, by Masahiro Yamada
 - changed LIST_POISON2 on x86_64 to 0xdead000000000122 for better code generation, by Alexey Dobriyan
 - rbtree code size optimizations, by Michel Lespinasse
 - convert struct pid count to refcount_t, by Joel Fernandes

get_maintainer.pl:
 - add --no-moderated switch to skip moderated ML's, by Joe Perches

misc:
 - ptrace PTRACE_GET_SYSCALL_INFO interface
 - coda updates
 - gdb scripts, various"

[ Using merge message suggestion from Vlastimil Babka, with some editing - Linus ]

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (100 commits)
  fs/select.c: use struct_size() in kmalloc()
  mm: add account_locked_vm utility function
  arm64: mm: implement pte_devmap support
  mm: introduce ARCH_HAS_PTE_DEVMAP
  mm: clean up is_device_*_page() definitions
  mm/mmap: move common defines to mman-common.h
  mm: move MAP_SYNC to asm-generic/mman-common.h
  device-dax: "Hotremove" persistent memory that is used like normal RAM
  mm/hotplug: make remove_memory() interface usable
  device-dax: fix memory and resource leak if hotplug fails
  include/linux/lz4.h: fix spelling and copy-paste errors in documentation
  ipc/mqueue.c: only perform resource calculation if user valid
  include/asm-generic/bug.h: fix "cut here" for WARN_ON for __WARN_TAINT architectures
  scripts/gdb: add helpers to find and list devices
  scripts/gdb: add lx-genpd-summary command
  drivers/pps/pps.c: clear offset flags in PPS_SETPARAMS ioctl
  kernel/pid.c: convert struct pid count to refcount_t
  drivers/rapidio/devices/rio_mport_cdev.c: NUL terminate some strings
  select: shift restore_saved_sigmask_unless() into poll_select_copy_remaining()
  select: change do_poll() to return -ERESTARTNOHAND rather than -EINTR
  ...
Diffstat (limited to 'arch/powerpc/mm/book3s64/iommu_api.c')
-rw-r--r--	arch/powerpc/mm/book3s64/iommu_api.c	41
1 file changed, 4 insertions, 37 deletions
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index 90ee3a89722c..b056cae3388b 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -14,6 +14,7 @@
 #include <linux/hugetlb.h>
 #include <linux/swap.h>
 #include <linux/sizes.h>
+#include <linux/mm.h>
 #include <asm/mmu_context.h>
 #include <asm/pte-walk.h>
 #include <linux/mm_inline.h>
@@ -46,40 +47,6 @@ struct mm_iommu_table_group_mem_t {
 	u64 dev_hpa;		/* Device memory base address */
 };
 
-static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
-		unsigned long npages, bool incr)
-{
-	long ret = 0, locked, lock_limit;
-
-	if (!npages)
-		return 0;
-
-	down_write(&mm->mmap_sem);
-
-	if (incr) {
-		locked = mm->locked_vm + npages;
-		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
-			ret = -ENOMEM;
-		else
-			mm->locked_vm += npages;
-	} else {
-		if (WARN_ON_ONCE(npages > mm->locked_vm))
-			npages = mm->locked_vm;
-		mm->locked_vm -= npages;
-	}
-
-	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
-			current ? current->pid : 0,
-			incr ? '+' : '-',
-			npages << PAGE_SHIFT,
-			mm->locked_vm << PAGE_SHIFT,
-			rlimit(RLIMIT_MEMLOCK));
-	up_write(&mm->mmap_sem);
-
-	return ret;
-}
-
 bool mm_iommu_preregistered(struct mm_struct *mm)
 {
 	return !list_empty(&mm->context.iommu_group_mem_list);
@@ -96,7 +63,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	unsigned long entry, chunk;
 
 	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
-		ret = mm_iommu_adjust_locked_vm(mm, entries, true);
+		ret = account_locked_vm(mm, entries, true);
 		if (ret)
 			return ret;
 
@@ -211,7 +178,7 @@ free_exit:
 	kfree(mem);
 
 unlock_exit:
-	mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+	account_locked_vm(mm, locked_entries, false);
 
 	return ret;
 }
@@ -311,7 +278,7 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
 
-	mm_iommu_adjust_locked_vm(mm, unlock_entries, false);
+	account_locked_vm(mm, unlock_entries, false);
 
 	return ret;
 }
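
The call sites above now rely on the generic helper introduced earlier in this series ("mm: add account_locked_vm utility function"), so the RLIMIT_MEMLOCK check and locked_vm bookkeeping live in common mm code instead of being open-coded per driver. Below is a minimal, hedged sketch of the caller-side pattern only; demo_charge_locked_vm() and its failure path are hypothetical, and the helper's actual definition lives in mm/util.c, declared in include/linux/mm.h.

#include <linux/mm.h>

/*
 * Hypothetical example (not part of this commit): charge @entries pages
 * against RLIMIT_MEMLOCK before pinning memory, and roll the charge back
 * if later setup fails -- the same pattern mm_iommu_do_alloc() follows
 * in the hunks above.
 */
static long demo_charge_locked_vm(struct mm_struct *mm, unsigned long entries)
{
	long ret;

	/* Takes mmap_sem, checks the rlimit, bumps mm->locked_vm. */
	ret = account_locked_vm(mm, entries, true);
	if (ret)
		return ret;		/* typically -ENOMEM when over the limit */

	ret = -EFAULT;			/* stand-in for a failing setup step */
	if (ret)
		account_locked_vm(mm, entries, false);	/* undo the charge */

	return ret;
}

Compared with the removed mm_iommu_adjust_locked_vm(), the behavior callers see should be the same; the difference is that the limit check and debug tracing are now shared with the other subsystems converted by the same series rather than duplicated here.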