diff options
| author | Alexey Kardashevskiy <aik@ozlabs.ru> | 2017-03-16 20:48:27 -0400 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-03-22 07:43:37 -0400 |
| commit | 5b34666bd2e70046f9880db01119c7d908e0888d (patch) | |
| tree | 47e3102213d5ccd30647bad1102dbfcdc8556680 /arch/powerpc | |
| parent | 2ba7ef21820ec5cb2376078774e24d6f6a7386b2 (diff) | |
powerpc/iommu: Stop using @current in mm_iommu_xxx
[ Upstream commit d7baee6901b34c4895eb78efdbf13a49079d7404 ]
This changes the mm_iommu_xxx helpers to take mm_struct as a parameter
instead of getting it from @current, which in some situations may
not hold a valid reference to mm.
This changes helpers to receive @mm and moves all references to @current
to the caller, including checks for !current and !current->mm;
checks in mm_iommu_preregistered() are removed as there is no caller
yet.
This moves the mm_iommu_adjust_locked_vm() call to the caller, as
the helper receives mm_iommu_table_group_mem_t but needs mm.
This should cause no behavioral change.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/powerpc')
| -rw-r--r-- | arch/powerpc/include/asm/mmu_context.h | 16 | ||||
| -rw-r--r-- | arch/powerpc/mm/mmu_context_iommu.c | 46 |
2 files changed, 26 insertions, 36 deletions
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 424844bc2a57..b9e3f0aca261 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h | |||
| @@ -19,16 +19,18 @@ extern void destroy_context(struct mm_struct *mm); | |||
| 19 | struct mm_iommu_table_group_mem_t; | 19 | struct mm_iommu_table_group_mem_t; |
| 20 | 20 | ||
| 21 | extern int isolate_lru_page(struct page *page); /* from internal.h */ | 21 | extern int isolate_lru_page(struct page *page); /* from internal.h */ |
| 22 | extern bool mm_iommu_preregistered(void); | 22 | extern bool mm_iommu_preregistered(struct mm_struct *mm); |
| 23 | extern long mm_iommu_get(unsigned long ua, unsigned long entries, | 23 | extern long mm_iommu_get(struct mm_struct *mm, |
| 24 | unsigned long ua, unsigned long entries, | ||
| 24 | struct mm_iommu_table_group_mem_t **pmem); | 25 | struct mm_iommu_table_group_mem_t **pmem); |
| 25 | extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem); | 26 | extern long mm_iommu_put(struct mm_struct *mm, |
| 27 | struct mm_iommu_table_group_mem_t *mem); | ||
| 26 | extern void mm_iommu_init(struct mm_struct *mm); | 28 | extern void mm_iommu_init(struct mm_struct *mm); |
| 27 | extern void mm_iommu_cleanup(struct mm_struct *mm); | 29 | extern void mm_iommu_cleanup(struct mm_struct *mm); |
| 28 | extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua, | 30 | extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, |
| 29 | unsigned long size); | 31 | unsigned long ua, unsigned long size); |
| 30 | extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua, | 32 | extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, |
| 31 | unsigned long entries); | 33 | unsigned long ua, unsigned long entries); |
| 32 | extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, | 34 | extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, |
| 33 | unsigned long ua, unsigned long *hpa); | 35 | unsigned long ua, unsigned long *hpa); |
| 34 | extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); | 36 | extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); |
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index ad2e575fd418..4c6db09e77ad 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c | |||
| @@ -56,7 +56,7 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm, | |||
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n", | 58 | pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n", |
| 59 | current->pid, | 59 | current ? current->pid : 0, |
| 60 | incr ? '+' : '-', | 60 | incr ? '+' : '-', |
| 61 | npages << PAGE_SHIFT, | 61 | npages << PAGE_SHIFT, |
| 62 | mm->locked_vm << PAGE_SHIFT, | 62 | mm->locked_vm << PAGE_SHIFT, |
| @@ -66,12 +66,9 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm, | |||
| 66 | return ret; | 66 | return ret; |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | bool mm_iommu_preregistered(void) | 69 | bool mm_iommu_preregistered(struct mm_struct *mm) |
| 70 | { | 70 | { |
| 71 | if (!current || !current->mm) | 71 | return !list_empty(&mm->context.iommu_group_mem_list); |
| 72 | return false; | ||
| 73 | |||
| 74 | return !list_empty(¤t->mm->context.iommu_group_mem_list); | ||
| 75 | } | 72 | } |
| 76 | EXPORT_SYMBOL_GPL(mm_iommu_preregistered); | 73 | EXPORT_SYMBOL_GPL(mm_iommu_preregistered); |
| 77 | 74 | ||
| @@ -124,19 +121,16 @@ static int mm_iommu_move_page_from_cma(struct page *page) | |||
| 124 | return 0; | 121 | return 0; |
| 125 | } | 122 | } |
| 126 | 123 | ||
| 127 | long mm_iommu_get(unsigned long ua, unsigned long entries, | 124 | long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, |
| 128 | struct mm_iommu_table_group_mem_t **pmem) | 125 | struct mm_iommu_table_group_mem_t **pmem) |
| 129 | { | 126 | { |
| 130 | struct mm_iommu_table_group_mem_t *mem; | 127 | struct mm_iommu_table_group_mem_t *mem; |
| 131 | long i, j, ret = 0, locked_entries = 0; | 128 | long i, j, ret = 0, locked_entries = 0; |
| 132 | struct page *page = NULL; | 129 | struct page *page = NULL; |
| 133 | 130 | ||
| 134 | if (!current || !current->mm) | ||
| 135 | return -ESRCH; /* process exited */ | ||
| 136 | |||
| 137 | mutex_lock(&mem_list_mutex); | 131 | mutex_lock(&mem_list_mutex); |
| 138 | 132 | ||
| 139 | list_for_each_entry_rcu(mem, ¤t->mm->context.iommu_group_mem_list, | 133 | list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, |
| 140 | next) { | 134 | next) { |
| 141 | if ((mem->ua == ua) && (mem->entries == entries)) { | 135 | if ((mem->ua == ua) && (mem->entries == entries)) { |
| 142 | ++mem->used; | 136 | ++mem->used; |
| @@ -154,7 +148,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries, | |||
| 154 | 148 | ||
| 155 | } | 149 | } |
| 156 | 150 | ||
| 157 | ret = mm_iommu_adjust_locked_vm(current->mm, entries, true); | 151 | ret = mm_iommu_adjust_locked_vm(mm, entries, true); |
| 158 | if (ret) | 152 | if (ret) |
| 159 | goto unlock_exit; | 153 | goto unlock_exit; |
| 160 | 154 | ||
| @@ -215,11 +209,11 @@ populate: | |||
| 215 | mem->entries = entries; | 209 | mem->entries = entries; |
| 216 | *pmem = mem; | 210 | *pmem = mem; |
| 217 | 211 | ||
| 218 | list_add_rcu(&mem->next, ¤t->mm->context.iommu_group_mem_list); | 212 | list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list); |
| 219 | 213 | ||
| 220 | unlock_exit: | 214 | unlock_exit: |
| 221 | if (locked_entries && ret) | 215 | if (locked_entries && ret) |
| 222 | mm_iommu_adjust_locked_vm(current->mm, locked_entries, false); | 216 | mm_iommu_adjust_locked_vm(mm, locked_entries, false); |
| 223 | 217 | ||
| 224 | mutex_unlock(&mem_list_mutex); | 218 | mutex_unlock(&mem_list_mutex); |
| 225 | 219 | ||
| @@ -264,17 +258,13 @@ static void mm_iommu_free(struct rcu_head *head) | |||
| 264 | static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem) | 258 | static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem) |
| 265 | { | 259 | { |
| 266 | list_del_rcu(&mem->next); | 260 | list_del_rcu(&mem->next); |
| 267 | mm_iommu_adjust_locked_vm(current->mm, mem->entries, false); | ||
| 268 | call_rcu(&mem->rcu, mm_iommu_free); | 261 | call_rcu(&mem->rcu, mm_iommu_free); |
| 269 | } | 262 | } |
| 270 | 263 | ||
| 271 | long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem) | 264 | long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem) |
| 272 | { | 265 | { |
| 273 | long ret = 0; | 266 | long ret = 0; |
| 274 | 267 | ||
| 275 | if (!current || !current->mm) | ||
| 276 | return -ESRCH; /* process exited */ | ||
| 277 | |||
| 278 | mutex_lock(&mem_list_mutex); | 268 | mutex_lock(&mem_list_mutex); |
| 279 | 269 | ||
| 280 | if (mem->used == 0) { | 270 | if (mem->used == 0) { |
| @@ -297,6 +287,8 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem) | |||
| 297 | /* @mapped became 0 so now mappings are disabled, release the region */ | 287 | /* @mapped became 0 so now mappings are disabled, release the region */ |
| 298 | mm_iommu_release(mem); | 288 | mm_iommu_release(mem); |
| 299 | 289 | ||
| 290 | mm_iommu_adjust_locked_vm(mm, mem->entries, false); | ||
| 291 | |||
| 300 | unlock_exit: | 292 | unlock_exit: |
| 301 | mutex_unlock(&mem_list_mutex); | 293 | mutex_unlock(&mem_list_mutex); |
| 302 | 294 | ||
| @@ -304,14 +296,12 @@ unlock_exit: | |||
| 304 | } | 296 | } |
| 305 | EXPORT_SYMBOL_GPL(mm_iommu_put); | 297 | EXPORT_SYMBOL_GPL(mm_iommu_put); |
| 306 | 298 | ||
| 307 | struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua, | 299 | struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, |
| 308 | unsigned long size) | 300 | unsigned long ua, unsigned long size) |
| 309 | { | 301 | { |
| 310 | struct mm_iommu_table_group_mem_t *mem, *ret = NULL; | 302 | struct mm_iommu_table_group_mem_t *mem, *ret = NULL; |
| 311 | 303 | ||
| 312 | list_for_each_entry_rcu(mem, | 304 | list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { |
| 313 | ¤t->mm->context.iommu_group_mem_list, | ||
| 314 | next) { | ||
| 315 | if ((mem->ua <= ua) && | 305 | if ((mem->ua <= ua) && |
| 316 | (ua + size <= mem->ua + | 306 | (ua + size <= mem->ua + |
| 317 | (mem->entries << PAGE_SHIFT))) { | 307 | (mem->entries << PAGE_SHIFT))) { |
| @@ -324,14 +314,12 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua, | |||
| 324 | } | 314 | } |
| 325 | EXPORT_SYMBOL_GPL(mm_iommu_lookup); | 315 | EXPORT_SYMBOL_GPL(mm_iommu_lookup); |
| 326 | 316 | ||
| 327 | struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua, | 317 | struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, |
| 328 | unsigned long entries) | 318 | unsigned long ua, unsigned long entries) |
| 329 | { | 319 | { |
| 330 | struct mm_iommu_table_group_mem_t *mem, *ret = NULL; | 320 | struct mm_iommu_table_group_mem_t *mem, *ret = NULL; |
| 331 | 321 | ||
| 332 | list_for_each_entry_rcu(mem, | 322 | list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { |
| 333 | ¤t->mm->context.iommu_group_mem_list, | ||
| 334 | next) { | ||
| 335 | if ((mem->ua == ua) && (mem->entries == entries)) { | 323 | if ((mem->ua == ua) && (mem->entries == entries)) { |
| 336 | ret = mem; | 324 | ret = mem; |
| 337 | break; | 325 | break; |
