author     Michal Hocko <mhocko@suse.com>              2018-08-22 00:52:33 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-08-22 13:52:44 -0400
commit     93065ac753e4443840a057bfef4be71ec766fde9 (patch)
tree       6293538251b5e62affb5cdbe56a4d2f7257fc656 /drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
parent     c2343d2761f86ae1b857f78c7cdb9f51e5fa1641 (diff)
mm, oom: distinguish blockable mode for mmu notifiers
There are several blockable mmu notifiers which might sleep in mmu_notifier_invalidate_range_start, and that is a problem for the oom_reaper because it needs to guarantee forward progress and therefore cannot depend on any sleepable locks.

Currently we simply back off and mark an oom victim with blockable mmu notifiers as done after a short sleep. That can result in selecting a new oom victim prematurely because the previous one still hasn't torn its memory down yet.

We can do much better, though. Even if mmu notifiers use sleepable locks, there is no reason to automatically assume those locks are held. Moreover, the majority of notifiers only care about a portion of the address space, and there is absolutely zero reason to fail when we are unmapping an unrelated range. Many notifiers do really block and wait for HW, which is harder to handle, and there we have to bail out.

This patch handles the low-hanging fruit: __mmu_notifier_invalidate_range_start gets a blockable flag, and callbacks are not allowed to sleep if the flag is set to false. This is achieved by using trylock instead of the sleepable lock for most callbacks and continuing as long as we do not block down the call chain. I think we can improve that even further, because there is a common pattern of doing a range lookup first and then acting on the result; the first part can be done without a sleeping lock in most cases AFAICS.

The oom_reaper side then simply retries if there is at least one notifier which couldn't make any progress in !blockable mode. A retry loop is already implemented to wait for the mmap_sem, and this is basically the same thing.

The simplest way for driver developers to test this code path is to wrap userspace code which uses these notifiers into a memcg and set the hard limit low enough to hit the oom. This can be done, e.g., after the test faults in all the mmu-notifier-managed memory, by then setting the hard limit to something really small. Then we are looking for a proper process tear down.

[akpm@linux-foundation.org: coding style fixes]
[akpm@linux-foundation.org: minor code simplification]
Link: http://lkml.kernel.org/r/20180716115058.5559-1-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Christian König <christian.koenig@amd.com> # AMD notifiers
Acked-by: Leon Romanovsky <leonro@mellanox.com> # mlx and umem_odp
Reported-by: David Rientjes <rientjes@google.com>
Cc: "David (ChunMing) Zhou" <David1.Zhou@amd.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Doug Ledford <dledford@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Mike Marciniszyn <mike.marciniszyn@intel.com>
Cc: Dennis Dalessandro <dennis.dalessandro@intel.com>
Cc: Sudeep Dutt <sudeep.dutt@intel.com>
Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
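For driver authors, the callback-side pattern described above is small: when the notifier runs with blockable == false, take every lock with a trylock and return -EAGAIN instead of sleeping, and bail out before doing any per-object work that might block. The following is only a minimal sketch of that pattern, modeled on the amdgpu change shown below; the driver_mn structure and the driver_mn_drop_range() helper are hypothetical placeholders, not real kernel symbols.

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/mm_types.h>
    #include <linux/mmu_notifier.h>
    #include <linux/mutex.h>

    /* Hypothetical per-driver notifier state; only the lock matters here. */
    struct driver_mn {
            struct mmu_notifier mn;
            struct mutex lock;
    };

    /* Hypothetical helper: tear down driver mappings covering [start, end). */
    static void driver_mn_drop_range(struct driver_mn *dmn,
                                     unsigned long start, unsigned long end)
    {
    }

    /*
     * Same shape as the amdgpu callbacks in the diff below: sleep on the lock
     * only when the core said we may block, otherwise trylock and report
     * -EAGAIN so the oom_reaper can retry the victim later.
     */
    static int driver_mn_invalidate_range_start(struct mmu_notifier *mn,
                                                struct mm_struct *mm,
                                                unsigned long start,
                                                unsigned long end,
                                                bool blockable)
    {
            struct driver_mn *dmn = container_of(mn, struct driver_mn, mn);

            if (blockable)
                    mutex_lock(&dmn->lock);
            else if (!mutex_trylock(&dmn->lock))
                    return -EAGAIN;

            driver_mn_drop_range(dmn, start, end);

            mutex_unlock(&dmn->lock);
            return 0;
    }

A driver would hook such a callback into its mmu_notifier_ops table; amdgpu does this separately for its gfx and hsa variants, both of which are converted by the hunks below.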
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 43
1 file changed, 35 insertions, 8 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index a365ea2383d1..e55508b39496 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -178,12 +178,18 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
  *
  * @amn: our notifier
  */
-static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
+static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
 {
-        mutex_lock(&amn->read_lock);
+        if (blockable)
+                mutex_lock(&amn->read_lock);
+        else if (!mutex_trylock(&amn->read_lock))
+                return -EAGAIN;
+
         if (atomic_inc_return(&amn->recursion) == 1)
                 down_read_non_owner(&amn->lock);
         mutex_unlock(&amn->read_lock);
+
+        return 0;
 }
 
 /**
@@ -239,10 +245,11 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
  * Block for operations on BOs to finish and mark pages as accessed and
  * potentially dirty.
  */
-static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
-                                                 unsigned long end)
+                                                 unsigned long end,
+                                                 bool blockable)
 {
         struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
         struct interval_tree_node *it;
@@ -250,17 +257,28 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
         /* notification is exclusive, but interval is inclusive */
         end -= 1;
 
-        amdgpu_mn_read_lock(amn);
+        /* TODO we should be able to split locking for interval tree and
+         * amdgpu_mn_invalidate_node
+         */
+        if (amdgpu_mn_read_lock(amn, blockable))
+                return -EAGAIN;
 
         it = interval_tree_iter_first(&amn->objects, start, end);
         while (it) {
                 struct amdgpu_mn_node *node;
 
+                if (!blockable) {
+                        amdgpu_mn_read_unlock(amn);
+                        return -EAGAIN;
+                }
+
                 node = container_of(it, struct amdgpu_mn_node, it);
                 it = interval_tree_iter_next(it, start, end);
 
                 amdgpu_mn_invalidate_node(node, start, end);
         }
+
+        return 0;
 }
 
 /**
@@ -275,10 +293,11 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
  * necessitates evicting all user-mode queues of the process. The BOs
  * are restorted in amdgpu_mn_invalidate_range_end_hsa.
  */
-static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
                                                 struct mm_struct *mm,
                                                 unsigned long start,
-                                                unsigned long end)
+                                                unsigned long end,
+                                                bool blockable)
 {
         struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
         struct interval_tree_node *it;
@@ -286,13 +305,19 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
         /* notification is exclusive, but interval is inclusive */
         end -= 1;
 
-        amdgpu_mn_read_lock(amn);
+        if (amdgpu_mn_read_lock(amn, blockable))
+                return -EAGAIN;
 
         it = interval_tree_iter_first(&amn->objects, start, end);
         while (it) {
                 struct amdgpu_mn_node *node;
                 struct amdgpu_bo *bo;
 
+                if (!blockable) {
+                        amdgpu_mn_read_unlock(amn);
+                        return -EAGAIN;
+                }
+
                 node = container_of(it, struct amdgpu_mn_node, it);
                 it = interval_tree_iter_next(it, start, end);
 
@@ -304,6 +329,8 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
                                 amdgpu_amdkfd_evict_userptr(mem, mm);
                 }
         }
+
+        return 0;
 }
 
 /**
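For completeness, the caller-side behaviour the commit message describes can be pictured roughly as follows. This is only an illustrative sketch of the retry idea, not the real mm/oom_kill.c code; oom_reap_sketch(), reap_victim_mm_nonblocking() and MAX_REAP_RETRIES are hypothetical placeholders.

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/mm_types.h>
    #include <linux/sched.h>

    #define MAX_REAP_RETRIES 10     /* hypothetical retry budget */

    /*
     * Hypothetical helper: unmap the victim's memory, invoking the mmu
     * notifiers with blockable == false; would return -EAGAIN if any
     * notifier could not make progress without sleeping.
     */
    static int reap_victim_mm_nonblocking(struct mm_struct *mm)
    {
            /* Placeholder body; the real reaper unmaps the victim's ranges. */
            return 0;
    }

    /*
     * Sketch of the retry described in the commit message: instead of
     * declaring the victim done (and picking a new one prematurely), back
     * off and try the same mm again when a notifier reported -EAGAIN.
     */
    static void oom_reap_sketch(struct mm_struct *mm)
    {
            int attempts = MAX_REAP_RETRIES;

            while (attempts--) {
                    if (reap_victim_mm_nonblocking(mm) != -EAGAIN)
                            return; /* memory torn down, victim is done */

                    schedule_timeout_idle(HZ / 10); /* brief backoff, retry */
            }
    }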