Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 43
1 file changed, 35 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index a365ea2383d1..e55508b39496 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -178,12 +178,18 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
  *
  * @amn: our notifier
  */
-static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
+static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
 {
-	mutex_lock(&amn->read_lock);
+	if (blockable)
+		mutex_lock(&amn->read_lock);
+	else if (!mutex_trylock(&amn->read_lock))
+		return -EAGAIN;
+
 	if (atomic_inc_return(&amn->recursion) == 1)
 		down_read_non_owner(&amn->lock);
 	mutex_unlock(&amn->read_lock);
+
+	return 0;
 }
 
 /**
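
[Note] The new -EAGAIN paths added below pair with amdgpu_mn_read_unlock(), which this patch does not touch. As a reference, a sketch of the unlock side consistent with the recursion counter used above (the real helper lives elsewhere in amdgpu_mn.c and may differ in detail):

/* Sketch, not part of this diff: drop the recursion count and release the
 * read side only when the outermost notifier invocation finishes.
 */
static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
{
	if (atomic_dec_return(&amn->recursion) == 0)
		up_read_non_owner(&amn->lock);
}
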
@@ -239,10 +245,11 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
  * Block for operations on BOs to finish and mark pages as accessed and
  * potentially dirty.
  */
-static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 						 struct mm_struct *mm,
 						 unsigned long start,
-						 unsigned long end)
+						 unsigned long end,
+						 bool blockable)
 {
 	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
@@ -250,17 +257,28 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	amdgpu_mn_read_lock(amn);
+	/* TODO we should be able to split locking for interval tree and
+	 * amdgpu_mn_invalidate_node
+	 */
+	if (amdgpu_mn_read_lock(amn, blockable))
+		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 
+		if (!blockable) {
+			amdgpu_mn_read_unlock(amn);
+			return -EAGAIN;
+		}
+
 		node = container_of(it, struct amdgpu_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
 		amdgpu_mn_invalidate_node(node, start, end);
 	}
+
+	return 0;
 }
 
 /**
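
[Note] The point of returning -EAGAIN here is that a caller which cannot sleep (e.g. the OOM reaper tearing down an address space) can back off instead of blocking on BO fences. A purely illustrative sketch of such a caller, using only the callback signature introduced by this patch; the wrapper name is hypothetical, not the exact mm-side API:

/* Illustrative only: how a non-blocking caller might react to the new
 * return code of ->invalidate_range_start().
 */
static bool try_invalidate_nonblocking(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long start, unsigned long end)
{
	/* blockable == false: the callback must not sleep on BO fences */
	if (mn->ops->invalidate_range_start(mn, mm, start, end, false))
		return false;	/* -EAGAIN: back off and retry later */

	return true;
}
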
@@ -275,10 +293,11 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
  * necessitates evicting all user-mode queues of the process. The BOs
  * are restorted in amdgpu_mn_invalidate_range_end_hsa.
  */
-static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 						 struct mm_struct *mm,
 						 unsigned long start,
-						 unsigned long end)
+						 unsigned long end,
+						 bool blockable)
 {
 	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
@@ -286,13 +305,19 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	amdgpu_mn_read_lock(amn);
+	if (amdgpu_mn_read_lock(amn, blockable))
+		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;
 
+		if (!blockable) {
+			amdgpu_mn_read_unlock(amn);
+			return -EAGAIN;
+		}
+
 		node = container_of(it, struct amdgpu_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
@@ -304,6 +329,8 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 			amdgpu_amdkfd_evict_userptr(mem, mm);
 		}
 	}
+
+	return 0;
 }
 
 /**
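
[Note] Both patched handlers are the invalidate_range_start entries of the driver's mmu_notifier_ops tables, so with the signature change above the ops member now expects an int-returning, blockable-aware callback. Roughly how the wiring looks in the unchanged part of amdgpu_mn.c (not part of this diff; field layout may differ slightly in the tree this applies to):

/* Approximate ops wiring, reproduced for context only. */
static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
	[AMDGPU_MN_TYPE_GFX] = {
		.release = amdgpu_mn_release,
		.invalidate_range_start = amdgpu_mn_invalidate_range_start_gfx,
	},
	[AMDGPU_MN_TYPE_HSA] = {
		.release = amdgpu_mn_release,
		.invalidate_range_start = amdgpu_mn_invalidate_range_start_hsa,
		.invalidate_range_end = amdgpu_mn_invalidate_range_end_hsa,
	},
};
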