author | Thomas Gleixner <tglx@linutronix.de> | 2016-01-12 05:01:12 -0500
---|---|---
committer | Thomas Gleixner <tglx@linutronix.de> | 2016-01-12 05:01:12 -0500
commit | 1f16f116b01c110db20ab808562c8b8bc3ee3d6e | (patch)
tree | 44db563f64cf5f8d62af8f99a61e2b248c44ea3a | /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
parent | 03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 | (diff)
parent | f9eccf24615672896dc13251410c3f2f33a14f95 | (diff)
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:
- Fix the vt8500 timer locking up the system when it is asked to program too small a delta (Roman Volkov); a sketch of the usual guard for this follows the list
- Select CLKSRC_MMIO when fsl_ftm_timer is enabled with COMPILE_TEST (Daniel Lezcano)
- Prevent timers that use the 'iomem' API from being compiled when the architecture does not set HAS_IOMEM (Richard Weinberger)
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 27
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 00c5b580f56c..9c253c535d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, false);
-	if (r) {
-		mutex_unlock(&vm->mutex);
+	if (r)
 		return r;
-	}
 
 	bo_va = amdgpu_vm_bo_find(vm, rbo);
 	if (!bo_va) {
@@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 		++bo_va->ref_count;
 	}
 	amdgpu_bo_unreserve(rbo);
-	mutex_unlock(&vm->mutex);
 	return 0;
 }
 
@@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, true);
 	if (r) {
-		mutex_unlock(&vm->mutex);
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
 		return;
@@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 		}
 	}
 	amdgpu_bo_unreserve(rbo);
-	mutex_unlock(&vm->mutex);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -242,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			AMDGPU_GEM_USERPTR_REGISTER))
 		return -EINVAL;
 
-	if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
-	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
+	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
+	     !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
+	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
 
 		/* if we want to write to it we must require anonymous
 		   memory and install a MMU notifier */
@@ -483,6 +477,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		if (domain == AMDGPU_GEM_DOMAIN_CPU)
 			goto error_unreserve;
 	}
+	list_for_each_entry(entry, &duplicates, head) {
+		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
+		/* if anything is swapped out don't swap it in here,
+		   just abort and wait for the next CS */
+		if (domain == AMDGPU_GEM_DOMAIN_CPU)
+			goto error_unreserve;
+	}
+
 	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 	if (r)
 		goto error_unreserve;
@@ -553,7 +555,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	mutex_lock(&fpriv->vm.mutex);
 	rbo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&duplicates);
@@ -568,7 +569,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	}
 	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
-		mutex_unlock(&fpriv->vm.mutex);
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
@@ -577,7 +577,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	if (!bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		drm_gem_object_unreference_unlocked(gobj);
-		mutex_unlock(&fpriv->vm.mutex);
 		return -ENOENT;
 	}
 
@@ -602,7 +601,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-	mutex_unlock(&fpriv->vm.mutex);
+
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }
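
Taken together, the amdgpu hunks above drop vm->mutex (and fpriv->vm.mutex) from the GEM open/close and VA-ioctl paths, leaving the buffer-object reservation taken by amdgpu_bo_reserve()/ttm_eu_reserve_buffers() as the only serialization shown there, and they add a pass over the duplicates list so a swapped-out BO aborts the VM update. A condensed sketch of the resulting open-path pattern, reusing the amdgpu helpers that appear in the diff (not a standalone compilable unit; the comments are editorial):

```c
/* Condensed from the post-patch amdgpu_gem_object_open() shape shown above;
 * error handling and the bo_va-creation branch are elided. */
static int open_pattern(struct amdgpu_bo *rbo, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(rbo, false);	/* BO reservation is the only lock taken */
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, rbo);	/* look up this BO's mapping in the VM */
	if (bo_va)
		++bo_va->ref_count;

	amdgpu_bo_unreserve(rbo);		/* release the reservation */
	return 0;
}
```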