author     Thomas Gleixner <tglx@linutronix.de>    2016-01-12 05:01:12 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2016-01-12 05:01:12 -0500
commit     1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch)
tree       44db563f64cf5f8d62af8f99a61e2b248c44ea3a /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
parent     03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff)
parent     f9eccf24615672896dc13251410c3f2f33a14f95 (diff)
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:
- Fix the vt8500 timer locking up the system when it is asked to program
  too small a delta (Roman Volkov); a sketch of the general pattern appears
  after this list
- Select CLKSRC_MMIO when the fsl_ftm_timer is enabled with COMPILE_TEST
  (Daniel Lezcano)
- Prevent timers that use the 'iomem' API from being compiled when the
  architecture does not have HAS_IOMEM set (Richard Weinberger)
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 67
1 file changed, 43 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3afcf0237c25..25a3e2485cc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
+static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+				      struct drm_amdgpu_cs_chunk_fence *fence_data)
+{
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+
+	handle = fence_data->handle;
+	gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
+				     fence_data->handle);
+	if (gobj == NULL)
+		return -EINVAL;
+
+	p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+	p->uf.offset = fence_data->offset;
+
+	if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
+		drm_gem_object_unreference_unlocked(gobj);
+		return -EINVAL;
+	}
+
+	p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
+	p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+	p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+	p->uf_entry.priority = 0;
+	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
+	p->uf_entry.tv.shared = true;
+
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
@@ -207,26 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 		case AMDGPU_CHUNK_ID_FENCE:
 			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
-			if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
-				uint32_t handle;
-				struct drm_gem_object *gobj;
-				struct drm_amdgpu_cs_chunk_fence *fence_data;
-
-				fence_data = (void *)p->chunks[i].kdata;
-				handle = fence_data->handle;
-				gobj = drm_gem_object_lookup(p->adev->ddev,
-							     p->filp, handle);
-				if (gobj == NULL) {
-					ret = -EINVAL;
-					goto free_partial_kdata;
-				}
-
-				p->uf.bo = gem_to_amdgpu_bo(gobj);
-				p->uf.offset = fence_data->offset;
-			} else {
+			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 				ret = -EINVAL;
 				goto free_partial_kdata;
 			}
+
+			ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
+			if (ret)
+				goto free_partial_kdata;
+
 			break;
 
 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
@@ -389,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
 	p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
 				      &p->validated);
 
+	if (p->uf.bo)
+		list_add(&p->uf_entry.tv.head, &p->validated);
+
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
 
@@ -486,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 	for (i = 0; i < parser->num_ibs; i++)
 		amdgpu_ib_free(parser->adev, &parser->ibs[i]);
 	kfree(parser->ibs);
-	if (parser->uf.bo)
-		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+	amdgpu_bo_unref(&parser->uf.bo);
+	amdgpu_bo_unref(&parser->uf_entry.robj);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -776,7 +799,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job)
 		amdgpu_ib_free(job->adev, &job->ibs[i]);
 	kfree(job->ibs);
 	if (job->uf.bo)
-		drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
+		amdgpu_bo_unref(&job->uf.bo);
 	return 0;
 }
 
@@ -784,8 +807,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 	union drm_amdgpu_cs *cs = data;
-	struct amdgpu_fpriv *fpriv = filp->driver_priv;
-	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_cs_parser parser = {};
 	bool reserved_buffers = false;
 	int i, r;
@@ -803,7 +824,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
-	mutex_lock(&vm->mutex);
 	r = amdgpu_cs_parser_relocs(&parser);
 	if (r == -ENOMEM)
 		DRM_ERROR("Not enough memory for command submission!\n");
@@ -888,7 +908,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 out:
 	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
-	mutex_unlock(&vm->mutex);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
 }