author    Sushmita Susheelendra <ssusheel@codeaurora.org>  2017-06-13 18:52:54 -0400
committer Rob Clark <robdclark@gmail.com>                  2017-06-17 08:03:07 -0400
commit    0e08270a1f01bceae17d32a0d75aad2388bd1ba2 (patch)
tree      31f17b04f1a459ca5ab26772626c8ece4c54158a
parent    816fa34c051492c7f115ad2fd91c9e723d7fc298 (diff)
drm/msm: Separate locking of buffer resources from struct_mutex
Buffer object specific resources like pages, domains, sg list
need not be protected with struct_mutex. They can be protected
with a buffer object level lock. This simplifies locking and
makes it easier to avoid potential recursive locking scenarios
for SVM involving mmap_sem and struct_mutex. This also removes
unnecessary serialization when creating buffer objects, and
between buffer object creation and GPU command submission.
Signed-off-by: Sushmita Susheelendra <ssusheel@codeaurora.org>
[robclark: squash in handling new locking for shrinker]
Signed-off-by: Rob Clark <robdclark@gmail.com>
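
The core idea of the change, before diving into the patch itself, can be shown with a small self-contained sketch. This is not the driver code: struct foo_bo and foo_bo_get_vaddr() are hypothetical stand-ins for msm_gem_object and msm_gem_get_vaddr(); only the locking pattern mirrors the patch. Per-buffer state (page list, kernel mapping, vmap count) is guarded by a mutex embedded in the object, so unrelated buffer objects no longer serialize on the device-global struct_mutex.

/*
 * Minimal sketch of per-buffer-object locking (hypothetical names,
 * assuming the in-kernel mutex/vmap APIs).
 */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>

struct foo_bo {
	struct mutex lock;		/* protects pages, vaddr, vmap_count */
	struct page **pages;
	void *vaddr;
	unsigned int vmap_count;
	unsigned int npages;
};

static void *foo_bo_get_vaddr(struct foo_bo *bo)
{
	void *ptr;

	mutex_lock(&bo->lock);		/* object-local, not dev->struct_mutex */
	/* bump the count before vmap() so a concurrent shrinker never sees
	 * the object as vunmap-able while we are still mapping it */
	bo->vmap_count++;
	if (!bo->vaddr) {
		bo->vaddr = vmap(bo->pages, bo->npages, VM_MAP,
				 pgprot_writecombine(PAGE_KERNEL));
		if (!bo->vaddr) {
			bo->vmap_count--;
			mutex_unlock(&bo->lock);
			return ERR_PTR(-ENOMEM);
		}
	}
	ptr = bo->vaddr;
	mutex_unlock(&bo->lock);
	return ptr;
}

Allocation paths that already hold struct_mutex (the a5xx ucode and GPMU loaders below) switch to the new msm_gem_new_locked() variant; plain msm_gem_new() now takes struct_mutex internally just long enough to add the object to the inactive list.
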
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c     |   8
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c   |   8
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c   |   4
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c        |   4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c  |   2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c   |   2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c             |   1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h             |   9
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c           |   6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c             | 274
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h             |  22
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c    |  16
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c      |   6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c         |  10
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c             |   4
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c              |   4
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c      |   2
17 files changed, 238 insertions, 144 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index f6a9eec71fec..b4b54f1c24bc 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -297,18 +297,18 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, | |||
297 | struct drm_gem_object *bo; | 297 | struct drm_gem_object *bo; |
298 | void *ptr; | 298 | void *ptr; |
299 | 299 | ||
300 | bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED); | 300 | bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED); |
301 | if (IS_ERR(bo)) | 301 | if (IS_ERR(bo)) |
302 | return bo; | 302 | return bo; |
303 | 303 | ||
304 | ptr = msm_gem_get_vaddr_locked(bo); | 304 | ptr = msm_gem_get_vaddr(bo); |
305 | if (!ptr) { | 305 | if (!ptr) { |
306 | drm_gem_object_unreference(bo); | 306 | drm_gem_object_unreference(bo); |
307 | return ERR_PTR(-ENOMEM); | 307 | return ERR_PTR(-ENOMEM); |
308 | } | 308 | } |
309 | 309 | ||
310 | if (iova) { | 310 | if (iova) { |
311 | int ret = msm_gem_get_iova_locked(bo, gpu->aspace, iova); | 311 | int ret = msm_gem_get_iova(bo, gpu->aspace, iova); |
312 | 312 | ||
313 | if (ret) { | 313 | if (ret) { |
314 | drm_gem_object_unreference(bo); | 314 | drm_gem_object_unreference(bo); |
@@ -318,7 +318,7 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, | |||
318 | 318 | ||
319 | memcpy(ptr, &fw->data[4], fw->size - 4); | 319 | memcpy(ptr, &fw->data[4], fw->size - 4); |
320 | 320 | ||
321 | msm_gem_put_vaddr_locked(bo); | 321 | msm_gem_put_vaddr(bo); |
322 | return bo; | 322 | return bo; |
323 | } | 323 | } |
324 | 324 | ||
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index feb7f4fd42fb..87af6eea0483 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -294,15 +294,15 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) | |||
294 | */ | 294 | */ |
295 | bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2; | 295 | bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2; |
296 | 296 | ||
297 | a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED); | 297 | a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED); |
298 | if (IS_ERR(a5xx_gpu->gpmu_bo)) | 298 | if (IS_ERR(a5xx_gpu->gpmu_bo)) |
299 | goto err; | 299 | goto err; |
300 | 300 | ||
301 | if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->aspace, | 301 | if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace, |
302 | &a5xx_gpu->gpmu_iova)) | 302 | &a5xx_gpu->gpmu_iova)) |
303 | goto err; | 303 | goto err; |
304 | 304 | ||
305 | ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo); | 305 | ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo); |
306 | if (!ptr) | 306 | if (!ptr) |
307 | goto err; | 307 | goto err; |
308 | 308 | ||
@@ -321,7 +321,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) | |||
321 | cmds_size -= _size; | 321 | cmds_size -= _size; |
322 | } | 322 | } |
323 | 323 | ||
324 | msm_gem_put_vaddr_locked(a5xx_gpu->gpmu_bo); | 324 | msm_gem_put_vaddr(a5xx_gpu->gpmu_bo); |
325 | a5xx_gpu->gpmu_dwords = dwords; | 325 | a5xx_gpu->gpmu_dwords = dwords; |
326 | 326 | ||
327 | goto out; | 327 | goto out; |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6fa694e6ae8c..f1ab2703674a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu) | |||
64 | 64 | ||
65 | DBG("%s", gpu->name); | 65 | DBG("%s", gpu->name); |
66 | 66 | ||
67 | ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->aspace, &gpu->rb_iova); | 67 | ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova); |
68 | if (ret) { | 68 | if (ret) { |
69 | gpu->rb_iova = 0; | 69 | gpu->rb_iova = 0; |
70 | dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret); | 70 | dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret); |
@@ -397,10 +397,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
397 | return ret; | 397 | return ret; |
398 | } | 398 | } |
399 | 399 | ||
400 | mutex_lock(&drm->struct_mutex); | ||
401 | adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs), | 400 | adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs), |
402 | MSM_BO_UNCACHED); | 401 | MSM_BO_UNCACHED); |
403 | mutex_unlock(&drm->struct_mutex); | ||
404 | if (IS_ERR(adreno_gpu->memptrs_bo)) { | 402 | if (IS_ERR(adreno_gpu->memptrs_bo)) { |
405 | ret = PTR_ERR(adreno_gpu->memptrs_bo); | 403 | ret = PTR_ERR(adreno_gpu->memptrs_bo); |
406 | adreno_gpu->memptrs_bo = NULL; | 404 | adreno_gpu->memptrs_bo = NULL; |
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 2e7077194b21..9e9c5696bc03 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -982,18 +982,16 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size) | |||
982 | uint64_t iova; | 982 | uint64_t iova; |
983 | 983 | ||
984 | if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { | 984 | if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { |
985 | mutex_lock(&dev->struct_mutex); | ||
986 | msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED); | 985 | msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED); |
987 | if (IS_ERR(msm_host->tx_gem_obj)) { | 986 | if (IS_ERR(msm_host->tx_gem_obj)) { |
988 | ret = PTR_ERR(msm_host->tx_gem_obj); | 987 | ret = PTR_ERR(msm_host->tx_gem_obj); |
989 | pr_err("%s: failed to allocate gem, %d\n", | 988 | pr_err("%s: failed to allocate gem, %d\n", |
990 | __func__, ret); | 989 | __func__, ret); |
991 | msm_host->tx_gem_obj = NULL; | 990 | msm_host->tx_gem_obj = NULL; |
992 | mutex_unlock(&dev->struct_mutex); | ||
993 | return ret; | 991 | return ret; |
994 | } | 992 | } |
995 | 993 | ||
996 | ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, | 994 | ret = msm_gem_get_iova(msm_host->tx_gem_obj, |
997 | priv->kms->aspace, &iova); | 995 | priv->kms->aspace, &iova); |
998 | mutex_unlock(&dev->struct_mutex); | 996 | mutex_unlock(&dev->struct_mutex); |
999 | if (ret) { | 997 | if (ret) { |
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 59153a4ebd18..615e1def64d9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -374,7 +374,7 @@ static void update_cursor(struct drm_crtc *crtc) | |||
374 | if (next_bo) { | 374 | if (next_bo) { |
375 | /* take a obj ref + iova ref when we start scanning out: */ | 375 | /* take a obj ref + iova ref when we start scanning out: */ |
376 | drm_gem_object_reference(next_bo); | 376 | drm_gem_object_reference(next_bo); |
377 | msm_gem_get_iova_locked(next_bo, kms->aspace, &iova); | 377 | msm_gem_get_iova(next_bo, kms->aspace, &iova); |
378 | 378 | ||
379 | /* enable cursor: */ | 379 | /* enable cursor: */ |
380 | mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma), | 380 | mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma), |
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 3d96687a1b39..bcd1f5cac72c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -528,9 +528,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) | |||
528 | goto fail; | 528 | goto fail; |
529 | } | 529 | } |
530 | 530 | ||
531 | mutex_lock(&dev->struct_mutex); | ||
532 | mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC); | 531 | mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC); |
533 | mutex_unlock(&dev->struct_mutex); | ||
534 | if (IS_ERR(mdp4_kms->blank_cursor_bo)) { | 532 | if (IS_ERR(mdp4_kms->blank_cursor_bo)) { |
535 | ret = PTR_ERR(mdp4_kms->blank_cursor_bo); | 533 | ret = PTR_ERR(mdp4_kms->blank_cursor_bo); |
536 | dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret); | 534 | dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret); |
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 506de3862c18..f49f6ac5585c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -336,6 +336,7 @@ static int msm_init_vram(struct drm_device *dev) | |||
336 | priv->vram.size = size; | 336 | priv->vram.size = size; |
337 | 337 | ||
338 | drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); | 338 | drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); |
339 | spin_lock_init(&priv->vram.lock); | ||
339 | 340 | ||
340 | attrs |= DMA_ATTR_NO_KERNEL_MAPPING; | 341 | attrs |= DMA_ATTR_NO_KERNEL_MAPPING; |
341 | attrs |= DMA_ATTR_WRITE_COMBINE; | 342 | attrs |= DMA_ATTR_WRITE_COMBINE; |
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 1d47ec467ded..fc8d24f7c084 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -149,6 +149,7 @@ struct msm_drm_private { | |||
149 | * and position mm_node->start is in # of pages: | 149 | * and position mm_node->start is in # of pages: |
150 | */ | 150 | */ |
151 | struct drm_mm mm; | 151 | struct drm_mm mm; |
152 | spinlock_t lock; /* Protects drm_mm node allocation/removal */ | ||
152 | } vram; | 153 | } vram; |
153 | 154 | ||
154 | struct notifier_block vmap_notifier; | 155 | struct notifier_block vmap_notifier; |
@@ -198,8 +199,6 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj, | |||
198 | int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); | 199 | int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
199 | int msm_gem_fault(struct vm_fault *vmf); | 200 | int msm_gem_fault(struct vm_fault *vmf); |
200 | uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); | 201 | uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); |
201 | int msm_gem_get_iova_locked(struct drm_gem_object *obj, | ||
202 | struct msm_gem_address_space *aspace, uint64_t *iova); | ||
203 | int msm_gem_get_iova(struct drm_gem_object *obj, | 202 | int msm_gem_get_iova(struct drm_gem_object *obj, |
204 | struct msm_gem_address_space *aspace, uint64_t *iova); | 203 | struct msm_gem_address_space *aspace, uint64_t *iova); |
205 | uint64_t msm_gem_iova(struct drm_gem_object *obj, | 204 | uint64_t msm_gem_iova(struct drm_gem_object *obj, |
@@ -221,13 +220,9 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, | |||
221 | struct dma_buf_attachment *attach, struct sg_table *sg); | 220 | struct dma_buf_attachment *attach, struct sg_table *sg); |
222 | int msm_gem_prime_pin(struct drm_gem_object *obj); | 221 | int msm_gem_prime_pin(struct drm_gem_object *obj); |
223 | void msm_gem_prime_unpin(struct drm_gem_object *obj); | 222 | void msm_gem_prime_unpin(struct drm_gem_object *obj); |
224 | void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj); | ||
225 | void *msm_gem_get_vaddr(struct drm_gem_object *obj); | 223 | void *msm_gem_get_vaddr(struct drm_gem_object *obj); |
226 | void msm_gem_put_vaddr_locked(struct drm_gem_object *obj); | ||
227 | void msm_gem_put_vaddr(struct drm_gem_object *obj); | 224 | void msm_gem_put_vaddr(struct drm_gem_object *obj); |
228 | int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); | 225 | int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); |
229 | void msm_gem_purge(struct drm_gem_object *obj); | ||
230 | void msm_gem_vunmap(struct drm_gem_object *obj); | ||
231 | int msm_gem_sync_object(struct drm_gem_object *obj, | 226 | int msm_gem_sync_object(struct drm_gem_object *obj, |
232 | struct msm_fence_context *fctx, bool exclusive); | 227 | struct msm_fence_context *fctx, bool exclusive); |
233 | void msm_gem_move_to_active(struct drm_gem_object *obj, | 228 | void msm_gem_move_to_active(struct drm_gem_object *obj, |
@@ -240,6 +235,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, | |||
240 | uint32_t size, uint32_t flags, uint32_t *handle); | 235 | uint32_t size, uint32_t flags, uint32_t *handle); |
241 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, | 236 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, |
242 | uint32_t size, uint32_t flags); | 237 | uint32_t size, uint32_t flags); |
238 | struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev, | ||
239 | uint32_t size, uint32_t flags); | ||
243 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, | 240 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, |
244 | struct dma_buf *dmabuf, struct sg_table *sgt); | 241 | struct dma_buf *dmabuf, struct sg_table *sgt); |
245 | 242 | ||
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 803ed272dc6d..5ecf4ff9a059 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -97,10 +97,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, | |||
97 | /* allocate backing bo */ | 97 | /* allocate backing bo */ |
98 | size = mode_cmd.pitches[0] * mode_cmd.height; | 98 | size = mode_cmd.pitches[0] * mode_cmd.height; |
99 | DBG("allocating %d bytes for fb %d", size, dev->primary->index); | 99 | DBG("allocating %d bytes for fb %d", size, dev->primary->index); |
100 | mutex_lock(&dev->struct_mutex); | ||
101 | fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | | 100 | fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | |
102 | MSM_BO_WC | MSM_BO_STOLEN); | 101 | MSM_BO_WC | MSM_BO_STOLEN); |
103 | mutex_unlock(&dev->struct_mutex); | ||
104 | if (IS_ERR(fbdev->bo)) { | 102 | if (IS_ERR(fbdev->bo)) { |
105 | ret = PTR_ERR(fbdev->bo); | 103 | ret = PTR_ERR(fbdev->bo); |
106 | fbdev->bo = NULL; | 104 | fbdev->bo = NULL; |
@@ -126,7 +124,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, | |||
126 | * in panic (ie. lock-safe, etc) we could avoid pinning the | 124 | * in panic (ie. lock-safe, etc) we could avoid pinning the |
127 | * buffer now: | 125 | * buffer now: |
128 | */ | 126 | */ |
129 | ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->aspace, &paddr); | 127 | ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr); |
130 | if (ret) { | 128 | if (ret) { |
131 | dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret); | 129 | dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret); |
132 | goto fail_unlock; | 130 | goto fail_unlock; |
@@ -155,7 +153,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, | |||
155 | 153 | ||
156 | dev->mode_config.fb_base = paddr; | 154 | dev->mode_config.fb_base = paddr; |
157 | 155 | ||
158 | fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo); | 156 | fbi->screen_base = msm_gem_get_vaddr(fbdev->bo); |
159 | if (IS_ERR(fbi->screen_base)) { | 157 | if (IS_ERR(fbi->screen_base)) { |
160 | ret = PTR_ERR(fbi->screen_base); | 158 | ret = PTR_ERR(fbi->screen_base); |
161 | goto fail_unlock; | 159 | goto fail_unlock; |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 9951c78ee215..65f35544c1ec 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -26,6 +26,9 @@ | |||
26 | #include "msm_gpu.h" | 26 | #include "msm_gpu.h" |
27 | #include "msm_mmu.h" | 27 | #include "msm_mmu.h" |
28 | 28 | ||
29 | static void msm_gem_vunmap_locked(struct drm_gem_object *obj); | ||
30 | |||
31 | |||
29 | static dma_addr_t physaddr(struct drm_gem_object *obj) | 32 | static dma_addr_t physaddr(struct drm_gem_object *obj) |
30 | { | 33 | { |
31 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 34 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
@@ -41,8 +44,7 @@ static bool use_pages(struct drm_gem_object *obj) | |||
41 | } | 44 | } |
42 | 45 | ||
43 | /* allocate pages from VRAM carveout, used when no IOMMU: */ | 46 | /* allocate pages from VRAM carveout, used when no IOMMU: */ |
44 | static struct page **get_pages_vram(struct drm_gem_object *obj, | 47 | static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) |
45 | int npages) | ||
46 | { | 48 | { |
47 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 49 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
48 | struct msm_drm_private *priv = obj->dev->dev_private; | 50 | struct msm_drm_private *priv = obj->dev->dev_private; |
@@ -54,7 +56,9 @@ static struct page **get_pages_vram(struct drm_gem_object *obj, | |||
54 | if (!p) | 56 | if (!p) |
55 | return ERR_PTR(-ENOMEM); | 57 | return ERR_PTR(-ENOMEM); |
56 | 58 | ||
59 | spin_lock(&priv->vram.lock); | ||
57 | ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages); | 60 | ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages); |
61 | spin_unlock(&priv->vram.lock); | ||
58 | if (ret) { | 62 | if (ret) { |
59 | kvfree(p); | 63 | kvfree(p); |
60 | return ERR_PTR(ret); | 64 | return ERR_PTR(ret); |
@@ -69,7 +73,6 @@ static struct page **get_pages_vram(struct drm_gem_object *obj, | |||
69 | return p; | 73 | return p; |
70 | } | 74 | } |
71 | 75 | ||
72 | /* called with dev->struct_mutex held */ | ||
73 | static struct page **get_pages(struct drm_gem_object *obj) | 76 | static struct page **get_pages(struct drm_gem_object *obj) |
74 | { | 77 | { |
75 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 78 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
@@ -109,6 +112,18 @@ static struct page **get_pages(struct drm_gem_object *obj) | |||
109 | return msm_obj->pages; | 112 | return msm_obj->pages; |
110 | } | 113 | } |
111 | 114 | ||
115 | static void put_pages_vram(struct drm_gem_object *obj) | ||
116 | { | ||
117 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
118 | struct msm_drm_private *priv = obj->dev->dev_private; | ||
119 | |||
120 | spin_lock(&priv->vram.lock); | ||
121 | drm_mm_remove_node(msm_obj->vram_node); | ||
122 | spin_unlock(&priv->vram.lock); | ||
123 | |||
124 | kvfree(msm_obj->pages); | ||
125 | } | ||
126 | |||
112 | static void put_pages(struct drm_gem_object *obj) | 127 | static void put_pages(struct drm_gem_object *obj) |
113 | { | 128 | { |
114 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 129 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
@@ -125,10 +140,8 @@ static void put_pages(struct drm_gem_object *obj) | |||
125 | 140 | ||
126 | if (use_pages(obj)) | 141 | if (use_pages(obj)) |
127 | drm_gem_put_pages(obj, msm_obj->pages, true, false); | 142 | drm_gem_put_pages(obj, msm_obj->pages, true, false); |
128 | else { | 143 | else |
129 | drm_mm_remove_node(msm_obj->vram_node); | 144 | put_pages_vram(obj); |
130 | kvfree(msm_obj->pages); | ||
131 | } | ||
132 | 145 | ||
133 | msm_obj->pages = NULL; | 146 | msm_obj->pages = NULL; |
134 | } | 147 | } |
@@ -136,11 +149,18 @@ static void put_pages(struct drm_gem_object *obj) | |||
136 | 149 | ||
137 | struct page **msm_gem_get_pages(struct drm_gem_object *obj) | 150 | struct page **msm_gem_get_pages(struct drm_gem_object *obj) |
138 | { | 151 | { |
139 | struct drm_device *dev = obj->dev; | 152 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
140 | struct page **p; | 153 | struct page **p; |
141 | mutex_lock(&dev->struct_mutex); | 154 | |
155 | mutex_lock(&msm_obj->lock); | ||
156 | |||
157 | if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { | ||
158 | mutex_unlock(&msm_obj->lock); | ||
159 | return ERR_PTR(-EBUSY); | ||
160 | } | ||
161 | |||
142 | p = get_pages(obj); | 162 | p = get_pages(obj); |
143 | mutex_unlock(&dev->struct_mutex); | 163 | mutex_unlock(&msm_obj->lock); |
144 | return p; | 164 | return p; |
145 | } | 165 | } |
146 | 166 | ||
@@ -195,28 +215,25 @@ int msm_gem_fault(struct vm_fault *vmf) | |||
195 | { | 215 | { |
196 | struct vm_area_struct *vma = vmf->vma; | 216 | struct vm_area_struct *vma = vmf->vma; |
197 | struct drm_gem_object *obj = vma->vm_private_data; | 217 | struct drm_gem_object *obj = vma->vm_private_data; |
198 | struct drm_device *dev = obj->dev; | 218 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
199 | struct msm_drm_private *priv = dev->dev_private; | ||
200 | struct page **pages; | 219 | struct page **pages; |
201 | unsigned long pfn; | 220 | unsigned long pfn; |
202 | pgoff_t pgoff; | 221 | pgoff_t pgoff; |
203 | int ret; | 222 | int ret; |
204 | 223 | ||
205 | /* This should only happen if userspace tries to pass a mmap'd | 224 | /* |
206 | * but unfaulted gem bo vaddr into submit ioctl, triggering | 225 | * vm_ops.open/drm_gem_mmap_obj and close get and put |
207 | * a page fault while struct_mutex is already held. This is | 226 | * a reference on obj. So, we dont need to hold one here. |
208 | * not a valid use-case so just bail. | ||
209 | */ | ||
210 | if (priv->struct_mutex_task == current) | ||
211 | return VM_FAULT_SIGBUS; | ||
212 | |||
213 | /* Make sure we don't parallel update on a fault, nor move or remove | ||
214 | * something from beneath our feet | ||
215 | */ | 227 | */ |
216 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 228 | ret = mutex_lock_interruptible(&msm_obj->lock); |
217 | if (ret) | 229 | if (ret) |
218 | goto out; | 230 | goto out; |
219 | 231 | ||
232 | if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { | ||
233 | mutex_unlock(&msm_obj->lock); | ||
234 | return VM_FAULT_SIGBUS; | ||
235 | } | ||
236 | |||
220 | /* make sure we have pages attached now */ | 237 | /* make sure we have pages attached now */ |
221 | pages = get_pages(obj); | 238 | pages = get_pages(obj); |
222 | if (IS_ERR(pages)) { | 239 | if (IS_ERR(pages)) { |
@@ -235,7 +252,7 @@ int msm_gem_fault(struct vm_fault *vmf) | |||
235 | ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); | 252 | ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); |
236 | 253 | ||
237 | out_unlock: | 254 | out_unlock: |
238 | mutex_unlock(&dev->struct_mutex); | 255 | mutex_unlock(&msm_obj->lock); |
239 | out: | 256 | out: |
240 | switch (ret) { | 257 | switch (ret) { |
241 | case -EAGAIN: | 258 | case -EAGAIN: |
@@ -259,9 +276,10 @@ out: | |||
259 | static uint64_t mmap_offset(struct drm_gem_object *obj) | 276 | static uint64_t mmap_offset(struct drm_gem_object *obj) |
260 | { | 277 | { |
261 | struct drm_device *dev = obj->dev; | 278 | struct drm_device *dev = obj->dev; |
279 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
262 | int ret; | 280 | int ret; |
263 | 281 | ||
264 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 282 | WARN_ON(!mutex_is_locked(&msm_obj->lock)); |
265 | 283 | ||
266 | /* Make it mmapable */ | 284 | /* Make it mmapable */ |
267 | ret = drm_gem_create_mmap_offset(obj); | 285 | ret = drm_gem_create_mmap_offset(obj); |
@@ -277,9 +295,11 @@ static uint64_t mmap_offset(struct drm_gem_object *obj) | |||
277 | uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) | 295 | uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) |
278 | { | 296 | { |
279 | uint64_t offset; | 297 | uint64_t offset; |
280 | mutex_lock(&obj->dev->struct_mutex); | 298 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
299 | |||
300 | mutex_lock(&msm_obj->lock); | ||
281 | offset = mmap_offset(obj); | 301 | offset = mmap_offset(obj); |
282 | mutex_unlock(&obj->dev->struct_mutex); | 302 | mutex_unlock(&msm_obj->lock); |
283 | return offset; | 303 | return offset; |
284 | } | 304 | } |
285 | 305 | ||
@@ -289,6 +309,8 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, | |||
289 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 309 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
290 | struct msm_gem_vma *vma; | 310 | struct msm_gem_vma *vma; |
291 | 311 | ||
312 | WARN_ON(!mutex_is_locked(&msm_obj->lock)); | ||
313 | |||
292 | vma = kzalloc(sizeof(*vma), GFP_KERNEL); | 314 | vma = kzalloc(sizeof(*vma), GFP_KERNEL); |
293 | if (!vma) | 315 | if (!vma) |
294 | return ERR_PTR(-ENOMEM); | 316 | return ERR_PTR(-ENOMEM); |
@@ -306,7 +328,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, | |||
306 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 328 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
307 | struct msm_gem_vma *vma; | 329 | struct msm_gem_vma *vma; |
308 | 330 | ||
309 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | 331 | WARN_ON(!mutex_is_locked(&msm_obj->lock)); |
310 | 332 | ||
311 | list_for_each_entry(vma, &msm_obj->vmas, list) { | 333 | list_for_each_entry(vma, &msm_obj->vmas, list) { |
312 | if (vma->aspace == aspace) | 334 | if (vma->aspace == aspace) |
@@ -325,13 +347,14 @@ static void del_vma(struct msm_gem_vma *vma) | |||
325 | kfree(vma); | 347 | kfree(vma); |
326 | } | 348 | } |
327 | 349 | ||
350 | /* Called with msm_obj->lock locked */ | ||
328 | static void | 351 | static void |
329 | put_iova(struct drm_gem_object *obj) | 352 | put_iova(struct drm_gem_object *obj) |
330 | { | 353 | { |
331 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 354 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
332 | struct msm_gem_vma *vma, *tmp; | 355 | struct msm_gem_vma *vma, *tmp; |
333 | 356 | ||
334 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | 357 | WARN_ON(!mutex_is_locked(&msm_obj->lock)); |
335 | 358 | ||
336 | list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { | 359 | list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { |
337 | msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt); | 360 | msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt); |
@@ -339,21 +362,20 @@ put_iova(struct drm_gem_object *obj) | |||
339 | } | 362 | } |
340 | } | 363 | } |
341 | 364 | ||
342 | /* should be called under struct_mutex.. although it can be called | 365 | /* get iova, taking a reference. Should have a matching put */ |
343 | * from atomic context without struct_mutex to acquire an extra | 366 | int msm_gem_get_iova(struct drm_gem_object *obj, |
344 | * iova ref if you know one is already held. | ||
345 | * | ||
346 | * That means when I do eventually need to add support for unpinning | ||
347 | * the refcnt counter needs to be atomic_t. | ||
348 | */ | ||
349 | int msm_gem_get_iova_locked(struct drm_gem_object *obj, | ||
350 | struct msm_gem_address_space *aspace, uint64_t *iova) | 367 | struct msm_gem_address_space *aspace, uint64_t *iova) |
351 | { | 368 | { |
352 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 369 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
353 | struct msm_gem_vma *vma; | 370 | struct msm_gem_vma *vma; |
354 | int ret = 0; | 371 | int ret = 0; |
355 | 372 | ||
356 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | 373 | mutex_lock(&msm_obj->lock); |
374 | |||
375 | if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { | ||
376 | mutex_unlock(&msm_obj->lock); | ||
377 | return -EBUSY; | ||
378 | } | ||
357 | 379 | ||
358 | vma = lookup_vma(obj, aspace); | 380 | vma = lookup_vma(obj, aspace); |
359 | 381 | ||
@@ -377,24 +399,14 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, | |||
377 | } | 399 | } |
378 | 400 | ||
379 | *iova = vma->iova; | 401 | *iova = vma->iova; |
402 | |||
403 | mutex_unlock(&msm_obj->lock); | ||
380 | return 0; | 404 | return 0; |
381 | 405 | ||
382 | fail: | 406 | fail: |
383 | del_vma(vma); | 407 | del_vma(vma); |
384 | 408 | ||
385 | return ret; | 409 | mutex_unlock(&msm_obj->lock); |
386 | } | ||
387 | |||
388 | /* get iova, taking a reference. Should have a matching put */ | ||
389 | int msm_gem_get_iova(struct drm_gem_object *obj, | ||
390 | struct msm_gem_address_space *aspace, uint64_t *iova) | ||
391 | { | ||
392 | int ret; | ||
393 | |||
394 | mutex_lock(&obj->dev->struct_mutex); | ||
395 | ret = msm_gem_get_iova_locked(obj, aspace, iova); | ||
396 | mutex_unlock(&obj->dev->struct_mutex); | ||
397 | |||
398 | return ret; | 410 | return ret; |
399 | } | 411 | } |
400 | 412 | ||
@@ -404,11 +416,12 @@ int msm_gem_get_iova(struct drm_gem_object *obj, | |||
404 | uint64_t msm_gem_iova(struct drm_gem_object *obj, | 416 | uint64_t msm_gem_iova(struct drm_gem_object *obj, |
405 | struct msm_gem_address_space *aspace) | 417 | struct msm_gem_address_space *aspace) |
406 | { | 418 | { |
419 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
407 | struct msm_gem_vma *vma; | 420 | struct msm_gem_vma *vma; |
408 | 421 | ||
409 | mutex_lock(&obj->dev->struct_mutex); | 422 | mutex_lock(&msm_obj->lock); |
410 | vma = lookup_vma(obj, aspace); | 423 | vma = lookup_vma(obj, aspace); |
411 | mutex_unlock(&obj->dev->struct_mutex); | 424 | mutex_unlock(&msm_obj->lock); |
412 | WARN_ON(!vma); | 425 | WARN_ON(!vma); |
413 | 426 | ||
414 | return vma ? vma->iova : 0; | 427 | return vma ? vma->iova : 0; |
@@ -455,45 +468,57 @@ fail: | |||
455 | return ret; | 468 | return ret; |
456 | } | 469 | } |
457 | 470 | ||
458 | void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj) | 471 | void *msm_gem_get_vaddr(struct drm_gem_object *obj) |
459 | { | 472 | { |
460 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 473 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
461 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | 474 | int ret = 0; |
475 | |||
476 | mutex_lock(&msm_obj->lock); | ||
477 | |||
478 | if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { | ||
479 | mutex_unlock(&msm_obj->lock); | ||
480 | return ERR_PTR(-EBUSY); | ||
481 | } | ||
482 | |||
483 | /* increment vmap_count *before* vmap() call, so shrinker can | ||
484 | * check vmap_count (is_vunmapable()) outside of msm_obj->lock. | ||
485 | * This guarantees that we won't try to msm_gem_vunmap() this | ||
486 | * same object from within the vmap() call (while we already | ||
487 | * hold msm_obj->lock) | ||
488 | */ | ||
489 | msm_obj->vmap_count++; | ||
490 | |||
462 | if (!msm_obj->vaddr) { | 491 | if (!msm_obj->vaddr) { |
463 | struct page **pages = get_pages(obj); | 492 | struct page **pages = get_pages(obj); |
464 | if (IS_ERR(pages)) | 493 | if (IS_ERR(pages)) { |
465 | return ERR_CAST(pages); | 494 | ret = PTR_ERR(pages); |
495 | goto fail; | ||
496 | } | ||
466 | msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, | 497 | msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, |
467 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); | 498 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); |
468 | if (msm_obj->vaddr == NULL) | 499 | if (msm_obj->vaddr == NULL) { |
469 | return ERR_PTR(-ENOMEM); | 500 | ret = -ENOMEM; |
501 | goto fail; | ||
502 | } | ||
470 | } | 503 | } |
471 | msm_obj->vmap_count++; | 504 | |
505 | mutex_unlock(&msm_obj->lock); | ||
472 | return msm_obj->vaddr; | 506 | return msm_obj->vaddr; |
473 | } | ||
474 | 507 | ||
475 | void *msm_gem_get_vaddr(struct drm_gem_object *obj) | 508 | fail: |
476 | { | 509 | msm_obj->vmap_count--; |
477 | void *ret; | 510 | mutex_unlock(&msm_obj->lock); |
478 | mutex_lock(&obj->dev->struct_mutex); | 511 | return ERR_PTR(ret); |
479 | ret = msm_gem_get_vaddr_locked(obj); | ||
480 | mutex_unlock(&obj->dev->struct_mutex); | ||
481 | return ret; | ||
482 | } | 512 | } |
483 | 513 | ||
484 | void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) | 514 | void msm_gem_put_vaddr(struct drm_gem_object *obj) |
485 | { | 515 | { |
486 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 516 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
487 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | 517 | |
518 | mutex_lock(&msm_obj->lock); | ||
488 | WARN_ON(msm_obj->vmap_count < 1); | 519 | WARN_ON(msm_obj->vmap_count < 1); |
489 | msm_obj->vmap_count--; | 520 | msm_obj->vmap_count--; |
490 | } | 521 | mutex_unlock(&msm_obj->lock); |
491 | |||
492 | void msm_gem_put_vaddr(struct drm_gem_object *obj) | ||
493 | { | ||
494 | mutex_lock(&obj->dev->struct_mutex); | ||
495 | msm_gem_put_vaddr_locked(obj); | ||
496 | mutex_unlock(&obj->dev->struct_mutex); | ||
497 | } | 522 | } |
498 | 523 | ||
499 | /* Update madvise status, returns true if not purged, else | 524 | /* Update madvise status, returns true if not purged, else |
@@ -503,15 +528,21 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv) | |||
503 | { | 528 | { |
504 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 529 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
505 | 530 | ||
531 | mutex_lock(&msm_obj->lock); | ||
532 | |||
506 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | 533 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); |
507 | 534 | ||
508 | if (msm_obj->madv != __MSM_MADV_PURGED) | 535 | if (msm_obj->madv != __MSM_MADV_PURGED) |
509 | msm_obj->madv = madv; | 536 | msm_obj->madv = madv; |
510 | 537 | ||
511 | return (msm_obj->madv != __MSM_MADV_PURGED); | 538 | madv = msm_obj->madv; |
539 | |||
540 | mutex_unlock(&msm_obj->lock); | ||
541 | |||
542 | return (madv != __MSM_MADV_PURGED); | ||
512 | } | 543 | } |
513 | 544 | ||
514 | void msm_gem_purge(struct drm_gem_object *obj) | 545 | void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass) |
515 | { | 546 | { |
516 | struct drm_device *dev = obj->dev; | 547 | struct drm_device *dev = obj->dev; |
517 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 548 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
@@ -520,9 +551,11 @@ void msm_gem_purge(struct drm_gem_object *obj) | |||
520 | WARN_ON(!is_purgeable(msm_obj)); | 551 | WARN_ON(!is_purgeable(msm_obj)); |
521 | WARN_ON(obj->import_attach); | 552 | WARN_ON(obj->import_attach); |
522 | 553 | ||
554 | mutex_lock_nested(&msm_obj->lock, subclass); | ||
555 | |||
523 | put_iova(obj); | 556 | put_iova(obj); |
524 | 557 | ||
525 | msm_gem_vunmap(obj); | 558 | msm_gem_vunmap_locked(obj); |
526 | 559 | ||
527 | put_pages(obj); | 560 | put_pages(obj); |
528 | 561 | ||
@@ -540,12 +573,16 @@ void msm_gem_purge(struct drm_gem_object *obj) | |||
540 | 573 | ||
541 | invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, | 574 | invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, |
542 | 0, (loff_t)-1); | 575 | 0, (loff_t)-1); |
576 | |||
577 | mutex_unlock(&msm_obj->lock); | ||
543 | } | 578 | } |
544 | 579 | ||
545 | void msm_gem_vunmap(struct drm_gem_object *obj) | 580 | static void msm_gem_vunmap_locked(struct drm_gem_object *obj) |
546 | { | 581 | { |
547 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 582 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
548 | 583 | ||
584 | WARN_ON(!mutex_is_locked(&msm_obj->lock)); | ||
585 | |||
549 | if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj))) | 586 | if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj))) |
550 | return; | 587 | return; |
551 | 588 | ||
@@ -553,6 +590,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj) | |||
553 | msm_obj->vaddr = NULL; | 590 | msm_obj->vaddr = NULL; |
554 | } | 591 | } |
555 | 592 | ||
593 | void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass) | ||
594 | { | ||
595 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
596 | |||
597 | mutex_lock_nested(&msm_obj->lock, subclass); | ||
598 | msm_gem_vunmap_locked(obj); | ||
599 | mutex_unlock(&msm_obj->lock); | ||
600 | } | ||
601 | |||
556 | /* must be called before _move_to_active().. */ | 602 | /* must be called before _move_to_active().. */ |
557 | int msm_gem_sync_object(struct drm_gem_object *obj, | 603 | int msm_gem_sync_object(struct drm_gem_object *obj, |
558 | struct msm_fence_context *fctx, bool exclusive) | 604 | struct msm_fence_context *fctx, bool exclusive) |
@@ -674,7 +720,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) | |||
674 | uint64_t off = drm_vma_node_start(&obj->vma_node); | 720 | uint64_t off = drm_vma_node_start(&obj->vma_node); |
675 | const char *madv; | 721 | const char *madv; |
676 | 722 | ||
677 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | 723 | mutex_lock(&msm_obj->lock); |
678 | 724 | ||
679 | switch (msm_obj->madv) { | 725 | switch (msm_obj->madv) { |
680 | case __MSM_MADV_PURGED: | 726 | case __MSM_MADV_PURGED: |
@@ -715,6 +761,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) | |||
715 | if (fence) | 761 | if (fence) |
716 | describe_fence(fence, "Exclusive", m); | 762 | describe_fence(fence, "Exclusive", m); |
717 | rcu_read_unlock(); | 763 | rcu_read_unlock(); |
764 | |||
765 | mutex_unlock(&msm_obj->lock); | ||
718 | } | 766 | } |
719 | 767 | ||
720 | void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) | 768 | void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) |
@@ -747,6 +795,8 @@ void msm_gem_free_object(struct drm_gem_object *obj) | |||
747 | 795 | ||
748 | list_del(&msm_obj->mm_list); | 796 | list_del(&msm_obj->mm_list); |
749 | 797 | ||
798 | mutex_lock(&msm_obj->lock); | ||
799 | |||
750 | put_iova(obj); | 800 | put_iova(obj); |
751 | 801 | ||
752 | if (obj->import_attach) { | 802 | if (obj->import_attach) { |
@@ -761,7 +811,7 @@ void msm_gem_free_object(struct drm_gem_object *obj) | |||
761 | 811 | ||
762 | drm_prime_gem_destroy(obj, msm_obj->sgt); | 812 | drm_prime_gem_destroy(obj, msm_obj->sgt); |
763 | } else { | 813 | } else { |
764 | msm_gem_vunmap(obj); | 814 | msm_gem_vunmap_locked(obj); |
765 | put_pages(obj); | 815 | put_pages(obj); |
766 | } | 816 | } |
767 | 817 | ||
@@ -770,6 +820,7 @@ void msm_gem_free_object(struct drm_gem_object *obj) | |||
770 | 820 | ||
771 | drm_gem_object_release(obj); | 821 | drm_gem_object_release(obj); |
772 | 822 | ||
823 | mutex_unlock(&msm_obj->lock); | ||
773 | kfree(msm_obj); | 824 | kfree(msm_obj); |
774 | } | 825 | } |
775 | 826 | ||
@@ -780,14 +831,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, | |||
780 | struct drm_gem_object *obj; | 831 | struct drm_gem_object *obj; |
781 | int ret; | 832 | int ret; |
782 | 833 | ||
783 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
784 | if (ret) | ||
785 | return ret; | ||
786 | |||
787 | obj = msm_gem_new(dev, size, flags); | 834 | obj = msm_gem_new(dev, size, flags); |
788 | 835 | ||
789 | mutex_unlock(&dev->struct_mutex); | ||
790 | |||
791 | if (IS_ERR(obj)) | 836 | if (IS_ERR(obj)) |
792 | return PTR_ERR(obj); | 837 | return PTR_ERR(obj); |
793 | 838 | ||
@@ -802,13 +847,12 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, | |||
802 | static int msm_gem_new_impl(struct drm_device *dev, | 847 | static int msm_gem_new_impl(struct drm_device *dev, |
803 | uint32_t size, uint32_t flags, | 848 | uint32_t size, uint32_t flags, |
804 | struct reservation_object *resv, | 849 | struct reservation_object *resv, |
805 | struct drm_gem_object **obj) | 850 | struct drm_gem_object **obj, |
851 | bool struct_mutex_locked) | ||
806 | { | 852 | { |
807 | struct msm_drm_private *priv = dev->dev_private; | 853 | struct msm_drm_private *priv = dev->dev_private; |
808 | struct msm_gem_object *msm_obj; | 854 | struct msm_gem_object *msm_obj; |
809 | 855 | ||
810 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
811 | |||
812 | switch (flags & MSM_BO_CACHE_MASK) { | 856 | switch (flags & MSM_BO_CACHE_MASK) { |
813 | case MSM_BO_UNCACHED: | 857 | case MSM_BO_UNCACHED: |
814 | case MSM_BO_CACHED: | 858 | case MSM_BO_CACHED: |
@@ -824,6 +868,8 @@ static int msm_gem_new_impl(struct drm_device *dev, | |||
824 | if (!msm_obj) | 868 | if (!msm_obj) |
825 | return -ENOMEM; | 869 | return -ENOMEM; |
826 | 870 | ||
871 | mutex_init(&msm_obj->lock); | ||
872 | |||
827 | msm_obj->flags = flags; | 873 | msm_obj->flags = flags; |
828 | msm_obj->madv = MSM_MADV_WILLNEED; | 874 | msm_obj->madv = MSM_MADV_WILLNEED; |
829 | 875 | ||
@@ -837,23 +883,28 @@ static int msm_gem_new_impl(struct drm_device *dev, | |||
837 | INIT_LIST_HEAD(&msm_obj->submit_entry); | 883 | INIT_LIST_HEAD(&msm_obj->submit_entry); |
838 | INIT_LIST_HEAD(&msm_obj->vmas); | 884 | INIT_LIST_HEAD(&msm_obj->vmas); |
839 | 885 | ||
840 | list_add_tail(&msm_obj->mm_list, &priv->inactive_list); | 886 | if (struct_mutex_locked) { |
887 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
888 | list_add_tail(&msm_obj->mm_list, &priv->inactive_list); | ||
889 | } else { | ||
890 | mutex_lock(&dev->struct_mutex); | ||
891 | list_add_tail(&msm_obj->mm_list, &priv->inactive_list); | ||
892 | mutex_unlock(&dev->struct_mutex); | ||
893 | } | ||
841 | 894 | ||
842 | *obj = &msm_obj->base; | 895 | *obj = &msm_obj->base; |
843 | 896 | ||
844 | return 0; | 897 | return 0; |
845 | } | 898 | } |
846 | 899 | ||
847 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, | 900 | static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, |
848 | uint32_t size, uint32_t flags) | 901 | uint32_t size, uint32_t flags, bool struct_mutex_locked) |
849 | { | 902 | { |
850 | struct msm_drm_private *priv = dev->dev_private; | 903 | struct msm_drm_private *priv = dev->dev_private; |
851 | struct drm_gem_object *obj = NULL; | 904 | struct drm_gem_object *obj = NULL; |
852 | bool use_vram = false; | 905 | bool use_vram = false; |
853 | int ret; | 906 | int ret; |
854 | 907 | ||
855 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
856 | |||
857 | size = PAGE_ALIGN(size); | 908 | size = PAGE_ALIGN(size); |
858 | 909 | ||
859 | if (!iommu_present(&platform_bus_type)) | 910 | if (!iommu_present(&platform_bus_type)) |
@@ -870,7 +921,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, | |||
870 | if (size == 0) | 921 | if (size == 0) |
871 | return ERR_PTR(-EINVAL); | 922 | return ERR_PTR(-EINVAL); |
872 | 923 | ||
873 | ret = msm_gem_new_impl(dev, size, flags, NULL, &obj); | 924 | ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked); |
874 | if (ret) | 925 | if (ret) |
875 | goto fail; | 926 | goto fail; |
876 | 927 | ||
@@ -904,10 +955,22 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, | |||
904 | return obj; | 955 | return obj; |
905 | 956 | ||
906 | fail: | 957 | fail: |
907 | drm_gem_object_unreference(obj); | 958 | drm_gem_object_unreference_unlocked(obj); |
908 | return ERR_PTR(ret); | 959 | return ERR_PTR(ret); |
909 | } | 960 | } |
910 | 961 | ||
962 | struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev, | ||
963 | uint32_t size, uint32_t flags) | ||
964 | { | ||
965 | return _msm_gem_new(dev, size, flags, true); | ||
966 | } | ||
967 | |||
968 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, | ||
969 | uint32_t size, uint32_t flags) | ||
970 | { | ||
971 | return _msm_gem_new(dev, size, flags, false); | ||
972 | } | ||
973 | |||
911 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, | 974 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, |
912 | struct dma_buf *dmabuf, struct sg_table *sgt) | 975 | struct dma_buf *dmabuf, struct sg_table *sgt) |
913 | { | 976 | { |
@@ -924,11 +987,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, | |||
924 | 987 | ||
925 | size = PAGE_ALIGN(dmabuf->size); | 988 | size = PAGE_ALIGN(dmabuf->size); |
926 | 989 | ||
927 | /* Take mutex so we can modify the inactive list in msm_gem_new_impl */ | 990 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false); |
928 | mutex_lock(&dev->struct_mutex); | ||
929 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj); | ||
930 | mutex_unlock(&dev->struct_mutex); | ||
931 | |||
932 | if (ret) | 991 | if (ret) |
933 | goto fail; | 992 | goto fail; |
934 | 993 | ||
@@ -937,17 +996,22 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, | |||
937 | npages = size / PAGE_SIZE; | 996 | npages = size / PAGE_SIZE; |
938 | 997 | ||
939 | msm_obj = to_msm_bo(obj); | 998 | msm_obj = to_msm_bo(obj); |
999 | mutex_lock(&msm_obj->lock); | ||
940 | msm_obj->sgt = sgt; | 1000 | msm_obj->sgt = sgt; |
941 | msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); | 1001 | msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); |
942 | if (!msm_obj->pages) { | 1002 | if (!msm_obj->pages) { |
1003 | mutex_unlock(&msm_obj->lock); | ||
943 | ret = -ENOMEM; | 1004 | ret = -ENOMEM; |
944 | goto fail; | 1005 | goto fail; |
945 | } | 1006 | } |
946 | 1007 | ||
947 | ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); | 1008 | ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); |
948 | if (ret) | 1009 | if (ret) { |
1010 | mutex_unlock(&msm_obj->lock); | ||
949 | goto fail; | 1011 | goto fail; |
1012 | } | ||
950 | 1013 | ||
1014 | mutex_unlock(&msm_obj->lock); | ||
951 | return obj; | 1015 | return obj; |
952 | 1016 | ||
953 | fail: | 1017 | fail: |
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 112eb63b5908..91c210d2359c 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -31,6 +31,7 @@ struct msm_gem_address_space { | |||
31 | * and position mm_node->start is in # of pages: | 31 | * and position mm_node->start is in # of pages: |
32 | */ | 32 | */ |
33 | struct drm_mm mm; | 33 | struct drm_mm mm; |
34 | spinlock_t lock; /* Protects drm_mm node allocation/removal */ | ||
34 | struct msm_mmu *mmu; | 35 | struct msm_mmu *mmu; |
35 | struct kref kref; | 36 | struct kref kref; |
36 | }; | 37 | }; |
@@ -89,6 +90,7 @@ struct msm_gem_object { | |||
89 | * an IOMMU. Also used for stolen/splashscreen buffer. | 90 | * an IOMMU. Also used for stolen/splashscreen buffer. |
90 | */ | 91 | */ |
91 | struct drm_mm_node *vram_node; | 92 | struct drm_mm_node *vram_node; |
93 | struct mutex lock; /* Protects resources associated with bo */ | ||
92 | }; | 94 | }; |
93 | #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) | 95 | #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) |
94 | 96 | ||
@@ -99,6 +101,7 @@ static inline bool is_active(struct msm_gem_object *msm_obj) | |||
99 | 101 | ||
100 | static inline bool is_purgeable(struct msm_gem_object *msm_obj) | 102 | static inline bool is_purgeable(struct msm_gem_object *msm_obj) |
101 | { | 103 | { |
104 | WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex)); | ||
102 | return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt && | 105 | return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt && |
103 | !msm_obj->base.dma_buf && !msm_obj->base.import_attach; | 106 | !msm_obj->base.dma_buf && !msm_obj->base.import_attach; |
104 | } | 107 | } |
@@ -108,6 +111,25 @@ static inline bool is_vunmapable(struct msm_gem_object *msm_obj) | |||
108 | return (msm_obj->vmap_count == 0) && msm_obj->vaddr; | 111 | return (msm_obj->vmap_count == 0) && msm_obj->vaddr; |
109 | } | 112 | } |
110 | 113 | ||
114 | /* The shrinker can be triggered while we hold objA->lock, and need | ||
115 | * to grab objB->lock to purge it. Lockdep just sees these as a single | ||
116 | * class of lock, so we use subclasses to teach it the difference. | ||
117 | * | ||
118 | * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and | ||
119 | * OBJ_LOCK_SHRINKER is used by shrinker. | ||
120 | * | ||
121 | * It is *essential* that we never go down paths that could trigger the | ||
122 | * shrinker for a purgable object. This is ensured by checking that | ||
123 | * msm_obj->madv == MSM_MADV_WILLNEED. | ||
124 | */ | ||
125 | enum msm_gem_lock { | ||
126 | OBJ_LOCK_NORMAL, | ||
127 | OBJ_LOCK_SHRINKER, | ||
128 | }; | ||
129 | |||
130 | void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass); | ||
131 | void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass); | ||
132 | |||
111 | /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, | 133 | /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, |
112 | * associated with the cmdstream submission for synchronization (and | 134 | * associated with the cmdstream submission for synchronization (and |
113 | * make it easier to unwind when things go wrong, etc). This only | 135 | * make it easier to unwind when things go wrong, etc). This only |
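
The OBJ_LOCK_NORMAL/OBJ_LOCK_SHRINKER comment above is what lets per-object locks coexist with the shrinker: the shrinker can be entered while one object's lock is held and then needs another object's lock of the same class. A hedged sketch of the intended pattern, assuming the kernel's lockdep-annotated mutex API (foo_bo, foo_purge and the FOO_LOCK_* names are hypothetical):

#include <linux/mutex.h>

enum foo_obj_lock {
	FOO_LOCK_NORMAL,	/* ordinary callers: plain mutex_lock() */
	FOO_LOCK_SHRINKER,	/* reclaim path that may already hold another
				 * object's lock of the same class */
};

struct foo_bo {
	struct mutex lock;
};

static void foo_purge(struct foo_bo *bo, enum foo_obj_lock subclass)
{
	/*
	 * All foo_bo locks share one lockdep class; the distinct subclass
	 * tells lockdep that taking objB->lock here while objA->lock is
	 * already held is intentional nesting, not a self-deadlock.
	 */
	mutex_lock_nested(&bo->lock, subclass);
	/* ... drop pages, tear down the kernel mapping, etc. ... */
	mutex_unlock(&bo->lock);
}

The comment's invariant still has to hold: code paths that could trigger the shrinker must never run against a purgeable object, which is why the reworked msm_gem_get_pages()/get_iova()/get_vaddr() paths above bail with a WARN_ON when madv != MSM_MADV_WILLNEED.
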
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index ab1dd020eb04..b72d8e6cd51d 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -20,6 +20,18 @@ | |||
20 | 20 | ||
21 | static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock) | 21 | static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock) |
22 | { | 22 | { |
23 | /* NOTE: we are *closer* to being able to get rid of | ||
24 | * mutex_trylock_recursive().. the msm_gem code itself does | ||
25 | * not need struct_mutex, although codepaths that can trigger | ||
26 | * shrinker are still called in code-paths that hold the | ||
27 | * struct_mutex. | ||
28 | * | ||
29 | * Also, msm_obj->madv is protected by struct_mutex. | ||
30 | * | ||
31 | * The next step is probably split out a seperate lock for | ||
32 | * protecting inactive_list, so that shrinker does not need | ||
33 | * struct_mutex. | ||
34 | */ | ||
23 | switch (mutex_trylock_recursive(&dev->struct_mutex)) { | 35 | switch (mutex_trylock_recursive(&dev->struct_mutex)) { |
24 | case MUTEX_TRYLOCK_FAILED: | 36 | case MUTEX_TRYLOCK_FAILED: |
25 | return false; | 37 | return false; |
@@ -77,7 +89,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) | |||
77 | if (freed >= sc->nr_to_scan) | 89 | if (freed >= sc->nr_to_scan) |
78 | break; | 90 | break; |
79 | if (is_purgeable(msm_obj)) { | 91 | if (is_purgeable(msm_obj)) { |
80 | msm_gem_purge(&msm_obj->base); | 92 | msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER); |
81 | freed += msm_obj->base.size >> PAGE_SHIFT; | 93 | freed += msm_obj->base.size >> PAGE_SHIFT; |
82 | } | 94 | } |
83 | } | 95 | } |
@@ -106,7 +118,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) | |||
106 | 118 | ||
107 | list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) { | 119 | list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) { |
108 | if (is_vunmapable(msm_obj)) { | 120 | if (is_vunmapable(msm_obj)) { |
109 | msm_gem_vunmap(&msm_obj->base); | 121 | msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER); |
110 | /* since we don't know any better, lets bail after a few | 122 | /* since we don't know any better, lets bail after a few |
111 | * and if necessary the shrinker will be invoked again. | 123 | * and if necessary the shrinker will be invoked again. |
112 | * Seems better than unmapping *everything* | 124 | * Seems better than unmapping *everything* |
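
msm_gem_shrinker_lock(), whose comment is extended above, still has to handle being invoked both from outside the driver and from allocation paths that already hold struct_mutex. A sketch of that trylock pattern, assuming the mutex_trylock_recursive() API available in this kernel generation (foo_shrinker_lock is a hypothetical name; the real helper operates on dev->struct_mutex):

#include <linux/mutex.h>

static bool foo_shrinker_lock(struct mutex *m, bool *unlock)
{
	switch (mutex_trylock_recursive(m)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;		/* another task holds it: skip reclaim */
	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;		/* we took it, caller must release it */
		return true;
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;	/* current task already holds it */
		return true;
	}
	return false;
}

As the new comment notes, once inactive_list gets its own lock the shrinker should no longer need struct_mutex, and this trylock dance can go away.
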
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c8d01df993da..179cfc60b6ca 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -245,7 +245,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) | |||
245 | uint64_t iova; | 245 | uint64_t iova; |
246 | 246 | ||
247 | /* if locking succeeded, pin bo: */ | 247 | /* if locking succeeded, pin bo: */ |
248 | ret = msm_gem_get_iova_locked(&msm_obj->base, | 248 | ret = msm_gem_get_iova(&msm_obj->base, |
249 | submit->gpu->aspace, &iova); | 249 | submit->gpu->aspace, &iova); |
250 | 250 | ||
251 | if (ret) | 251 | if (ret) |
@@ -301,7 +301,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob | |||
301 | /* For now, just map the entire thing. Eventually we probably | 301 | /* For now, just map the entire thing. Eventually we probably |
302 | * to do it page-by-page, w/ kmap() if not vmap()d.. | 302 | * to do it page-by-page, w/ kmap() if not vmap()d.. |
303 | */ | 303 | */ |
304 | ptr = msm_gem_get_vaddr_locked(&obj->base); | 304 | ptr = msm_gem_get_vaddr(&obj->base); |
305 | 305 | ||
306 | if (IS_ERR(ptr)) { | 306 | if (IS_ERR(ptr)) { |
307 | ret = PTR_ERR(ptr); | 307 | ret = PTR_ERR(ptr); |
@@ -359,7 +359,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob | |||
359 | } | 359 | } |
360 | 360 | ||
361 | out: | 361 | out: |
362 | msm_gem_put_vaddr_locked(&obj->base); | 362 | msm_gem_put_vaddr(&obj->base); |
363 | 363 | ||
364 | return ret; | 364 | return ret; |
365 | } | 365 | } |
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index f285d7e210db..c36321bc8714 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -50,7 +50,9 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace, | |||
50 | aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size); | 50 | aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size); |
51 | } | 51 | } |
52 | 52 | ||
53 | spin_lock(&aspace->lock); | ||
53 | drm_mm_remove_node(&vma->node); | 54 | drm_mm_remove_node(&vma->node); |
55 | spin_unlock(&aspace->lock); | ||
54 | 56 | ||
55 | vma->iova = 0; | 57 | vma->iova = 0; |
56 | 58 | ||
@@ -63,10 +65,15 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace, | |||
63 | { | 65 | { |
64 | int ret; | 66 | int ret; |
65 | 67 | ||
66 | if (WARN_ON(drm_mm_node_allocated(&vma->node))) | 68 | spin_lock(&aspace->lock); |
69 | if (WARN_ON(drm_mm_node_allocated(&vma->node))) { | ||
70 | spin_unlock(&aspace->lock); | ||
67 | return 0; | 71 | return 0; |
72 | } | ||
68 | 73 | ||
69 | ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages); | 74 | ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages); |
75 | spin_unlock(&aspace->lock); | ||
76 | |||
70 | if (ret) | 77 | if (ret) |
71 | return ret; | 78 | return ret; |
72 | 79 | ||
@@ -94,6 +101,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, | |||
94 | if (!aspace) | 101 | if (!aspace) |
95 | return ERR_PTR(-ENOMEM); | 102 | return ERR_PTR(-ENOMEM); |
96 | 103 | ||
104 | spin_lock_init(&aspace->lock); | ||
97 | aspace->name = name; | 105 | aspace->name = name; |
98 | aspace->mmu = msm_iommu_new(dev, domain); | 106 | aspace->mmu = msm_iommu_new(dev, domain); |
99 | 107 | ||
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 2d5c9afbcdbe..9f3dbc236ab3 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -497,7 +497,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
497 | 497 | ||
498 | /* submit takes a reference to the bo and iova until retired: */ | 498 | /* submit takes a reference to the bo and iova until retired: */ |
499 | drm_gem_object_reference(&msm_obj->base); | 499 | drm_gem_object_reference(&msm_obj->base); |
500 | msm_gem_get_iova_locked(&msm_obj->base, | 500 | msm_gem_get_iova(&msm_obj->base, |
501 | submit->gpu->aspace, &iova); | 501 | submit->gpu->aspace, &iova); |
502 | 502 | ||
503 | if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) | 503 | if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) |
@@ -661,9 +661,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
661 | } | 661 | } |
662 | 662 | ||
663 | /* Create ringbuffer: */ | 663 | /* Create ringbuffer: */ |
664 | mutex_lock(&drm->struct_mutex); | ||
665 | gpu->rb = msm_ringbuffer_new(gpu, config->ringsz); | 664 | gpu->rb = msm_ringbuffer_new(gpu, config->ringsz); |
666 | mutex_unlock(&drm->struct_mutex); | ||
667 | if (IS_ERR(gpu->rb)) { | 665 | if (IS_ERR(gpu->rb)) { |
668 | ret = PTR_ERR(gpu->rb); | 666 | ret = PTR_ERR(gpu->rb); |
669 | gpu->rb = NULL; | 667 | gpu->rb = NULL; |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0e81faab2c50..0366b8092f97 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -268,7 +268,7 @@ static void snapshot_buf(struct msm_rd_state *rd, | |||
268 | struct msm_gem_object *obj = submit->bos[idx].obj; | 268 | struct msm_gem_object *obj = submit->bos[idx].obj; |
269 | const char *buf; | 269 | const char *buf; |
270 | 270 | ||
271 | buf = msm_gem_get_vaddr_locked(&obj->base); | 271 | buf = msm_gem_get_vaddr(&obj->base); |
272 | if (IS_ERR(buf)) | 272 | if (IS_ERR(buf)) |
273 | return; | 273 | return; |
274 | 274 | ||
@@ -283,7 +283,7 @@ static void snapshot_buf(struct msm_rd_state *rd, | |||
283 | (uint32_t[3]){ iova, size, iova >> 32 }, 12); | 283 | (uint32_t[3]){ iova, size, iova >> 32 }, 12); |
284 | rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); | 284 | rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); |
285 | 285 | ||
286 | msm_gem_put_vaddr_locked(&obj->base); | 286 | msm_gem_put_vaddr(&obj->base); |
287 | } | 287 | } |
288 | 288 | ||
289 | /* called under struct_mutex */ | 289 | /* called under struct_mutex */ |
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 67b34e069abf..791bca3c6a9c 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -40,7 +40,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) | |||
40 | goto fail; | 40 | goto fail; |
41 | } | 41 | } |
42 | 42 | ||
43 | ring->start = msm_gem_get_vaddr_locked(ring->bo); | 43 | ring->start = msm_gem_get_vaddr(ring->bo); |
44 | if (IS_ERR(ring->start)) { | 44 | if (IS_ERR(ring->start)) { |
45 | ret = PTR_ERR(ring->start); | 45 | ret = PTR_ERR(ring->start); |
46 | goto fail; | 46 | goto fail; |