 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 9dec58ec3d9f..cd5adbec5e57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -94,7 +94,7 @@ struct gk20a_instmem {
 	struct nvkm_instmem base;
 
 	/* protects vaddr_* and gk20a_instobj::vaddr* */
-	spinlock_t lock;
+	struct mutex lock;
 
 	/* CPU mappings LRU */
 	unsigned int vaddr_use;
@@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
 	const u64 size = nvkm_memory_size(memory);
-	unsigned long flags;
 
 	nvkm_ltc_flush(ltc);
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	if (node->base.vaddr) {
 		if (!node->use_cpt) {
@@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 
 out:
 	node->use_cpt++;
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	return node->base.vaddr;
 }
@@ -239,9 +238,8 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
-	unsigned long flags;
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	/* we should at least have one user to release... */
 	if (WARN_ON(node->use_cpt == 0))
@@ -252,7 +250,7 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
 		list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
 
 out:
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	wmb();
 	nvkm_ltc_invalidate(ltc);
@@ -306,19 +304,18 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
 	struct nvkm_mm_node *r = node->base.mem.mem;
-	unsigned long flags;
 	int i;
 
 	if (unlikely(!r))
 		goto out;
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	/* vaddr has already been recycled */
 	if (node->base.vaddr)
 		gk20a_instobj_iommu_recycle_vaddr(node);
 
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	/* clear IOMMU bit to unmap pages */
 	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
@@ -571,7 +568,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
 	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
-	spin_lock_init(&imem->lock);
+	mutex_init(&imem->lock);
 	*pimem = &imem->base;
 
 	/* do not allow more than 1MB of CPU-mapped instmem */
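
Editor's note on the pattern: the lock guards the CPU-mapping paths, which create and tear down kernel virtual mappings (the driver's kmap/recycle helpers use vmap()/vunmap()); those calls may sleep, which is forbidden while holding a spinlock with interrupts disabled, hence the switch to a mutex. Below is a minimal, self-contained sketch of the same acquire/release locking pattern, assuming the paths only run in process context. The names (struct obj, obj_acquire, obj_release) are hypothetical, not part of the driver, and the sketch unmaps eagerly on last release for brevity, whereas the real driver defers unmapping through the imem->vaddr_lru list.

/* Illustrative sketch, not driver code. A mutex (a sleeping lock) is safe
 * around vmap()/vunmap(); a spinlock taken with spin_lock_irqsave() is not,
 * because the critical section may sleep. The lock must be set up once with
 * mutex_init(&o->lock), mirroring gk20a_instmem_new() above.
 */
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>

struct obj {
	struct mutex lock;	/* protects vaddr and use_cpt */
	struct page **pages;	/* backing pages to map */
	unsigned int npages;
	void *vaddr;		/* CPU mapping, NULL while unmapped */
	unsigned int use_cpt;	/* number of active CPU users */
};

/* Map the object for CPU access, creating the mapping on first use. */
static void *obj_acquire(struct obj *o)
{
	mutex_lock(&o->lock);	/* may sleep: process context only */
	if (!o->vaddr)
		o->vaddr = vmap(o->pages, o->npages, VM_MAP, PAGE_KERNEL);
	if (o->vaddr)
		o->use_cpt++;
	mutex_unlock(&o->lock);
	return o->vaddr;
}

/* Drop one CPU user; tear down the mapping when the last user is gone. */
static void obj_release(struct obj *o)
{
	mutex_lock(&o->lock);
	if (!WARN_ON(o->use_cpt == 0) && --o->use_cpt == 0) {
		vunmap(o->vaddr);	/* may sleep, hence the mutex */
		o->vaddr = NULL;
	}
	mutex_unlock(&o->lock);
}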