author		Ben Skeggs <bskeggs@redhat.com>		2013-05-13 07:13:15 -0400
committer	Ben Skeggs <bskeggs@redhat.com>		2013-06-30 23:44:50 -0400
commit		ca97a36698ca3f76d3cee542e69dcf1b66210b0c (patch)
tree		059dbba049c0be7cf335e701480bab416f9adeba
parent		464d636bd0a7a905209816d1dee0838ccb79e57a (diff)
drm/nv50-/vm: take mutex rather than irqsave spinlock
These operations can take quite some time, and we really don't want to
have to hold a spinlock for too long.  Now that the lock ordering for vm
and the gr/nv84 hw bug workaround has been reversed, it's possible to
use a mutex here.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
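For context, a minimal sketch of the locking pattern this patch applies. This is not the nouveau code itself; struct example_priv and the flush_before()/flush_after() helpers are invented purely for illustration. spin_lock_irqsave() disables local interrupts and forbids sleeping for the whole critical section, which is a poor fit for a long register-poll loop, whereas a mutex lets the caller sleep but may only be taken from process context.

#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Hypothetical private structure, mirroring the shape of the change. */
struct example_priv {
	spinlock_t lock;	/* before: irqsave spinlock */
	struct mutex mutex;	/* after: sleepable mutex   */
};

/*
 * Before: interrupts stay off and the CPU cannot sleep while the
 * hardware is polled, however long that takes.
 */
static void flush_before(struct example_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* ... poll hardware until the flush completes ... */
	spin_unlock_irqrestore(&priv->lock, flags);
}

/*
 * After: the same critical section under a mutex; the caller may
 * sleep while waiting, but must be in process context.
 */
static void flush_after(struct example_priv *priv)
{
	mutex_lock(&priv->mutex);
	/* ... poll hardware until the flush completes ... */
	mutex_unlock(&priv->mutex);
}

Presumably the earlier lock ordering took the vm lock inside a context where sleeping was not allowed, which is why the mutex only becomes usable once that ordering with the gr/nv84 workaround is reversed.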
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c	7
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c	7
2 files changed, 4 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
index 966e61434c7a..50c66122cc89 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
@@ -31,7 +31,6 @@
 
 struct nv50_vmmgr_priv {
 	struct nouveau_vmmgr base;
-	spinlock_t lock;
 };
 
 static void
@@ -153,10 +152,9 @@ nv50_vm_flush(struct nouveau_vm *vm)
 {
 	struct nv50_vmmgr_priv *priv = (void *)vm->vmm;
 	struct nouveau_engine *engine;
-	unsigned long flags;
 	int i, vme;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	mutex_lock(&nv_subdev(priv)->mutex);
 	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
 		if (!atomic_read(&vm->engref[i]))
 			continue;
@@ -182,7 +180,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
 		if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
 			nv_error(priv, "vm flush timeout: engine %d\n", vme);
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+	mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
 static int
@@ -220,7 +218,6 @@ nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	priv->base.map_sg = nv50_vm_map_sg;
 	priv->base.unmap = nv50_vm_unmap;
 	priv->base.flush = nv50_vm_flush;
-	spin_lock_init(&priv->lock);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index 4c3b0a23b9d6..beb09743aaff 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -32,7 +32,6 @@
 
 struct nvc0_vmmgr_priv {
 	struct nouveau_vmmgr base;
-	spinlock_t lock;
 };
 
 
@@ -164,12 +163,11 @@ void
 nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
 {
 	struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
-	unsigned long flags;
 
 	/* looks like maybe a "free flush slots" counter, the
 	 * faster you write to 0x100cbc to more it decreases
 	 */
-	spin_lock_irqsave(&priv->lock, flags);
+	mutex_lock(&nv_subdev(priv)->mutex);
 	if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
 		nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
 			 nv_rd32(subdev, 0x100c80), type);
@@ -183,7 +181,7 @@ nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
 		nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
 			 nv_rd32(subdev, 0x100c80), type);
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+	mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
 static void
@@ -227,7 +225,6 @@ nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	priv->base.map_sg = nvc0_vm_map_sg;
 	priv->base.unmap = nvc0_vm_unmap;
 	priv->base.flush = nvc0_vm_flush;
-	spin_lock_init(&priv->lock);
 	return 0;
 }
 