author     Maarten Maathuis <madman2003@gmail.com>   2010-02-01 14:58:27 -0500
committer  Ben Skeggs <bskeggs@redhat.com>           2010-02-25 00:07:53 -0500
commit     ff9e5279b14dc024599cc705ee199dadb94e90a3
tree       4881498b0c5f0defdc14890783249b0514a8afde
parent     6c42966768b0254f465a8f451333795283f53d22
drm/nouveau: protect channel create/destroy and irq handler with a spinlock
The nv50 pgraph handler (for example) could reenable pgraph fifo access
while pgraph context is being unloaded, which would be bad (we need to
guarantee that a ctxprog isn't running).
Signed-off-by: Maarten Maathuis <madman2003@gmail.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
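For readers following the patch, the sketch below illustrates the locking pattern being introduced: one spinlock, taken with spin_lock_irqsave() both in the process-context channel create/destroy paths and in the interrupt handler, so the IRQ handler can never run against a channel that is halfway through setup or teardown. This is an illustrative sketch under simplified assumptions, not code from the patch; struct demo_private, demo_init(), demo_channel_destroy() and demo_irq_handler() are hypothetical stand-ins for the corresponding nouveau structures and functions.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct demo_private {
        spinlock_t context_switch_lock; /* protects PFIFO/PGRAPH channel state */
        void *current_channel;          /* state shared with the IRQ handler */
};

static void demo_init(struct demo_private *priv)
{
        spin_lock_init(&priv->context_switch_lock);    /* done once at card init */
}

static void demo_channel_destroy(struct demo_private *priv)
{
        unsigned long flags;

        /* Give the engine a chance to idle *before* taking the lock, so the
         * IRQ handler can still drain pending errors (the same ordering the
         * patch adds to nouveau_channel_free()); the wait itself is elided
         * in this sketch. */

        spin_lock_irqsave(&priv->context_switch_lock, flags);
        /* With local interrupts off and the lock held, the IRQ handler
         * cannot switch contexts while the channel is torn down. */
        priv->current_channel = NULL;
        spin_unlock_irqrestore(&priv->context_switch_lock, flags);
}

static irqreturn_t demo_irq_handler(int irq, void *arg)
{
        struct demo_private *priv = arg;
        unsigned long flags;

        spin_lock_irqsave(&priv->context_switch_lock, flags);
        /* Channel state is stable here: create/destroy paths are excluded
         * for as long as the lock is held. */
        spin_unlock_irqrestore(&priv->context_switch_lock, flags);

        return IRQ_HANDLED;
}

Using the irqsave variant in the process-context paths matters because the same lock is taken in the interrupt handler: a plain spin_lock() there could deadlock if the interrupt arrived on the same CPU while the lock was held.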
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_channel.c   13
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_drv.h         3
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_irq.c         5
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_state.c       1
-rw-r--r--   drivers/gpu/drm/nouveau/nv04_fifo.c           5
-rw-r--r--   drivers/gpu/drm/nouveau/nv40_fifo.c           5
-rw-r--r--   drivers/gpu/drm/nouveau/nv50_fifo.c           5
7 files changed, 36 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 2281f99da7fc..f7ca95003f54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -275,9 +275,18 @@ nouveau_channel_free(struct nouveau_channel *chan)
          */
         nouveau_fence_fini(chan);
 
-        /* Ensure the channel is no longer active on the GPU */
+        /* This will prevent pfifo from switching channels. */
         pfifo->reassign(dev, false);
 
+        /* We want to give pgraph a chance to idle and get rid of all potential
+         * errors. We need to do this before the lock, otherwise the irq handler
+         * is unable to process them.
+         */
+        if (pgraph->channel(dev) == chan)
+                nouveau_wait_for_idle(dev);
+
+        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
         pgraph->fifo_access(dev, false);
         if (pgraph->channel(dev) == chan)
                 pgraph->unload_context(dev);
@@ -293,6 +302,8 @@ nouveau_channel_free(struct nouveau_channel *chan)
 
         pfifo->reassign(dev, true);
 
+        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
         /* Release the channel's resources */
         nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
         if (chan->pushbuf_bo) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c15ef37b71c..52cc13bd02b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -533,6 +533,9 @@ struct drm_nouveau_private {
         struct nouveau_engine engine;
         struct nouveau_channel *channel;
 
+        /* For PFIFO and PGRAPH. */
+        spinlock_t context_switch_lock;
+
         /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
         struct nouveau_gpuobj *ramht;
         uint32_t ramin_rsvd_vram;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 447f9f69d6b1..95220ddebb45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -691,11 +691,14 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
         struct drm_device *dev = (struct drm_device *)arg;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         uint32_t status, fbdev_flags = 0;
+        unsigned long flags;
 
         status = nv_rd32(dev, NV03_PMC_INTR_0);
         if (!status)
                 return IRQ_NONE;
 
+        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
         if (dev_priv->fbdev_info) {
                 fbdev_flags = dev_priv->fbdev_info->flags;
                 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -733,5 +736,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
         if (dev_priv->fbdev_info)
                 dev_priv->fbdev_info->flags = fbdev_flags;
 
+        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
         return IRQ_HANDLED;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a4851af5b05e..ed5ac0b9a0ac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -391,6 +391,7 @@ nouveau_card_init(struct drm_device *dev)
                 goto out;
         engine = &dev_priv->engine;
         dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
+        spin_lock_init(&dev_priv->context_switch_lock);
 
         /* Parse BIOS tables / Run init tables if card not POSTed */
         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f31347b8c9b0..66fe55983b6e 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -117,6 +117,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
 {
         struct drm_device *dev = chan->dev;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
+        unsigned long flags;
         int ret;
 
         ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
@@ -127,6 +128,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
         if (ret)
                 return ret;
 
+        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
         /* Setup initial state */
         dev_priv->engine.instmem.prepare_access(dev, true);
         RAMFC_WR(DMA_PUT, chan->pushbuf_base);
@@ -144,6 +147,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
         /* enable the fifo dma operation */
         nv_wr32(dev, NV04_PFIFO_MODE,
                 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
         return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index b4f19ccb8b41..6b2ef4a9fce1 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -37,6 +37,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
         struct drm_device *dev = chan->dev;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         uint32_t fc = NV40_RAMFC(chan->id);
+        unsigned long flags;
         int ret;
 
         ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
@@ -45,6 +46,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
         if (ret)
                 return ret;
 
+        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
         dev_priv->engine.instmem.prepare_access(dev, true);
         nv_wi32(dev, fc + 0, chan->pushbuf_base);
         nv_wi32(dev, fc + 4, chan->pushbuf_base);
@@ -63,6 +66,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
         /* enable the fifo dma operation */
         nv_wr32(dev, NV04_PFIFO_MODE,
                 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
         return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 204a79ff10f4..369ecb4cee57 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -243,6 +243,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
         struct drm_device *dev = chan->dev;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_gpuobj *ramfc = NULL;
+        unsigned long flags;
         int ret;
 
         NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -278,6 +279,8 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
                 return ret;
         }
 
+        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
         dev_priv->engine.instmem.prepare_access(dev, true);
 
         nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
@@ -306,10 +309,12 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
         ret = nv50_fifo_channel_enable(dev, chan->id, false);
         if (ret) {
                 NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
+                spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
                 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
                 return ret;
         }
 
+        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
         return 0;
 }
 