author    Francisco Jerez <currojerez@riseup.net>    2010-10-24 10:14:41 -0400
committer Ben Skeggs <bskeggs@redhat.com>            2010-12-03 00:11:20 -0500
commit    a5cf68b04b2b8ea716cf6fd8499c1c54d05fdf5e (patch)
tree      87f6007ef18c8d488f590523b90bcd8766537778   /drivers/gpu/drm/nouveau/nouveau_mem.c
parent    e419cf0954901bb3a987f8b76cbc9654ca06121c (diff)
drm/nouveau: Rework tile region handling.
The point is to share more code between the PFB/PGRAPH tile region hooks, and
give the hardware specific functions a chance to allocate per-region resources.

Signed-off-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
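The new code calls per-engine hooks (pfb->init_tile_region, pfb->free_tile_region, pfb->set_tile_region, pgraph->set_tile_region) whose declarations live outside this file. A minimal sketch of the assumed hook layout, inferred only from the calls visible in the diff below (the real declarations in nouveau_drv.h may differ), looks like this:

#include <stdint.h>

struct drm_device;

/*
 * Sketch of the assumed per-engine tile-region hooks; only the fields
 * exercised by the diff below are shown, everything else is omitted.
 */
struct nouveau_fb_engine {
	int num_tiles;
	/* Allocate per-region resources and record the region setup. */
	void (*init_tile_region)(struct drm_device *dev, int i, uint32_t addr,
				 uint32_t size, uint32_t pitch, uint32_t flags);
	/* Release whatever init_tile_region allocated for region i. */
	void (*free_tile_region)(struct drm_device *dev, int i);
	/* Program the current state of region i into the hardware. */
	void (*set_tile_region)(struct drm_device *dev, int i);
};

struct nouveau_pgraph_engine {
	/* PGRAPH mirrors the PFB tile configuration for region i. */
	void (*set_tile_region)(struct drm_device *dev, int i);
};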
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_mem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c  111
1 file changed, 66 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a7c3e08aa7b5..549f59052881 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -42,83 +42,104 @@
  */
 
 static void
-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			   uint32_t size, uint32_t pitch)
+nv10_mem_update_tile_region(struct drm_device *dev,
+			    struct nouveau_tile_reg *tile, uint32_t addr,
+			    uint32_t size, uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+	int i = tile - dev_priv->tile.reg;
+	unsigned long save;
 
-	tile->addr = addr;
-	tile->size = size;
-	tile->used = !!pitch;
 	nouveau_fence_unref(&tile->fence);
 
+	if (tile->pitch)
+		pfb->free_tile_region(dev, i);
+
+	if (pitch)
+		pfb->init_tile_region(dev, i, addr, size, pitch, flags);
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
 	pfifo->reassign(dev, false);
 	pfifo->cache_pull(dev, false);
 
 	nouveau_wait_for_idle(dev);
 
-	pgraph->set_region_tiling(dev, i, addr, size, pitch);
-	pfb->set_region_tiling(dev, i, addr, size, pitch);
+	pfb->set_tile_region(dev, i);
+	pgraph->set_tile_region(dev, i);
 
 	pfifo->cache_pull(dev, true);
 	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
 }
 
-struct nouveau_tile_reg *
-nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
-		    uint32_t pitch)
+static struct nouveau_tile_reg *
+nv10_mem_get_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_tile_reg *found = NULL;
-	unsigned long i, flags;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	spin_lock(&dev_priv->tile.lock);
 
-	for (i = 0; i < pfb->num_tiles; i++) {
-		struct nouveau_tile_reg *tile = &dev_priv->tile[i];
-
-		if (tile->used)
-			/* Tile region in use. */
-			continue;
+	if (!tile->used &&
+	    (!tile->fence || nouveau_fence_signalled(tile->fence)))
+		tile->used = true;
+	else
+		tile = NULL;
 
-		if (tile->fence &&
-		    !nouveau_fence_signalled(tile->fence))
-			/* Pending tile region. */
-			continue;
+	spin_unlock(&dev_priv->tile.lock);
+	return tile;
+}
 
-		if (max(tile->addr, addr) <
-		    min(tile->addr + tile->size, addr + size))
-			/* Kill an intersecting tile region. */
-			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
+void
+nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+			 struct nouveau_fence *fence)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-		if (pitch && !found) {
-			/* Free tile region. */
-			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
-			found = tile;
+	if (tile) {
+		spin_lock(&dev_priv->tile.lock);
+		if (fence) {
+			/* Mark it as pending. */
+			tile->fence = fence;
+			nouveau_fence_ref(fence);
 		}
-	}
-
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
-	return found;
+		tile->used = false;
+		spin_unlock(&dev_priv->tile.lock);
+	}
 }
 
-void
-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
-		       struct nouveau_fence *fence)
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+		    uint32_t pitch, uint32_t flags)
 {
-	if (fence) {
-		/* Mark it as pending. */
-		tile->fence = fence;
-		nouveau_fence_ref(fence);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	struct nouveau_tile_reg *tile, *found = NULL;
+	int i;
+
+	for (i = 0; i < pfb->num_tiles; i++) {
+		tile = nv10_mem_get_tile_region(dev, i);
+
+		if (pitch && !found) {
+			found = tile;
+			continue;
+
+		} else if (tile && tile->pitch) {
+			/* Kill an unused tile region. */
+			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
+		}
+
+		nv10_mem_put_tile_region(dev, tile, NULL);
 	}
 
-	tile->used = false;
+	if (found)
+		nv10_mem_update_tile_region(dev, found, addr, size,
+					    pitch, flags);
+	return found;
 }
 
 /*