author    Francisco Jerez <currojerez@riseup.net>    2010-10-24 10:14:41 -0400
committer Ben Skeggs <bskeggs@redhat.com>            2010-12-03 00:11:20 -0500
commit    a5cf68b04b2b8ea716cf6fd8499c1c54d05fdf5e (patch)
tree      87f6007ef18c8d488f590523b90bcd8766537778 /drivers/gpu/drm/nouveau
parent    e419cf0954901bb3a987f8b76cbc9654ca06121c (diff)
drm/nouveau: Rework tile region handling.
The point is to share more code between the PFB/PGRAPH tile region hooks, and give the hardware-specific functions a chance to allocate per-region resources.

Signed-off-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
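For orientation, here is a minimal, self-contained sketch of the hook split this rework introduces: the FB engine gains an init_tile_region/set_tile_region/free_tile_region triplet, and the shared code decides when each hook runs. The types, hook bodies, and the printf standing in for register writes are simplified placeholders, not the kernel code; the real structures and implementations are in the diff that follows.

/* Hedged sketch of the reworked tile-region hook flow (simplified types). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tile_reg {
	bool used;
	uint32_t addr, limit, pitch;
};

struct fb_engine {
	/* describe/allocate per-region state for region i */
	void (*init_tile_region)(int i, uint32_t addr, uint32_t size,
				 uint32_t pitch, uint32_t flags);
	/* program the hardware from the stored per-region state */
	void (*set_tile_region)(int i);
	/* release per-region state */
	void (*free_tile_region)(int i);
};

static struct tile_reg tile[8];

static void demo_init(int i, uint32_t addr, uint32_t size,
		      uint32_t pitch, uint32_t flags)
{
	tile[i].addr = addr;
	tile[i].limit = (addr + size ? addr + size : 1) - 1;
	tile[i].pitch = pitch;
}

static void demo_set(int i)
{
	/* stands in for the PFB/PGRAPH register writes */
	printf("region %d: addr=%#x limit=%#x pitch=%u\n", i,
	       (unsigned)tile[i].addr, (unsigned)tile[i].limit,
	       (unsigned)tile[i].pitch);
}

static void demo_free(int i)
{
	tile[i].addr = tile[i].limit = tile[i].pitch = 0;
}

/* Shared update path, mirroring nv10_mem_update_tile_region():
 * free the old per-region state, init the new state, then reprogram. */
static void update_tile_region(struct fb_engine *pfb, int i, uint32_t addr,
			       uint32_t size, uint32_t pitch, uint32_t flags)
{
	if (tile[i].pitch)
		pfb->free_tile_region(i);
	if (pitch)
		pfb->init_tile_region(i, addr, size, pitch, flags);
	pfb->set_tile_region(i);
}

int main(void)
{
	struct fb_engine pfb = { demo_init, demo_set, demo_free };

	update_tile_region(&pfb, 0, 0x100000, 0x80000, 256, 0);
	update_tile_region(&pfb, 0, 0, 0, 0, 0);	/* turn the region off */
	return 0;
}

In the patch itself, nv10_mem_update_tile_region() additionally quiesces PFIFO and waits for idle under context_switch_lock around the register writes, and the per-chipset hooks live in nv10_fb.c, nv30_fb.c, nv40_fb.c and the nvXX_graph.c files.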
Diffstat (limited to 'drivers/gpu/drm/nouveau')
 drivers/gpu/drm/nouveau/nouveau_bo.c    |  11
 drivers/gpu/drm/nouveau/nouveau_drv.h   |  59
 drivers/gpu/drm/nouveau/nouveau_mem.c   | 111
 drivers/gpu/drm/nouveau/nouveau_state.c |  25
 drivers/gpu/drm/nouveau/nv10_fb.c       |  48
 drivers/gpu/drm/nouveau/nv10_graph.c    |  17
 drivers/gpu/drm/nouveau/nv20_graph.c    |  25
 drivers/gpu/drm/nouveau/nv30_fb.c       |  23
 drivers/gpu/drm/nouveau/nv40_fb.c       |  22
 drivers/gpu/drm/nouveau/nv40_graph.c    |  40
 10 files changed, 221 insertions(+), 160 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 099f806f39ed..8d5dd980240d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -46,9 +46,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
-	if (nvbo->tile)
-		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
-
+	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
 	kfree(nvbo);
 }
 
@@ -792,7 +790,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 
 	} else if (dev_priv->card_type >= NV_10) {
 		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
-						nvbo->tile_mode);
+						nvbo->tile_mode,
+						nvbo->tile_flags);
 	}
 
 	return 0;
@@ -808,9 +807,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 
 	if (dev_priv->card_type >= NV_10 &&
 	    dev_priv->card_type < NV_50) {
-		if (*old_tile)
-			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
-
+		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
 		*old_tile = new_tile;
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ce0475ead381..8b524d894f18 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -66,10 +66,11 @@ struct nouveau_grctx;
 #define NV50_VM_VRAM_NR  (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
 
 struct nouveau_tile_reg {
-	struct nouveau_fence *fence;
-	uint32_t addr;
-	uint32_t size;
 	bool used;
+	uint32_t addr;
+	uint32_t limit;
+	uint32_t pitch;
+	struct nouveau_fence *fence;
 };
 
 struct nouveau_bo {
@@ -309,8 +310,11 @@ struct nouveau_fb_engine {
 	int	(*init)(struct drm_device *dev);
 	void	(*takedown)(struct drm_device *dev);
 
-	void	(*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
-				     uint32_t size, uint32_t pitch);
+	void	(*init_tile_region)(struct drm_device *dev, int i,
+				    uint32_t addr, uint32_t size,
+				    uint32_t pitch, uint32_t flags);
+	void	(*set_tile_region)(struct drm_device *dev, int i);
+	void	(*free_tile_region)(struct drm_device *dev, int i);
 };
 
 struct nouveau_fifo_engine {
@@ -356,8 +360,7 @@ struct nouveau_pgraph_engine {
 	int	(*unload_context)(struct drm_device *);
 	void	(*tlb_flush)(struct drm_device *dev);
 
-	void	(*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
-				     uint32_t size, uint32_t pitch);
+	void	(*set_tile_region)(struct drm_device *dev, int i);
 };
 
 struct nouveau_display_engine {
@@ -668,7 +671,10 @@ struct drm_nouveau_private {
 	} gart_info;
 
 	/* nv10-nv40 tiling regions */
-	struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
+	struct {
+		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+		spinlock_t lock;
+	} tile;
 
 	/* VRAM/fb configuration */
 	uint64_t vram_size;
@@ -798,13 +804,12 @@ extern void nouveau_mem_gart_fini(struct drm_device *);
 extern int  nouveau_mem_init_agp(struct drm_device *);
 extern int  nouveau_mem_reset_agp(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
-extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
-						    uint32_t addr,
-						    uint32_t size,
-						    uint32_t pitch);
-extern void nv10_mem_expire_tiling(struct drm_device *dev,
-				   struct nouveau_tile_reg *tile,
-				   struct nouveau_fence *fence);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(
+	struct drm_device *dev, uint32_t addr, uint32_t size,
+	uint32_t pitch, uint32_t flags);
+extern void nv10_mem_put_tile_region(struct drm_device *dev,
+				     struct nouveau_tile_reg *tile,
+				     struct nouveau_fence *fence);
 extern int  nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
 				    uint32_t size, uint32_t flags,
 				    uint64_t phys);
@@ -1011,18 +1016,25 @@ extern void nv04_fb_takedown(struct drm_device *);
 /* nv10_fb.c */
 extern int  nv10_fb_init(struct drm_device *);
 extern void nv10_fb_takedown(struct drm_device *);
-extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
-				      uint32_t, uint32_t);
+extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
+				     uint32_t addr, uint32_t size,
+				     uint32_t pitch, uint32_t flags);
+extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
+extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
 
 /* nv30_fb.c */
 extern int  nv30_fb_init(struct drm_device *);
 extern void nv30_fb_takedown(struct drm_device *);
+extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
+				     uint32_t addr, uint32_t size,
+				     uint32_t pitch, uint32_t flags);
+extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
 
 /* nv40_fb.c */
 extern int  nv40_fb_init(struct drm_device *);
 extern void nv40_fb_takedown(struct drm_device *);
-extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
-				      uint32_t, uint32_t);
+extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
+
 /* nv50_fb.c */
 extern int  nv50_fb_init(struct drm_device *);
 extern void nv50_fb_takedown(struct drm_device *);
@@ -1102,8 +1114,7 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *);
 extern int  nv10_graph_load_context(struct nouveau_channel *);
 extern int  nv10_graph_unload_context(struct drm_device *);
 extern void nv10_graph_context_switch(struct drm_device *);
-extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv20_graph.c */
 extern int  nv20_graph_create_context(struct nouveau_channel *);
@@ -1113,8 +1124,7 @@ extern int nv20_graph_unload_context(struct drm_device *);
 extern int  nv20_graph_init(struct drm_device *);
 extern void nv20_graph_takedown(struct drm_device *);
 extern int  nv30_graph_init(struct drm_device *);
-extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv40_graph.c */
 extern int  nv40_graph_init(struct drm_device *);
@@ -1125,8 +1135,7 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
 extern int  nv40_graph_load_context(struct nouveau_channel *);
 extern int  nv40_graph_unload_context(struct drm_device *);
 extern void nv40_grctx_init(struct nouveau_grctx *);
-extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv50_graph.c */
 extern int  nv50_graph_init(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a7c3e08aa7b5..549f59052881 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -42,83 +42,104 @@
  */
 
 static void
-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			   uint32_t size, uint32_t pitch)
+nv10_mem_update_tile_region(struct drm_device *dev,
+			    struct nouveau_tile_reg *tile, uint32_t addr,
+			    uint32_t size, uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+	int i = tile - dev_priv->tile.reg;
+	unsigned long save;
 
-	tile->addr = addr;
-	tile->size = size;
-	tile->used = !!pitch;
 	nouveau_fence_unref(&tile->fence);
 
+	if (tile->pitch)
+		pfb->free_tile_region(dev, i);
+
+	if (pitch)
+		pfb->init_tile_region(dev, i, addr, size, pitch, flags);
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
 	pfifo->reassign(dev, false);
 	pfifo->cache_pull(dev, false);
 
 	nouveau_wait_for_idle(dev);
 
-	pgraph->set_region_tiling(dev, i, addr, size, pitch);
-	pfb->set_region_tiling(dev, i, addr, size, pitch);
+	pfb->set_tile_region(dev, i);
+	pgraph->set_tile_region(dev, i);
 
 	pfifo->cache_pull(dev, true);
 	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
 }
 
-struct nouveau_tile_reg *
-nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
-		    uint32_t pitch)
+static struct nouveau_tile_reg *
+nv10_mem_get_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_tile_reg *found = NULL;
-	unsigned long i, flags;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	spin_lock(&dev_priv->tile.lock);
 
-	for (i = 0; i < pfb->num_tiles; i++) {
-		struct nouveau_tile_reg *tile = &dev_priv->tile[i];
-
-		if (tile->used)
-			/* Tile region in use. */
-			continue;
+	if (!tile->used &&
+	    (!tile->fence || nouveau_fence_signalled(tile->fence)))
+		tile->used = true;
+	else
+		tile = NULL;
 
-		if (tile->fence &&
-		    !nouveau_fence_signalled(tile->fence))
-			/* Pending tile region. */
-			continue;
+	spin_unlock(&dev_priv->tile.lock);
+	return tile;
+}
 
-		if (max(tile->addr, addr) <
-		    min(tile->addr + tile->size, addr + size))
-			/* Kill an intersecting tile region. */
-			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
+void
+nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+			 struct nouveau_fence *fence)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-		if (pitch && !found) {
-			/* Free tile region. */
-			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
-			found = tile;
+	if (tile) {
+		spin_lock(&dev_priv->tile.lock);
+		if (fence) {
+			/* Mark it as pending. */
+			tile->fence = fence;
+			nouveau_fence_ref(fence);
 		}
-	}
-
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
-	return found;
+		tile->used = false;
+		spin_unlock(&dev_priv->tile.lock);
+	}
 }
 
-void
-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
-		       struct nouveau_fence *fence)
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+		    uint32_t pitch, uint32_t flags)
 {
-	if (fence) {
-		/* Mark it as pending. */
-		tile->fence = fence;
-		nouveau_fence_ref(fence);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	struct nouveau_tile_reg *tile, *found = NULL;
+	int i;
+
+	for (i = 0; i < pfb->num_tiles; i++) {
+		tile = nv10_mem_get_tile_region(dev, i);
+
+		if (pitch && !found) {
+			found = tile;
+			continue;
+
+		} else if (tile && tile->pitch) {
+			/* Kill an unused tile region. */
+			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
+		}
+
+		nv10_mem_put_tile_region(dev, tile, NULL);
 	}
 
-	tile->used = false;
+	if (found)
+		nv10_mem_update_tile_region(dev, found, addr, size,
+					    pitch, flags);
+	return found;
 }
 
 /*
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index ec9d193b6f61..1a7a50ccb7c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -118,7 +118,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nv10_fb_init;
 		engine->fb.takedown = nv10_fb_takedown;
-		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
+		engine->fb.init_tile_region = nv10_fb_init_tile_region;
+		engine->fb.set_tile_region = nv10_fb_set_tile_region;
+		engine->fb.free_tile_region = nv10_fb_free_tile_region;
 		engine->graph.init = nv10_graph_init;
 		engine->graph.takedown = nv10_graph_takedown;
 		engine->graph.channel = nv10_graph_channel;
@@ -127,7 +129,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.fifo_access = nv04_graph_fifo_access;
 		engine->graph.load_context = nv10_graph_load_context;
 		engine->graph.unload_context = nv10_graph_unload_context;
-		engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
+		engine->graph.set_tile_region = nv10_graph_set_tile_region;
 		engine->fifo.channels = 32;
 		engine->fifo.init = nv10_fifo_init;
 		engine->fifo.takedown = nouveau_stub_takedown;
@@ -173,7 +175,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nv10_fb_init;
 		engine->fb.takedown = nv10_fb_takedown;
-		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
+		engine->fb.init_tile_region = nv10_fb_init_tile_region;
+		engine->fb.set_tile_region = nv10_fb_set_tile_region;
+		engine->fb.free_tile_region = nv10_fb_free_tile_region;
 		engine->graph.init = nv20_graph_init;
 		engine->graph.takedown = nv20_graph_takedown;
 		engine->graph.channel = nv10_graph_channel;
@@ -182,7 +186,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.fifo_access = nv04_graph_fifo_access;
 		engine->graph.load_context = nv20_graph_load_context;
 		engine->graph.unload_context = nv20_graph_unload_context;
-		engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
+		engine->graph.set_tile_region = nv20_graph_set_tile_region;
 		engine->fifo.channels = 32;
 		engine->fifo.init = nv10_fifo_init;
 		engine->fifo.takedown = nouveau_stub_takedown;
@@ -228,7 +232,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nv30_fb_init;
 		engine->fb.takedown = nv30_fb_takedown;
-		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
+		engine->fb.init_tile_region = nv30_fb_init_tile_region;
+		engine->fb.set_tile_region = nv10_fb_set_tile_region;
+		engine->fb.free_tile_region = nv30_fb_free_tile_region;
 		engine->graph.init = nv30_graph_init;
 		engine->graph.takedown = nv20_graph_takedown;
 		engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -237,7 +243,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.destroy_context = nv20_graph_destroy_context;
 		engine->graph.load_context = nv20_graph_load_context;
 		engine->graph.unload_context = nv20_graph_unload_context;
-		engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
+		engine->graph.set_tile_region = nv20_graph_set_tile_region;
 		engine->fifo.channels = 32;
 		engine->fifo.init = nv10_fifo_init;
 		engine->fifo.takedown = nouveau_stub_takedown;
@@ -286,7 +292,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nv40_fb_init;
 		engine->fb.takedown = nv40_fb_takedown;
-		engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
+		engine->fb.init_tile_region = nv30_fb_init_tile_region;
+		engine->fb.set_tile_region = nv40_fb_set_tile_region;
+		engine->fb.free_tile_region = nv30_fb_free_tile_region;
 		engine->graph.init = nv40_graph_init;
 		engine->graph.takedown = nv40_graph_takedown;
 		engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -295,7 +303,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.destroy_context = nv40_graph_destroy_context;
 		engine->graph.load_context = nv40_graph_load_context;
 		engine->graph.unload_context = nv40_graph_unload_context;
-		engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
+		engine->graph.set_tile_region = nv40_graph_set_tile_region;
 		engine->fifo.channels = 32;
 		engine->fifo.init = nv40_fifo_init;
 		engine->fifo.takedown = nouveau_stub_takedown;
@@ -596,6 +604,7 @@ nouveau_card_init(struct drm_device *dev)
 		goto out;
 	engine = &dev_priv->engine;
 	spin_lock_init(&dev_priv->channels.lock);
+	spin_lock_init(&dev_priv->tile.lock);
 	spin_lock_init(&dev_priv->context_switch_lock);
 
 	/* Make the CRTCs and I2C buses accessible */
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index cc5cda44e501..d50acc6a90d1 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -4,22 +4,40 @@
 #include "nouveau_drm.h"
 
 void
-nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			  uint32_t size, uint32_t pitch)
+nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+			 uint32_t size, uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	if (pitch) {
-		if (dev_priv->card_type >= NV_20)
-			addr |= 1;
-		else
-			addr |= 1 << 31;
-	}
+	tile->addr = addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+
+	if (dev_priv->card_type == NV_20)
+		tile->addr |= 1;
+	else
+		tile->addr |= 1 << 31;
+}
+
+void
+nv10_fb_free_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = tile->limit = tile->pitch = 0;
+}
+
+void
+nv10_fb_set_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PFB_TILE(i), addr);
+	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
 }
 
 int
@@ -33,7 +51,7 @@ nv10_fb_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 3fbb49dfc09c..1cd141edca04 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -899,17 +899,14 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan)
 }
 
 void
-nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv10_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1 << 31;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
 }
 
 int nv10_graph_init(struct drm_device *dev)
@@ -949,7 +946,7 @@ int nv10_graph_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv10_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 51b9dd12949d..a71871b91c69 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -511,24 +511,21 @@ nv20_graph_rdi(struct drm_device *dev)
 }
 
 void
-nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv20_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
 }
 
 int
@@ -612,7 +609,7 @@ nv20_graph_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv20_graph_set_tile_region(dev, i);
 
 	for (i = 0; i < 8; i++) {
 		nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
@@ -751,7 +748,7 @@ nv30_graph_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv20_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
 	nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
index 4a3f2f095128..e0135f0e2144 100644
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ b/drivers/gpu/drm/nouveau/nv30_fb.c
@@ -29,6 +29,27 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
+void
+nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+			 uint32_t size, uint32_t pitch, uint32_t flags)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = addr | 1;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+void
+nv30_fb_free_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = tile->limit = tile->pitch = 0;
+}
+
 static int
 calc_bias(struct drm_device *dev, int k, int i, int j)
 {
@@ -65,7 +86,7 @@ nv30_fb_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	/* Init the memory timing regs at 0x10037c/0x1003ac */
 	if (dev_priv->chipset == 0x30 ||
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index 3cd07d8d5bd7..f3d9c0505f7b 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -4,26 +4,22 @@
 #include "nouveau_drm.h"
 
 void
-nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			  uint32_t size, uint32_t pitch)
+nv40_fb_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
 	case 0x40:
-		nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
-		nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
-		nv_wr32(dev, NV10_PFB_TILE(i), addr);
+		nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
 		break;
 
 	default:
-		nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
-		nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
-		nv_wr32(dev, NV40_PFB_TILE(i), addr);
+		nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
 		break;
 	}
 }
@@ -64,7 +60,7 @@ nv40_fb_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 159bdcd757d4..7a51608b55ba 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -192,43 +192,39 @@ nv40_graph_unload_context(struct drm_device *dev)
 }
 
 void
-nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv40_graph_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
 	case 0x44:
 	case 0x4a:
 	case 0x4e:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 		break;
 
 	case 0x46:
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
 
 	default:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
 	}
 }
@@ -369,7 +365,7 @@ nv40_graph_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv40_graph_set_tile_region(dev, i);
 
 	/* begin RAM config */
 	vramsz = pci_resource_len(dev->pdev, 0) - 1;