author		Ben Skeggs <bskeggs@redhat.com>	2010-11-15 19:13:05 -0500
committer	Ben Skeggs <bskeggs@redhat.com>	2010-12-07 22:48:17 -0500
commit		b571fe21f5c24760368b3fb927af5a7384d7721b (patch)
tree		a49f85fdf8ef3a3c9b969c0fbbe36c7fd2b6e40d /drivers/gpu/drm
parent		5f6fdca570b13a8a2c9cab9ab6edfc17487049cf (diff)
drm/nv50: tidy up PCIEGART implementation
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
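In outline, the patch gives NV50+ its own ttm_backend_func table whose bind/unbind go through the shared channel VM (nouveau_vm_map_sg()/nouveau_vm_unmap_at()) instead of writing ctxdma PTEs directly, and the GART base moves from dev_priv->vm_gart_base to gart_info.aper_base. Below is a minimal, standalone C sketch of that per-generation dispatch pattern only, reconstructed from the hunks that follow; the types, values, and printf bodies are mock stand-ins for illustration, not the real nouveau or TTM interfaces.

/* Standalone sketch: pick a bind/unbind function table by card generation,
 * mirroring the split between nouveau_sgdma_backend and nv50_sgdma_backend
 * introduced by this patch.  All types here are hypothetical. */
#include <stdio.h>

enum card_type { NV_04 = 0x04, NV_50 = 0x50 };

struct backend_func {
	const char *name;
	int (*bind)(unsigned long offset, unsigned int npages);
	int (*unbind)(void);
};

/* pre-NV50 path: one PTE write per small page into the ctxdma object */
static int ctxdma_bind(unsigned long offset, unsigned int npages)
{
	printf("ctxdma bind: %u pages at 0x%lx\n", npages, offset);
	return 0;
}

static int ctxdma_unbind(void)
{
	printf("ctxdma unbind\n");
	return 0;
}

/* NV50+ path: hand the page list to the VM layer in one call
 * (nouveau_vm_map_sg() in the actual patch) */
static int vm_bind(unsigned long offset, unsigned int npages)
{
	printf("vm map_sg: %u pages at GART offset 0x%lx\n", npages, offset);
	return 0;
}

static int vm_unbind(void)
{
	printf("vm unmap\n");
	return 0;
}

static const struct backend_func ctxdma_backend = { "ctxdma", ctxdma_bind, ctxdma_unbind };
static const struct backend_func vm_backend     = { "vm",     vm_bind,     vm_unbind };

int main(void)
{
	enum card_type card = NV_50;
	/* mirrors nouveau_sgdma_init_ttm(): choose the func table by generation */
	const struct backend_func *be = (card < NV_50) ? &ctxdma_backend : &vm_backend;

	be->bind(0x20000000UL, 16);
	be->unbind();
	return 0;
}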
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c	|   7
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_drv.h	|   4
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_object.c	|   4
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_sgdma.c	| 147
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_instmem.c	|  12
5 files changed, 79 insertions(+), 95 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index bc28aeada8c..42d1ad62b38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -425,7 +425,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
 		man->func = &ttm_bo_manager_func;
@@ -441,13 +440,13 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 				     TTM_MEMTYPE_FLAG_CMA;
 			man->available_caching = TTM_PL_MASK_CACHING;
 			man->default_caching = TTM_PL_FLAG_CACHED;
+			man->gpu_offset = dev_priv->gart_info.aper_base;
 			break;
 		default:
 			NV_ERROR(dev, "Unknown GART type: %d\n",
 				 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
 		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -531,12 +530,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (old_mem->mem_type == TTM_PL_VRAM)
 			src_offset = nvbo->vma.offset;
 		else
-			src_offset += dev_priv->vm_gart_base;
+			src_offset += dev_priv->gart_info.aper_base;
 
 		if (new_mem->mem_type == TTM_PL_VRAM)
 			dst_offset = nvbo->vma.offset;
 		else
-			dst_offset += dev_priv->vm_gart_base;
+			dst_offset += dev_priv->gart_info.aper_base;
 	}
 
 	ret = RING_SPACE(chan, 3);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index dce9a5f6f6c..4c5fc9c9912 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -248,7 +248,6 @@ struct nouveau_channel {
 	/* NV50 VM */
 	struct nouveau_vm *vm;
 	struct nouveau_gpuobj *vm_pd;
-	struct nouveau_gpuobj *vm_gart_pt;
 
 	/* Objects */
 	struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -684,6 +683,7 @@ struct drm_nouveau_private {
 		uint64_t aper_free;
 
 		struct nouveau_gpuobj *sg_ctxdma;
+		struct nouveau_vma vma;
 	} gart_info;
 
 	/* nv10-nv40 tiling regions */
@@ -709,8 +709,6 @@ struct drm_nouveau_private {
 
 	/* G8x/G9x virtual address space */
 	struct nouveau_vm *chan_vm;
-	uint64_t vm_gart_base;
-	uint64_t vm_gart_size;
 
 	struct nvbios vbios;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 573fd7316d6..d1bed40dc44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -433,7 +433,7 @@ nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
 		flags0 |= 0x00030000;
 		break;
 	case NV_MEM_TARGET_GART:
-		base += dev_priv->vm_gart_base;
+		base += dev_priv->gart_info.aper_base;
 	default:
 		flags0 &= ~0x00100000;
 		break;
@@ -801,7 +801,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 			return ret;
 
 		nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
-		chan->vm->map_pgt(chan->vm_pd, 12, 1, dev_priv->gart_info.sg_ctxdma);
 	}
 
 	/* RAMHT */
@@ -889,7 +888,6 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 
 	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
-	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
 
 	if (chan->ramin_heap.free_stack.next)
 		drm_mm_takedown(&chan->ramin_heap);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a0bf130b02d..b57201ab538 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
 	dma_addr_t *pages;
 	unsigned nr_pages;
 
-	unsigned pte_start;
+	u64 offset;
 	bool bound;
 };
 
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 	}
 }
 
-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
-	if (dev_priv->card_type < NV_50)
-		return pte + 2;
-
-	return pte << 1;
-}
-
 static int
 nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
 
-	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
-	nvbe->pte_start = pte;
+	nvbe->offset = mem->start << PAGE_SHIFT;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = nvbe->pages[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
-		uint32_t offset_h = upper_32_bits(dma_offset);
-
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
-				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
-				pte += 2;
-			}
 
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
-	}
 
 	nvbe->bound = true;
 	return 0;
@@ -142,24 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 	if (!nvbe->bound)
 		return 0;
 
-	pte = nvbe->pte_start;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
-				pte += 2;
-			}
-		}
-	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
 	nvbe->bound = false;
@@ -182,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
 	}
 }
 
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	nvbe->offset = mem->start << PAGE_SHIFT;
+
+	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+	nvbe->bound = true;
+	return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	if (!nvbe->bound)
+		return 0;
+
+	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+			    nvbe->nr_pages << PAGE_SHIFT);
+	nvbe->bound = false;
+	return 0;
+}
+
 static struct ttm_backend_func nouveau_sgdma_backend = {
 	.populate = nouveau_sgdma_populate,
 	.clear = nouveau_sgdma_clear,
@@ -190,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
 	.destroy = nouveau_sgdma_destroy
 };
 
+static struct ttm_backend_func nv50_sgdma_backend = {
+	.populate = nouveau_sgdma_populate,
+	.clear = nouveau_sgdma_clear,
+	.bind = nv50_sgdma_bind,
+	.unbind = nv50_sgdma_unbind,
+	.destroy = nouveau_sgdma_destroy
+};
+
 struct ttm_backend *
 nouveau_sgdma_init_ttm(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_sgdma_be *nvbe;
 
-	if (!dev_priv->gart_info.sg_ctxdma)
-		return NULL;
-
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
 	if (!nvbe)
 		return NULL;
 
 	nvbe->dev = dev;
 
-	nvbe->backend.func = &nouveau_sgdma_backend;
-
+	if (dev_priv->card_type < NV_50)
+		nvbe->backend.func = &nouveau_sgdma_backend;
+	else
+		nvbe->backend.func = &nv50_sgdma_backend;
 	return &nvbe->backend;
 }
 
@@ -226,21 +221,15 @@ nouveau_sgdma_init(struct drm_device *dev)
 
 		obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
 		obj_size += 8; /* ctxdma header */
-	} else {
-		/* 1 entire VM page table */
-		aper_size = (512 * 1024 * 1024);
-		obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
-	}
 
 		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
 					 NVOBJ_FLAG_ZERO_ALLOC |
 					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
 		if (ret) {
 			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
 			return ret;
 		}
 
-	if (dev_priv->card_type < NV_50) {
 		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
 				   (1 << 12) /* PT present */ |
 				   (0 << 13) /* PT *not* linear */ |
@@ -249,18 +238,23 @@ nouveau_sgdma_init(struct drm_device *dev)
 		nv_wo32(gpuobj, 4, aper_size - 1);
 		for (i = 2; i < 2 + (aper_size >> 12); i++)
 			nv_wo32(gpuobj, i * 4, 0x00000000);
-	} else {
-		for (i = 0; i < obj_size; i += 8) {
-			nv_wo32(gpuobj, i + 0, 0x00000000);
-			nv_wo32(gpuobj, i + 4, 0x00000000);
-		}
+
+		dev_priv->gart_info.sg_ctxdma = gpuobj;
+		dev_priv->gart_info.aper_base = 0;
+		dev_priv->gart_info.aper_size = aper_size;
+	} else
+	if (dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+				     12, NV_MEM_ACCESS_RW,
+				     &dev_priv->gart_info.vma);
+		if (ret)
+			return ret;
+
+		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
 	}
-	dev_priv->engine.instmem.flush(dev);
 
 	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
-	dev_priv->gart_info.aper_base = 0;
-	dev_priv->gart_info.aper_size = aper_size;
-	dev_priv->gart_info.sg_ctxdma = gpuobj;
 	return 0;
 }
 
@@ -270,6 +264,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+	nouveau_vm_put(&dev_priv->gart_info.vma);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 08202fd682e..ec102bda844 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -131,7 +131,6 @@ nv50_instmem_init(struct drm_device *dev)
 	struct nouveau_channel *chan;
 	struct nouveau_vm *vm;
 	int ret, i;
-	u64 nongart_o;
 	u32 tmp;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -216,15 +215,10 @@ nv50_instmem_init(struct drm_device *dev)
 	for (i = 0; i < 8; i++)
 		nv_wr32(dev, 0x1900 + (i*4), 0);
 
-	/* Create shared channel VM, space is reserved for GART mappings at
-	 * the beginning of this address space, it's managed separately
-	 * because TTM makes life painful
+	/* Create shared channel VM, space is reserved at the beginning
+	 * to catch "NULL pointer" references
 	 */
-	dev_priv->vm_gart_base = 0x0020000000ULL;
-	dev_priv->vm_gart_size = 512 * 1024 * 1024;
-	nongart_o = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
-
-	ret = nouveau_vm_new(dev, 0, (1ULL << 40), nongart_o,
+	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
 			     29, 12, 16, &dev_priv->chan_vm);
 	if (ret)
 		return ret;