author	Ben Skeggs <bskeggs@redhat.com>	2014-08-09 14:10:22 -0400
committer	Ben Skeggs <bskeggs@redhat.com>	2014-08-09 15:13:13 -0400
commit	967e7bde8739fe3b215f7537e8f1f39c044902af (patch)
tree	5d96683b3b139da0b681ae038a8cd5f25ca32ef8 /drivers/gpu/drm/nouveau/nouveau_bo.c
parent	db2bec187dd68e79d512112df1f6e7a849e7f0ce (diff)
drm/nouveau: initial pass at moving to struct nvif_device
This is an attempt at isolating some of the changes necessary to port to NVIF in a separate commit.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
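The recurring pattern in this patch: generation checks that used to dereference the nvkm device object directly (nv_device(drm->device)->card_type) now read the struct nvif_device embedded in struct nouveau_drm and compare info.family against NV_DEVICE_INFO_V0_* constants, while code that still needs an nvkm-side object resolves it from the nvif handle via helpers such as nvkm_fb(), nvkm_bar(), and nvkm_device(). A condensed before/after sketch of the pattern, using only names that appear in the diff below:

	/* Before: chipset checks and subdev lookups went through the
	 * nvkm device object directly. */
	if (nv_device(drm->device)->card_type >= NV_50) {
		struct nouveau_bar *bar = nouveau_bar(drm->device);
		/* ... use bar ... */
	}

	/* After: the check reads the nvif_device's cached device info,
	 * and the nvkm BAR object is resolved from the nvif handle. */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		struct nouveau_bar *bar = nvkm_bar(&drm->device);
		/* ... use bar ... */
	}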
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c	57
1 file changed, 28 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index cfece9d603d9..9c9291b3bfb5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -52,7 +52,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
 	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
 	struct nouveau_engine *engine;
 
@@ -109,7 +109,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 		   u32 size, u32 pitch, u32 flags)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
 	struct nouveau_drm_tile *tile, *found = NULL;
 	int i;
 
@@ -153,23 +153,23 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 		       int *align, int *size)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nouveau_device *device = nv_device(drm->device);
+	struct nvif_device *device = &drm->device;
 
-	if (device->card_type < NV_50) {
+	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
 		if (nvbo->tile_mode) {
-			if (device->chipset >= 0x40) {
+			if (device->info.chipset >= 0x40) {
 				*align = 65536;
 				*size = roundup(*size, 64 * nvbo->tile_mode);
 
-			} else if (device->chipset >= 0x30) {
+			} else if (device->info.chipset >= 0x30) {
 				*align = 32768;
 				*size = roundup(*size, 64 * nvbo->tile_mode);
 
-			} else if (device->chipset >= 0x20) {
+			} else if (device->info.chipset >= 0x20) {
 				*align = 16384;
 				*size = roundup(*size, 64 * nvbo->tile_mode);
 
-			} else if (device->chipset >= 0x10) {
+			} else if (device->info.chipset >= 0x10) {
 				*align = 16384;
 				*size = roundup(*size, 32 * nvbo->tile_mode);
 			}
@@ -261,11 +261,10 @@ static void
 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
 	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
 
-	if ((nv_device(drm->device)->card_type == NV_10 ||
-	     nv_device(drm->device)->card_type == NV_11) &&
+	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
@@ -506,9 +505,9 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
 
-		if (nv_device(drm->device)->card_type >= NV_50) {
+		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 			/* Some BARs do not support being ioremapped WC */
-			if (nouveau_bar(drm->device)->iomap_uncached) {
+			if (nvkm_bar(&drm->device)->iomap_uncached) {
 				man->available_caching = TTM_PL_FLAG_UNCACHED;
 				man->default_caching = TTM_PL_FLAG_UNCACHED;
 			}
@@ -521,7 +520,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		}
 		break;
 	case TTM_PL_TT:
-		if (nv_device(drm->device)->card_type >= NV_50)
+		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 			man->func = &nouveau_gart_manager;
 		else
 		if (drm->agp.stat != ENABLED)
@@ -959,7 +958,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	 * old nouveau_mem node, these will get cleaned up after ttm has
 	 * destroyed the ttm_mem_reg
 	 */
-	if (nv_device(drm->device)->card_type >= NV_50) {
+	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nouveau_bo_move_prep(drm, bo, new_mem);
 		if (ret)
 			return ret;
@@ -1142,7 +1141,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 	if (new_mem->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	if (nv_device(drm->device)->card_type >= NV_10) {
+	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
 		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
 					       nvbo->tile_mode,
 					       nvbo->tile_flags);
@@ -1173,7 +1172,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;
 
-	if (nv_device(drm->device)->card_type < NV_50) {
+	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
 		if (ret)
 			return ret;
@@ -1210,7 +1209,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 
 out:
-	if (nv_device(drm->device)->card_type < NV_50) {
+	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 		if (ret)
 			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
 		else
@@ -1256,16 +1255,16 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		mem->bus.is_iomem = !dev->agp->cant_use_aperture;
 	}
 #endif
-	if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
+	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
 		/* untiled */
 		break;
 	/* fallthrough, tiled memory */
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
-		mem->bus.base = nv_device_resource_start(nv_device(drm->device), 1);
+		mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
 		mem->bus.is_iomem = true;
-		if (nv_device(drm->device)->card_type >= NV_50) {
-			struct nouveau_bar *bar = nouveau_bar(drm->device);
+		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+			struct nouveau_bar *bar = nvkm_bar(&drm->device);
 
 			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
 					&node->bar_vma);
@@ -1285,7 +1284,7 @@ static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nouveau_bar *bar = nouveau_bar(drm->device);
+	struct nouveau_bar *bar = nvkm_bar(&drm->device);
 	struct nouveau_mem *node = mem->mm_node;
 
 	if (!node->bar_vma.node)
@@ -1299,15 +1298,15 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nouveau_device *device = nv_device(drm->device);
-	u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT;
+	struct nvif_device *device = &drm->device;
+	u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
 	int ret;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
 	 * nothing to do here.
 	 */
 	if (bo->mem.mem_type != TTM_PL_VRAM) {
-		if (nv_device(drm->device)->card_type < NV_50 ||
+		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
 		    !nouveau_bo_tile_layout(nvbo))
 			return 0;
 
@@ -1322,7 +1321,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	}
 
 	/* make sure bo is in mappable vram */
-	if (nv_device(drm->device)->card_type >= NV_50 ||
+	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
 	    bo->mem.start + bo->mem.num_pages < mappable)
 		return 0;
 
@@ -1357,7 +1356,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	drm = nouveau_bdev(ttm->bdev);
-	device = nv_device(drm->device);
+	device = nvkm_device(&drm->device);
 	dev = drm->dev;
 	pdev = nv_device_base(device);
 
@@ -1414,7 +1413,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 		return;
 
 	drm = nouveau_bdev(ttm->bdev);
-	device = nv_device(drm->device);
+	device = nvkm_device(&drm->device);
 	dev = drm->dev;
 	pdev = nv_device_base(device);
 