diff options
author | Ben Skeggs <bskeggs@redhat.com> | 2010-12-06 00:28:54 -0500 |
---|---|---|
committer | Ben Skeggs <bskeggs@redhat.com> | 2010-12-07 22:48:20 -0500 |
commit | 60d2a88ae896ae51c76f8b15c2f4b762d5b00864 (patch) | |
tree | e136aee905d35f8936036fd01af1ca29526fcc67 /drivers/gpu/drm/nouveau/nouveau_mem.c | |
parent | 34cf01bc4b8021cef62cbd79224577c13d01b106 (diff) |
drm/nouveau: kick vram functions out into an "engine"
NVC0 will be able to share some of nv50's paths this way. This also makes
the card-specific vram code responsible for deciding if a given set
of tile_flags is valid, rather than duplicating the allowed types in
nv50_vram.c and nouveau_gem.c
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_mem.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_mem.c | 47 |
1 files changed, 26 insertions, 21 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 5a1809480388..224181193a1f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -241,7 +241,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev) | |||
241 | return 0; | 241 | return 0; |
242 | } | 242 | } |
243 | 243 | ||
244 | static int | 244 | int |
245 | nouveau_mem_detect(struct drm_device *dev) | 245 | nouveau_mem_detect(struct drm_device *dev) |
246 | { | 246 | { |
247 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 247 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
@@ -255,26 +255,25 @@ nouveau_mem_detect(struct drm_device *dev) | |||
255 | if (dev_priv->card_type < NV_50) { | 255 | if (dev_priv->card_type < NV_50) { |
256 | dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); | 256 | dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); |
257 | dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; | 257 | dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; |
258 | } else | ||
259 | if (dev_priv->card_type < NV_C0) { | ||
260 | if (nv50_vram_init(dev)) | ||
261 | return -ENOMEM; | ||
262 | } else { | 258 | } else { |
263 | dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; | 259 | dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; |
264 | dev_priv->vram_size *= nv_rd32(dev, 0x121c74); | 260 | dev_priv->vram_size *= nv_rd32(dev, 0x121c74); |
265 | } | 261 | } |
266 | 262 | ||
267 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); | ||
268 | if (dev_priv->vram_sys_base) { | ||
269 | NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", | ||
270 | dev_priv->vram_sys_base); | ||
271 | } | ||
272 | |||
273 | if (dev_priv->vram_size) | 263 | if (dev_priv->vram_size) |
274 | return 0; | 264 | return 0; |
275 | return -ENOMEM; | 265 | return -ENOMEM; |
276 | } | 266 | } |
277 | 267 | ||
268 | bool | ||
269 | nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags) | ||
270 | { | ||
271 | if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) | ||
272 | return true; | ||
273 | |||
274 | return false; | ||
275 | } | ||
276 | |||
278 | #if __OS_HAS_AGP | 277 | #if __OS_HAS_AGP |
279 | static unsigned long | 278 | static unsigned long |
280 | get_agp_mode(struct drm_device *dev, unsigned long mode) | 279 | get_agp_mode(struct drm_device *dev, unsigned long mode) |
@@ -432,11 +431,16 @@ nouveau_mem_vram_init(struct drm_device *dev) | |||
432 | else | 431 | else |
433 | dev_priv->ramin_rsvd_vram = (512 * 1024); | 432 | dev_priv->ramin_rsvd_vram = (512 * 1024); |
434 | 433 | ||
435 | /* initialise gpu-specific vram backend */ | 434 | ret = dev_priv->engine.vram.init(dev); |
436 | ret = nouveau_mem_detect(dev); | ||
437 | if (ret) | 435 | if (ret) |
438 | return ret; | 436 | return ret; |
439 | 437 | ||
438 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); | ||
439 | if (dev_priv->vram_sys_base) { | ||
440 | NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", | ||
441 | dev_priv->vram_sys_base); | ||
442 | } | ||
443 | |||
440 | dev_priv->fb_available_size = dev_priv->vram_size; | 444 | dev_priv->fb_available_size = dev_priv->vram_size; |
441 | dev_priv->fb_mappable_pages = dev_priv->fb_available_size; | 445 | dev_priv->fb_mappable_pages = dev_priv->fb_available_size; |
442 | if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) | 446 | if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) |
@@ -698,9 +702,10 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man, | |||
698 | struct ttm_mem_reg *mem) | 702 | struct ttm_mem_reg *mem) |
699 | { | 703 | { |
700 | struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); | 704 | struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); |
705 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
701 | struct drm_device *dev = dev_priv->dev; | 706 | struct drm_device *dev = dev_priv->dev; |
702 | 707 | ||
703 | nv50_vram_del(dev, (struct nouveau_vram **)&mem->mm_node); | 708 | vram->put(dev, (struct nouveau_vram **)&mem->mm_node); |
704 | } | 709 | } |
705 | 710 | ||
706 | static int | 711 | static int |
@@ -710,30 +715,30 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, | |||
710 | struct ttm_mem_reg *mem) | 715 | struct ttm_mem_reg *mem) |
711 | { | 716 | { |
712 | struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); | 717 | struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); |
718 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
713 | struct drm_device *dev = dev_priv->dev; | 719 | struct drm_device *dev = dev_priv->dev; |
714 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 720 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
715 | struct nouveau_vram *vram; | 721 | struct nouveau_vram *node; |
716 | u32 size_nc = 0; | 722 | u32 size_nc = 0; |
717 | int ret; | 723 | int ret; |
718 | 724 | ||
719 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) | 725 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) |
720 | size_nc = 1 << nvbo->vma.node->type; | 726 | size_nc = 1 << nvbo->vma.node->type; |
721 | 727 | ||
722 | ret = nv50_vram_new(dev, mem->num_pages << PAGE_SHIFT, | 728 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, |
723 | mem->page_alignment << PAGE_SHIFT, size_nc, | 729 | mem->page_alignment << PAGE_SHIFT, size_nc, |
724 | (nvbo->tile_flags >> 8) & 0x7f, &vram); | 730 | (nvbo->tile_flags >> 8) & 0xff, &node); |
725 | if (ret) | 731 | if (ret) |
726 | return ret; | 732 | return ret; |
727 | 733 | ||
728 | mem->mm_node = vram; | 734 | mem->mm_node = node; |
729 | mem->start = vram->offset >> PAGE_SHIFT; | 735 | mem->start = node->offset >> PAGE_SHIFT; |
730 | return 0; | 736 | return 0; |
731 | } | 737 | } |
732 | 738 | ||
733 | void | 739 | void |
734 | nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) | 740 | nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) |
735 | { | 741 | { |
736 | struct ttm_bo_global *glob = man->bdev->glob; | ||
737 | struct nouveau_mm *mm = man->priv; | 742 | struct nouveau_mm *mm = man->priv; |
738 | struct nouveau_mm_node *r; | 743 | struct nouveau_mm_node *r; |
739 | u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {}; | 744 | u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {}; |