author     Ben Skeggs <bskeggs@redhat.com>  2011-02-09 22:41:01 -0500
committer  Ben Skeggs <bskeggs@redhat.com>  2011-02-24 15:45:43 -0500
commit     db5c8e299a30db48a3a60dadc676cf05d19d268d (patch)
tree       5b6c3d13e88f0f1a67746ce9e1a707a9e6015ce1 /drivers/gpu/drm/nouveau/nouveau_bo.c
parent     6ba9a68317781537d6184d3fdb2d0f20c97da3a4 (diff)
drm/nv50-nvc0: restrict memtype to those specified at creation time
Upcoming patches are going to enable full support for buffers that keep a constant GPU virtual address whenever they're validated for use by the GPU.

In order for this to work properly while keeping support for large pages, we need to know if it's ever going to be possible for a buffer to end up in GART, and if so, disable large pages for the buffer's VMA.

This is a new restriction that's not present in earlier kernels, but it should not break userspace, as the current code never attempts to validate buffers into a memtype other than the one they were created with.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
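The core of the change is the page-shift decision in nouveau_bo_fixup_align(): the VMA page size is now chosen once, from the placement flags passed at creation time, and large pages are only used for buffers that can never be validated into GART. Below is a minimal, standalone sketch of that decision; the flag value, the shift constants, and the choose_page_shift() helper are illustrative stand-ins, not the kernel's actual definitions.

#include <stdio.h>

/* Stand-in placement flag and page shifts, standing in for the kernel's
 * TTM_PL_FLAG_TT and the channel VM's spg_shift/lpg_shift fields. */
#define PL_FLAG_TT  (1u << 1)  /* buffer may ever be validated into GART */
#define SPG_SHIFT   12         /* 4 KiB small pages */
#define LPG_SHIFT   17         /* 128 KiB large pages (example value) */

/* Hypothetical helper: pick the VMA page shift once, at creation time. */
static int choose_page_shift(unsigned int flags, unsigned long size)
{
	/* A buffer that might end up in GART must stay on small pages so
	 * its constant virtual address works for every allowed placement;
	 * otherwise, sufficiently large buffers get large pages. */
	if (!(flags & PL_FLAG_TT) && size > 256 * 1024)
		return LPG_SHIFT;
	return SPG_SHIFT;
}

int main(void)
{
	printf("VRAM-only 1 MiB buffer: shift %d\n",
	       choose_page_shift(0, 1UL << 20));          /* -> 17 */
	printf("VRAM|GART 1 MiB buffer: shift %d\n",
	       choose_page_shift(PL_FLAG_TT, 1UL << 20));  /* -> 12 */
	return 0;
}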
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 931dade12edd..cfdecd31f802 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -54,8 +54,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 }
 
 static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
-		       int *page_shift)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
+		       int *align, int *size, int *page_shift)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
 
@@ -80,7 +80,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
 		}
 	} else {
 		if (likely(dev_priv->chan_vm)) {
-			if (*size > 256 * 1024)
+			if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
 				*page_shift = dev_priv->chan_vm->lpg_shift;
 			else
 				*page_shift = dev_priv->chan_vm->spg_shift;
@@ -113,7 +113,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
 
-	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
+	nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
 	align >>= PAGE_SHIFT;
 
 	if (dev_priv->chan_vm) {