path: root/drivers/gpu/drm/nouveau/nouveau_gem.c
author    Ben Skeggs <bskeggs@redhat.com>    2011-02-09 22:41:01 -0500
committer Ben Skeggs <bskeggs@redhat.com>    2011-02-24 15:45:43 -0500
commit    db5c8e299a30db48a3a60dadc676cf05d19d268d (patch)
tree      5b6c3d13e88f0f1a67746ce9e1a707a9e6015ce1 /drivers/gpu/drm/nouveau/nouveau_gem.c
parent    6ba9a68317781537d6184d3fdb2d0f20c97da3a4 (diff)
drm/nv50-nvc0: restrict memtype to those specified at creation time
Upcoming patches are going to enable full support for buffers that keep a constant GPU virtual address whenever they're validated for use by the GPU. In order for this to work properly while keeping support for large pages, we need to know if it's ever going to be possible for a buffer to end up in GART, and if so, disable large pages for the buffer's VMA. This is a new restriction that's not present in earlier kernels, but it should not break userspace, as the current code never attempts to validate buffers into a memtype other than the one they were created with.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
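To make the new restriction concrete, here is a minimal stand-alone C sketch of the masking logic the patch adds (the fake_bo type and the fake_* helpers are hypothetical names for illustration only; the domain bit values match the NOUVEAU_GEM_DOMAIN_* definitions in nouveau_drm.h):

#include <stdint.h>
#include <stdio.h>

/* Same bit values as the NOUVEAU_GEM_DOMAIN_* flags in nouveau_drm.h. */
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)

/* Hypothetical stand-in for struct nouveau_bo. */
struct fake_bo {
	uint32_t valid_domains;
};

/* Models the creation-time clamp added to nouveau_gem_new(): on nv50+
 * the buffer may only ever live in the domains requested at creation. */
static void fake_bo_init(struct fake_bo *bo, uint32_t create_domain,
			 int nv50_plus)
{
	bo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART;
	if (nv50_plus)
		bo->valid_domains &= create_domain;
}

/* Models the extra mask added to nouveau_gem_set_domain(): a later
 * validation request can only choose from the creation-time domains. */
static uint32_t fake_set_domain(struct fake_bo *bo, uint32_t read_domains,
				uint32_t write_domains, uint32_t valid_domains)
{
	return valid_domains & bo->valid_domains &
	       (write_domains ? write_domains : read_domains);
}

int main(void)
{
	struct fake_bo bo;
	uint32_t domains;

	/* A buffer created VRAM-only on nv50+ can never be validated
	 * into GART, so large pages are safe for its VMA. */
	fake_bo_init(&bo, NOUVEAU_GEM_DOMAIN_VRAM, 1);

	domains = fake_set_domain(&bo,
				  NOUVEAU_GEM_DOMAIN_VRAM |
				  NOUVEAU_GEM_DOMAIN_GART, 0,
				  NOUVEAU_GEM_DOMAIN_VRAM |
				  NOUVEAU_GEM_DOMAIN_GART);
	printf("allowed domains: 0x%x\n", (unsigned)domains); /* prints 0x2 */
	return 0;
}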
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_gem.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_gem.c	12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index cd4ed9e86704..3ce58d2222cb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -64,6 +64,7 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
 		int size, int align, uint32_t domain, uint32_t tile_mode,
 		uint32_t tile_flags, struct nouveau_bo **pnvbo)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
 	u32 flags = 0;
 	int ret;
@@ -81,6 +82,15 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
 		return ret;
 	nvbo = *pnvbo;
 
+	/* we restrict allowed domains on nv50+ to only the types
+	 * that were requested at creation time.  not possibly on
+	 * earlier chips without busting the ABI.
+	 */
+	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+			      NOUVEAU_GEM_DOMAIN_GART;
+	if (dev_priv->card_type >= NV_50)
+		nvbo->valid_domains &= domain;
+
 	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
 	if (!nvbo->gem) {
 		nouveau_bo_ref(NULL, pnvbo);
@@ -159,7 +169,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 {
 	struct nouveau_bo *nvbo = gem->driver_private;
 	struct ttm_buffer_object *bo = &nvbo->bo;
-	uint32_t domains = valid_domains &
+	uint32_t domains = valid_domains & nvbo->valid_domains &
 		(write_domains ? write_domains : read_domains);
 	uint32_t pref_flags = 0, valid_flags = 0;
 
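With both hunks applied, valid_domains is fixed when the object is created: on nv50+ a buffer created VRAM-only can never later be validated into GART. That is the guarantee the upcoming constant-virtual-address patches rely on to decide, at allocation time, whether a GART placement is ever possible, and hence whether large pages must be disabled for the buffer's VMA.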