about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/nouveau/nouveau_sgdma.c
diff options
context:
space:
mode:
authorBen Skeggs <bskeggs@redhat.com>2011-01-10 01:24:00 -0500
committerBen Skeggs <bskeggs@redhat.com>2011-02-24 15:30:05 -0500
commitefa58db3de82ab0fdc0774aef69e2dd8a27cc98f (patch)
tree9e609cceaaccc22621739c24f72d59e0bdb5a8f8 /drivers/gpu/drm/nouveau/nouveau_sgdma.c
parente11d57ca0b6dada29007ce3ad3db6c84034a768f (diff)
drm/nouveau: move + rename some stuff in nouveau_sgdma.c
In preparation for the addition of a new nv40 pcie backend.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c77
1 files changed, 38 insertions, 39 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 07b115184b87..a6002f456899 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -89,8 +89,24 @@ nouveau_sgdma_clear(struct ttm_backend *be)
89 } 89 }
90} 90}
91 91
92static void
93nouveau_sgdma_destroy(struct ttm_backend *be)
94{
95 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
96
97 if (be) {
98 NV_DEBUG(nvbe->dev, "\n");
99
100 if (nvbe) {
101 if (nvbe->pages)
102 be->func->clear(be);
103 kfree(nvbe);
104 }
105 }
106}
107
92static int 108static int
93nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 109nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
94{ 110{
95 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 111 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
96 struct drm_device *dev = nvbe->dev; 112 struct drm_device *dev = nvbe->dev;
@@ -117,7 +133,7 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
117} 133}
118 134
119static int 135static int
120nouveau_sgdma_unbind(struct ttm_backend *be) 136nv04_sgdma_unbind(struct ttm_backend *be)
121{ 137{
122 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 138 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
123 struct drm_device *dev = nvbe->dev; 139 struct drm_device *dev = nvbe->dev;
@@ -140,21 +156,13 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
140 return 0; 156 return 0;
141} 157}
142 158
143static void 159static struct ttm_backend_func nv04_sgdma_backend = {
144nouveau_sgdma_destroy(struct ttm_backend *be) 160 .populate = nouveau_sgdma_populate,
145{ 161 .clear = nouveau_sgdma_clear,
146 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 162 .bind = nv04_sgdma_bind,
147 163 .unbind = nv04_sgdma_unbind,
148 if (be) { 164 .destroy = nouveau_sgdma_destroy
149 NV_DEBUG(nvbe->dev, "\n"); 165};
150
151 if (nvbe) {
152 if (nvbe->pages)
153 be->func->clear(be);
154 kfree(nvbe);
155 }
156 }
157}
158 166
159static int 167static int
160nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 168nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
@@ -185,14 +193,6 @@ nv50_sgdma_unbind(struct ttm_backend *be)
185 return 0; 193 return 0;
186} 194}
187 195
188static struct ttm_backend_func nouveau_sgdma_backend = {
189 .populate = nouveau_sgdma_populate,
190 .clear = nouveau_sgdma_clear,
191 .bind = nouveau_sgdma_bind,
192 .unbind = nouveau_sgdma_unbind,
193 .destroy = nouveau_sgdma_destroy
194};
195
196static struct ttm_backend_func nv50_sgdma_backend = { 196static struct ttm_backend_func nv50_sgdma_backend = {
197 .populate = nouveau_sgdma_populate, 197 .populate = nouveau_sgdma_populate,
198 .clear = nouveau_sgdma_clear, 198 .clear = nouveau_sgdma_clear,
@@ -213,10 +213,10 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
213 213
214 nvbe->dev = dev; 214 nvbe->dev = dev;
215 215
216 if (dev_priv->card_type < NV_50) 216 if (dev_priv->card_type >= NV_50)
217 nvbe->backend.func = &nouveau_sgdma_backend;
218 else
219 nvbe->backend.func = &nv50_sgdma_backend; 217 nvbe->backend.func = &nv50_sgdma_backend;
218 else
219 nvbe->backend.func = &nv04_sgdma_backend;
220 return &nvbe->backend; 220 return &nvbe->backend;
221} 221}
222 222
@@ -228,7 +228,16 @@ nouveau_sgdma_init(struct drm_device *dev)
228 uint32_t aper_size, obj_size; 228 uint32_t aper_size, obj_size;
229 int i, ret; 229 int i, ret;
230 230
231 if (dev_priv->card_type < NV_50) { 231 if (dev_priv->card_type >= NV_50) {
232 ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
233 12, NV_MEM_ACCESS_RW,
234 &dev_priv->gart_info.vma);
235 if (ret)
236 return ret;
237
238 dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
239 dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
240 } else {
232 if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024) 241 if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
233 aper_size = 64 * 1024 * 1024; 242 aper_size = 64 * 1024 * 1024;
234 else 243 else
@@ -257,16 +266,6 @@ nouveau_sgdma_init(struct drm_device *dev)
257 dev_priv->gart_info.sg_ctxdma = gpuobj; 266 dev_priv->gart_info.sg_ctxdma = gpuobj;
258 dev_priv->gart_info.aper_base = 0; 267 dev_priv->gart_info.aper_base = 0;
259 dev_priv->gart_info.aper_size = aper_size; 268 dev_priv->gart_info.aper_size = aper_size;
260 } else
261 if (dev_priv->chan_vm) {
262 ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
263 12, NV_MEM_ACCESS_RW,
264 &dev_priv->gart_info.vma);
265 if (ret)
266 return ret;
267
268 dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
269 dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
270 } 269 }
271 270
272 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; 271 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;