Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 188
1 file changed, 85 insertions(+), 103 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b75258a9fe44..bc2ab900b24c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,44 +8,23 @@
 #define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
 
 struct nouveau_sgdma_be {
-        struct ttm_backend backend;
+        struct ttm_tt ttm;
         struct drm_device *dev;
-
-        dma_addr_t *pages;
-        unsigned nr_pages;
-        bool unmap_pages;
-
         u64 offset;
-        bool bound;
 };
 
 static int
-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-                       struct page **pages, struct page *dummy_read_page,
-                       dma_addr_t *dma_addrs)
+nouveau_sgdma_dma_map(struct ttm_tt *ttm)
 {
-        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
         struct drm_device *dev = nvbe->dev;
         int i;
 
-        NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
-
-        nvbe->pages = dma_addrs;
-        nvbe->nr_pages = num_pages;
-        nvbe->unmap_pages = true;
-
-        /* this code path isn't called and is incorrect anyways */
-        if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
-                nvbe->unmap_pages = false;
-                return 0;
-        }
-
-        for (i = 0; i < num_pages; i++) {
-                nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
-                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-                if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
-                        nvbe->nr_pages = --i;
-                        be->func->clear(be);
+        for (i = 0; i < ttm->num_pages; i++) {
+                ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+                                                   0, PAGE_SIZE,
+                                                   PCI_DMA_BIDIRECTIONAL);
+                if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
                         return -EFAULT;
                 }
         }
@@ -54,53 +33,52 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 }
 
 static void
-nouveau_sgdma_clear(struct ttm_backend *be)
+nouveau_sgdma_dma_unmap(struct ttm_tt *ttm)
 {
-        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
         struct drm_device *dev = nvbe->dev;
+        int i;
 
-        if (nvbe->bound)
-                be->func->unbind(be);
-
-        if (nvbe->unmap_pages) {
-                while (nvbe->nr_pages--) {
-                        pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+        for (i = 0; i < ttm->num_pages; i++) {
+                if (ttm->dma_address[i]) {
+                        pci_unmap_page(dev->pdev, ttm->dma_address[i],
                                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                 }
+                ttm->dma_address[i] = 0;
         }
 }
 
 static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
 {
-        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 
-        if (be) {
+        if (ttm) {
                 NV_DEBUG(nvbe->dev, "\n");
-
-                if (nvbe) {
-                        if (nvbe->pages)
-                                be->func->clear(be);
-                        kfree(nvbe);
-                }
+                kfree(nvbe);
         }
 }
 
 static int
-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
         struct drm_device *dev = nvbe->dev;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
         unsigned i, j, pte;
+        int r;
 
         NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
+        r = nouveau_sgdma_dma_map(ttm);
+        if (r) {
+                return r;
+        }
 
         nvbe->offset = mem->start << PAGE_SHIFT;
         pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-        for (i = 0; i < nvbe->nr_pages; i++) {
-                dma_addr_t dma_offset = nvbe->pages[i];
+        for (i = 0; i < ttm->num_pages; i++) {
+                dma_addr_t dma_offset = ttm->dma_address[i];
                 uint32_t offset_l = lower_32_bits(dma_offset);
 
                 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -109,14 +87,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
                 }
         }
 
-        nvbe->bound = true;
         return 0;
 }
 
 static int
-nv04_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
-        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
         struct drm_device *dev = nvbe->dev;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -124,22 +101,20 @@ nv04_sgdma_unbind(struct ttm_backend *be)
 
         NV_DEBUG(dev, "\n");
 
-        if (!nvbe->bound)
+        if (ttm->state != tt_bound)
                 return 0;
 
         pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-        for (i = 0; i < nvbe->nr_pages; i++) {
+        for (i = 0; i < ttm->num_pages; i++) {
                 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
                         nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
         }
 
-        nvbe->bound = false;
+        nouveau_sgdma_dma_unmap(ttm);
         return 0;
 }
 
 static struct ttm_backend_func nv04_sgdma_backend = {
-        .populate = nouveau_sgdma_populate,
-        .clear = nouveau_sgdma_clear,
         .bind = nv04_sgdma_bind,
         .unbind = nv04_sgdma_unbind,
         .destroy = nouveau_sgdma_destroy
@@ -158,16 +133,21 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
 }
 
 static int
-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
         struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
         struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-        dma_addr_t *list = nvbe->pages;
+        dma_addr_t *list = ttm->dma_address;
         u32 pte = mem->start << 2;
-        u32 cnt = nvbe->nr_pages;
+        u32 cnt = ttm->num_pages;
+        int r;
 
         nvbe->offset = mem->start << PAGE_SHIFT;
+        r = nouveau_sgdma_dma_map(ttm);
+        if (r) {
+                return r;
+        }
 
         while (cnt--) {
                 nv_wo32(pgt, pte, (*list++ >> 7) | 1);
@@ -175,18 +155,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
         }
 
         nv41_sgdma_flush(nvbe);
-        nvbe->bound = true;
         return 0;
 }
 
 static int
-nv41_sgdma_unbind(struct ttm_backend *be)
+nv41_sgdma_unbind(struct ttm_tt *ttm)
 {
-        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
         struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
         struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
         u32 pte = (nvbe->offset >> 12) << 2;
-        u32 cnt = nvbe->nr_pages;
+        u32 cnt = ttm->num_pages;
 
         while (cnt--) {
                 nv_wo32(pgt, pte, 0x00000000);
@@ -194,24 +173,23 @@ nv41_sgdma_unbind(struct ttm_backend *be)
         }
 
         nv41_sgdma_flush(nvbe);
-        nvbe->bound = false;
+        nouveau_sgdma_dma_unmap(ttm);
         return 0;
 }
 
 static struct ttm_backend_func nv41_sgdma_backend = {
-        .populate = nouveau_sgdma_populate,
-        .clear = nouveau_sgdma_clear,
         .bind = nv41_sgdma_bind,
         .unbind = nv41_sgdma_unbind,
         .destroy = nouveau_sgdma_destroy
 };
 
 static void
-nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+nv44_sgdma_flush(struct ttm_tt *ttm)
 {
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
         struct drm_device *dev = nvbe->dev;
 
-        nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+        nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
         nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
         if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
                 NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -270,17 +248,21 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
 }
 
 static int
-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
         struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
         struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-        dma_addr_t *list = nvbe->pages;
+        dma_addr_t *list = ttm->dma_address;
         u32 pte = mem->start << 2, tmp[4];
-        u32 cnt = nvbe->nr_pages;
-        int i;
+        u32 cnt = ttm->num_pages;
+        int i, r;
 
         nvbe->offset = mem->start << PAGE_SHIFT;
+        r = nouveau_sgdma_dma_map(ttm);
+        if (r) {
+                return r;
+        }
 
         if (pte & 0x0000000c) {
                 u32 max = 4 - ((pte >> 2) & 0x3);
@@ -305,19 +287,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
         if (cnt)
                 nv44_sgdma_fill(pgt, list, pte, cnt);
 
-        nv44_sgdma_flush(nvbe);
-        nvbe->bound = true;
+        nv44_sgdma_flush(ttm);
         return 0;
 }
 
 static int
-nv44_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_tt *ttm)
 {
-        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
         struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
         struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
         u32 pte = (nvbe->offset >> 12) << 2;
-        u32 cnt = nvbe->nr_pages;
+        u32 cnt = ttm->num_pages;
 
         if (pte & 0x0000000c) {
                 u32 max = 4 - ((pte >> 2) & 0x3);
@@ -339,55 +320,53 @@ nv44_sgdma_unbind(struct ttm_backend *be)
         if (cnt)
                 nv44_sgdma_fill(pgt, NULL, pte, cnt);
 
-        nv44_sgdma_flush(nvbe);
-        nvbe->bound = false;
+        nv44_sgdma_flush(ttm);
+        nouveau_sgdma_dma_unmap(ttm);
         return 0;
 }
 
 static struct ttm_backend_func nv44_sgdma_backend = {
-        .populate = nouveau_sgdma_populate,
-        .clear = nouveau_sgdma_clear,
         .bind = nv44_sgdma_bind,
         .unbind = nv44_sgdma_unbind,
         .destroy = nouveau_sgdma_destroy
 };
 
 static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
         struct nouveau_mem *node = mem->mm_node;
+        int r;
+
         /* noop: bound in move_notify() */
-        node->pages = nvbe->pages;
-        nvbe->pages = (dma_addr_t *)node;
-        nvbe->bound = true;
+        r = nouveau_sgdma_dma_map(ttm);
+        if (r) {
+                return r;
+        }
+        node->pages = ttm->dma_address;
         return 0;
 }
 
 static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv50_sgdma_unbind(struct ttm_tt *ttm)
 {
-        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-        struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
         /* noop: unbound in move_notify() */
-        nvbe->pages = node->pages;
-        node->pages = NULL;
-        nvbe->bound = false;
+        nouveau_sgdma_dma_unmap(ttm);
         return 0;
 }
 
 static struct ttm_backend_func nv50_sgdma_backend = {
-        .populate = nouveau_sgdma_populate,
-        .clear = nouveau_sgdma_clear,
         .bind = nv50_sgdma_bind,
         .unbind = nv50_sgdma_unbind,
         .destroy = nouveau_sgdma_destroy
 };
 
-struct ttm_backend *
-nouveau_sgdma_init_ttm(struct drm_device *dev)
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+                         unsigned long size, uint32_t page_flags,
+                         struct page *dummy_read_page)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+        struct drm_device *dev = dev_priv->dev;
         struct nouveau_sgdma_be *nvbe;
 
         nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -395,9 +374,12 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
                 return NULL;
 
         nvbe->dev = dev;
+        nvbe->ttm.func = dev_priv->gart_info.func;
 
-        nvbe->backend.func = dev_priv->gart_info.func;
-        return &nvbe->backend;
+        if (ttm_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+                return NULL;
+        }
+        return &nvbe->ttm;
 }
 
 int