author    Jerome Glisse <jglisse@redhat.com>  2011-11-01 20:46:13 -0400
committer Dave Airlie <airlied@redhat.com>    2011-12-06 05:39:17 -0500
commit    649bf3ca77343e3be1e0af8e21356fa569b1abd9 (patch)
tree      01ad6a5f3f74b087cb791f5965d3190916975789 /drivers/gpu
parent    822c4d9ae0d55a4fcea9f0a462bc6406a06692e2 (diff)
drm/ttm: merge ttm_backend and ttm_tt V5
ttm_backend will only exist with a ttm_tt, and ttm_tt will only be of
interest when bound to a backend. Merge them to avoid code and data
duplication.

V2 Rebase on top of memory accounting overhaul
V3 Rebase on top of more memory accounting changes
V4 Rebase on top of no memory account changes (where/when is my delorean
   when i need it ?)
V5 make sure ttm is unbound before destroying, change commit message on
   suggestion from Tormod Volden

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
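Every driver touched by the diffstat below is converted to the same shape: the
per-driver backend struct now embeds struct ttm_tt, the populate/clear hooks go
away (page allocation stays in ttm_tt_populate()), the remaining
ttm_backend_func carries only bind/unbind/destroy, and the old
create_ttm_backend_entry hook becomes ttm_tt_create, which builds the object
with ttm_tt_init(). Below is a minimal sketch of that pattern against this
patch's API; the "foo" driver and its foo_device(), foo_gart_bind() and
foo_gart_unbind() helpers are invented stand-ins for the real GART code.

/*
 * Hypothetical "foo" driver: everything prefixed foo_ is invented for
 * illustration; the TTM types and calls are the ones this patch introduces.
 */
#include <linux/slab.h>
#include "ttm/ttm_bo_driver.h"

struct foo_ttm_tt {
	struct ttm_tt ttm;		/* embedded, replaces struct ttm_backend */
	struct foo_device *fdev;
	u64 offset;
};

static int foo_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct foo_ttm_tt *gtt = container_of(ttm, struct foo_ttm_tt, ttm);

	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	/* pages are already populated by TTM; just program the GART */
	return foo_gart_bind(gtt->fdev, gtt->offset, ttm->num_pages,
			     ttm->pages, ttm->dma_address);
}

static int foo_ttm_unbind(struct ttm_tt *ttm)
{
	struct foo_ttm_tt *gtt = container_of(ttm, struct foo_ttm_tt, ttm);

	foo_gart_unbind(gtt->fdev, gtt->offset, ttm->num_pages);
	return 0;
}

static void foo_ttm_destroy(struct ttm_tt *ttm)
{
	/* ttm_tt_destroy() unbinds before calling this (the V5 change) */
	kfree(container_of(ttm, struct foo_ttm_tt, ttm));
}

static struct ttm_backend_func foo_ttm_func = {
	/* no .populate/.clear any more; ttm_tt_populate() owns the pages */
	.bind	 = foo_ttm_bind,
	.unbind	 = foo_ttm_unbind,
	.destroy = foo_ttm_destroy,
};

static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct foo_ttm_tt *gtt = kzalloc(sizeof(*gtt), GFP_KERNEL);

	if (!gtt)
		return NULL;
	gtt->ttm.func = &foo_ttm_func;
	gtt->fdev = foo_device(bdev);	/* invented bdev -> driver accessor */
	/* on failure ttm_tt_init() calls our destroy hook, which frees gtt */
	if (ttm_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page))
		return NULL;
	return &gtt->ttm;
}

static struct ttm_bo_driver foo_bo_driver = {
	.ttm_tt_create = &foo_ttm_tt_create,
	/* remaining hooks are unchanged by this patch */
};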
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c    |  14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h   |   5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 188
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c     | 219
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c   |  88
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c            |   9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c            |  59
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c  |  66
8 files changed, 253 insertions, 395 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7226f419e178..b060fa48135c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -343,8 +343,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
343 *mem = val; 343 *mem = val;
344} 344}
345 345
346static struct ttm_backend * 346static struct ttm_tt *
347nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev) 347nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
348 unsigned long size, uint32_t page_flags,
349 struct page *dummy_read_page)
348{ 350{
349 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 351 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
350 struct drm_device *dev = dev_priv->dev; 352 struct drm_device *dev = dev_priv->dev;
@@ -352,11 +354,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
352 switch (dev_priv->gart_info.type) { 354 switch (dev_priv->gart_info.type) {
353#if __OS_HAS_AGP 355#if __OS_HAS_AGP
354 case NOUVEAU_GART_AGP: 356 case NOUVEAU_GART_AGP:
355 return ttm_agp_backend_init(bdev, dev->agp->bridge); 357 return ttm_agp_tt_create(bdev, dev->agp->bridge,
358 size, page_flags, dummy_read_page);
356#endif 359#endif
357 case NOUVEAU_GART_PDMA: 360 case NOUVEAU_GART_PDMA:
358 case NOUVEAU_GART_HW: 361 case NOUVEAU_GART_HW:
359 return nouveau_sgdma_init_ttm(dev); 362 return nouveau_sgdma_create_ttm(bdev, size, page_flags,
363 dummy_read_page);
360 default: 364 default:
361 NV_ERROR(dev, "Unknown GART type %d\n", 365 NV_ERROR(dev, "Unknown GART type %d\n",
362 dev_priv->gart_info.type); 366 dev_priv->gart_info.type);
@@ -1045,7 +1049,7 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
1045} 1049}
1046 1050
1047struct ttm_bo_driver nouveau_bo_driver = { 1051struct ttm_bo_driver nouveau_bo_driver = {
1048 .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, 1052 .ttm_tt_create = &nouveau_ttm_tt_create,
1049 .invalidate_caches = nouveau_bo_invalidate_caches, 1053 .invalidate_caches = nouveau_bo_invalidate_caches,
1050 .init_mem_type = nouveau_bo_init_mem_type, 1054 .init_mem_type = nouveau_bo_init_mem_type,
1051 .evict_flags = nouveau_bo_evict_flags, 1055 .evict_flags = nouveau_bo_evict_flags,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 29837da1098b..0c53e39fc6c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1000,7 +1000,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
1000extern void nouveau_sgdma_takedown(struct drm_device *); 1000extern void nouveau_sgdma_takedown(struct drm_device *);
1001extern uint32_t nouveau_sgdma_get_physical(struct drm_device *, 1001extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
1002 uint32_t offset); 1002 uint32_t offset);
1003extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); 1003extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
1004 unsigned long size,
1005 uint32_t page_flags,
1006 struct page *dummy_read_page);
1004 1007
1005/* nouveau_debugfs.c */ 1008/* nouveau_debugfs.c */
1006#if defined(CONFIG_DRM_NOUVEAU_DEBUG) 1009#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b75258a9fe44..bc2ab900b24c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,44 +8,23 @@
8#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) 8#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
9 9
10struct nouveau_sgdma_be { 10struct nouveau_sgdma_be {
11 struct ttm_backend backend; 11 struct ttm_tt ttm;
12 struct drm_device *dev; 12 struct drm_device *dev;
13
14 dma_addr_t *pages;
15 unsigned nr_pages;
16 bool unmap_pages;
17
18 u64 offset; 13 u64 offset;
19 bool bound;
20}; 14};
21 15
22static int 16static int
23nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, 17nouveau_sgdma_dma_map(struct ttm_tt *ttm)
24 struct page **pages, struct page *dummy_read_page,
25 dma_addr_t *dma_addrs)
26{ 18{
27 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 19 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
28 struct drm_device *dev = nvbe->dev; 20 struct drm_device *dev = nvbe->dev;
29 int i; 21 int i;
30 22
31 NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages); 23 for (i = 0; i < ttm->num_pages; i++) {
32 24 ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
33 nvbe->pages = dma_addrs; 25 0, PAGE_SIZE,
34 nvbe->nr_pages = num_pages; 26 PCI_DMA_BIDIRECTIONAL);
35 nvbe->unmap_pages = true; 27 if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
36
37 /* this code path isn't called and is incorrect anyways */
38 if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
39 nvbe->unmap_pages = false;
40 return 0;
41 }
42
43 for (i = 0; i < num_pages; i++) {
44 nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
45 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
46 if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
47 nvbe->nr_pages = --i;
48 be->func->clear(be);
49 return -EFAULT; 28 return -EFAULT;
50 } 29 }
51 } 30 }
@@ -54,53 +33,52 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
54} 33}
55 34
56static void 35static void
57nouveau_sgdma_clear(struct ttm_backend *be) 36nouveau_sgdma_dma_unmap(struct ttm_tt *ttm)
58{ 37{
59 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 38 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
60 struct drm_device *dev = nvbe->dev; 39 struct drm_device *dev = nvbe->dev;
40 int i;
61 41
62 if (nvbe->bound) 42 for (i = 0; i < ttm->num_pages; i++) {
63 be->func->unbind(be); 43 if (ttm->dma_address[i]) {
64 44 pci_unmap_page(dev->pdev, ttm->dma_address[i],
65 if (nvbe->unmap_pages) {
66 while (nvbe->nr_pages--) {
67 pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
68 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 45 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
69 } 46 }
47 ttm->dma_address[i] = 0;
70 } 48 }
71} 49}
72 50
73static void 51static void
74nouveau_sgdma_destroy(struct ttm_backend *be) 52nouveau_sgdma_destroy(struct ttm_tt *ttm)
75{ 53{
76 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 54 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
77 55
78 if (be) { 56 if (ttm) {
79 NV_DEBUG(nvbe->dev, "\n"); 57 NV_DEBUG(nvbe->dev, "\n");
80 58 kfree(nvbe);
81 if (nvbe) {
82 if (nvbe->pages)
83 be->func->clear(be);
84 kfree(nvbe);
85 }
86 } 59 }
87} 60}
88 61
89static int 62static int
90nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 63nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
91{ 64{
92 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 65 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
93 struct drm_device *dev = nvbe->dev; 66 struct drm_device *dev = nvbe->dev;
94 struct drm_nouveau_private *dev_priv = dev->dev_private; 67 struct drm_nouveau_private *dev_priv = dev->dev_private;
95 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; 68 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
96 unsigned i, j, pte; 69 unsigned i, j, pte;
70 int r;
97 71
98 NV_DEBUG(dev, "pg=0x%lx\n", mem->start); 72 NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
73 r = nouveau_sgdma_dma_map(ttm);
74 if (r) {
75 return r;
76 }
99 77
100 nvbe->offset = mem->start << PAGE_SHIFT; 78 nvbe->offset = mem->start << PAGE_SHIFT;
101 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; 79 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
102 for (i = 0; i < nvbe->nr_pages; i++) { 80 for (i = 0; i < ttm->num_pages; i++) {
103 dma_addr_t dma_offset = nvbe->pages[i]; 81 dma_addr_t dma_offset = ttm->dma_address[i];
104 uint32_t offset_l = lower_32_bits(dma_offset); 82 uint32_t offset_l = lower_32_bits(dma_offset);
105 83
106 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { 84 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -109,14 +87,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
109 } 87 }
110 } 88 }
111 89
112 nvbe->bound = true;
113 return 0; 90 return 0;
114} 91}
115 92
116static int 93static int
117nv04_sgdma_unbind(struct ttm_backend *be) 94nv04_sgdma_unbind(struct ttm_tt *ttm)
118{ 95{
119 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 96 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
120 struct drm_device *dev = nvbe->dev; 97 struct drm_device *dev = nvbe->dev;
121 struct drm_nouveau_private *dev_priv = dev->dev_private; 98 struct drm_nouveau_private *dev_priv = dev->dev_private;
122 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; 99 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -124,22 +101,20 @@ nv04_sgdma_unbind(struct ttm_backend *be)
124 101
125 NV_DEBUG(dev, "\n"); 102 NV_DEBUG(dev, "\n");
126 103
127 if (!nvbe->bound) 104 if (ttm->state != tt_bound)
128 return 0; 105 return 0;
129 106
130 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; 107 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
131 for (i = 0; i < nvbe->nr_pages; i++) { 108 for (i = 0; i < ttm->num_pages; i++) {
132 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) 109 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
133 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); 110 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
134 } 111 }
135 112
136 nvbe->bound = false; 113 nouveau_sgdma_dma_unmap(ttm);
137 return 0; 114 return 0;
138} 115}
139 116
140static struct ttm_backend_func nv04_sgdma_backend = { 117static struct ttm_backend_func nv04_sgdma_backend = {
141 .populate = nouveau_sgdma_populate,
142 .clear = nouveau_sgdma_clear,
143 .bind = nv04_sgdma_bind, 118 .bind = nv04_sgdma_bind,
144 .unbind = nv04_sgdma_unbind, 119 .unbind = nv04_sgdma_unbind,
145 .destroy = nouveau_sgdma_destroy 120 .destroy = nouveau_sgdma_destroy
@@ -158,16 +133,21 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
158} 133}
159 134
160static int 135static int
161nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 136nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
162{ 137{
163 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 138 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
164 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; 139 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
165 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; 140 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
166 dma_addr_t *list = nvbe->pages; 141 dma_addr_t *list = ttm->dma_address;
167 u32 pte = mem->start << 2; 142 u32 pte = mem->start << 2;
168 u32 cnt = nvbe->nr_pages; 143 u32 cnt = ttm->num_pages;
144 int r;
169 145
170 nvbe->offset = mem->start << PAGE_SHIFT; 146 nvbe->offset = mem->start << PAGE_SHIFT;
147 r = nouveau_sgdma_dma_map(ttm);
148 if (r) {
149 return r;
150 }
171 151
172 while (cnt--) { 152 while (cnt--) {
173 nv_wo32(pgt, pte, (*list++ >> 7) | 1); 153 nv_wo32(pgt, pte, (*list++ >> 7) | 1);
@@ -175,18 +155,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
175 } 155 }
176 156
177 nv41_sgdma_flush(nvbe); 157 nv41_sgdma_flush(nvbe);
178 nvbe->bound = true;
179 return 0; 158 return 0;
180} 159}
181 160
182static int 161static int
183nv41_sgdma_unbind(struct ttm_backend *be) 162nv41_sgdma_unbind(struct ttm_tt *ttm)
184{ 163{
185 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 164 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
186 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; 165 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
187 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; 166 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
188 u32 pte = (nvbe->offset >> 12) << 2; 167 u32 pte = (nvbe->offset >> 12) << 2;
189 u32 cnt = nvbe->nr_pages; 168 u32 cnt = ttm->num_pages;
190 169
191 while (cnt--) { 170 while (cnt--) {
192 nv_wo32(pgt, pte, 0x00000000); 171 nv_wo32(pgt, pte, 0x00000000);
@@ -194,24 +173,23 @@ nv41_sgdma_unbind(struct ttm_backend *be)
194 } 173 }
195 174
196 nv41_sgdma_flush(nvbe); 175 nv41_sgdma_flush(nvbe);
197 nvbe->bound = false; 176 nouveau_sgdma_dma_unmap(ttm);
198 return 0; 177 return 0;
199} 178}
200 179
201static struct ttm_backend_func nv41_sgdma_backend = { 180static struct ttm_backend_func nv41_sgdma_backend = {
202 .populate = nouveau_sgdma_populate,
203 .clear = nouveau_sgdma_clear,
204 .bind = nv41_sgdma_bind, 181 .bind = nv41_sgdma_bind,
205 .unbind = nv41_sgdma_unbind, 182 .unbind = nv41_sgdma_unbind,
206 .destroy = nouveau_sgdma_destroy 183 .destroy = nouveau_sgdma_destroy
207}; 184};
208 185
209static void 186static void
210nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe) 187nv44_sgdma_flush(struct ttm_tt *ttm)
211{ 188{
189 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
212 struct drm_device *dev = nvbe->dev; 190 struct drm_device *dev = nvbe->dev;
213 191
214 nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12); 192 nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
215 nv_wr32(dev, 0x100808, nvbe->offset | 0x20); 193 nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
216 if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001)) 194 if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
217 NV_ERROR(dev, "gart flush timeout: 0x%08x\n", 195 NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -270,17 +248,21 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
270} 248}
271 249
272static int 250static int
273nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 251nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
274{ 252{
275 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 253 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
276 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; 254 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
277 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; 255 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
278 dma_addr_t *list = nvbe->pages; 256 dma_addr_t *list = ttm->dma_address;
279 u32 pte = mem->start << 2, tmp[4]; 257 u32 pte = mem->start << 2, tmp[4];
280 u32 cnt = nvbe->nr_pages; 258 u32 cnt = ttm->num_pages;
281 int i; 259 int i, r;
282 260
283 nvbe->offset = mem->start << PAGE_SHIFT; 261 nvbe->offset = mem->start << PAGE_SHIFT;
262 r = nouveau_sgdma_dma_map(ttm);
263 if (r) {
264 return r;
265 }
284 266
285 if (pte & 0x0000000c) { 267 if (pte & 0x0000000c) {
286 u32 max = 4 - ((pte >> 2) & 0x3); 268 u32 max = 4 - ((pte >> 2) & 0x3);
@@ -305,19 +287,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
305 if (cnt) 287 if (cnt)
306 nv44_sgdma_fill(pgt, list, pte, cnt); 288 nv44_sgdma_fill(pgt, list, pte, cnt);
307 289
308 nv44_sgdma_flush(nvbe); 290 nv44_sgdma_flush(ttm);
309 nvbe->bound = true;
310 return 0; 291 return 0;
311} 292}
312 293
313static int 294static int
314nv44_sgdma_unbind(struct ttm_backend *be) 295nv44_sgdma_unbind(struct ttm_tt *ttm)
315{ 296{
316 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 297 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
317 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; 298 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
318 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; 299 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
319 u32 pte = (nvbe->offset >> 12) << 2; 300 u32 pte = (nvbe->offset >> 12) << 2;
320 u32 cnt = nvbe->nr_pages; 301 u32 cnt = ttm->num_pages;
321 302
322 if (pte & 0x0000000c) { 303 if (pte & 0x0000000c) {
323 u32 max = 4 - ((pte >> 2) & 0x3); 304 u32 max = 4 - ((pte >> 2) & 0x3);
@@ -339,55 +320,53 @@ nv44_sgdma_unbind(struct ttm_backend *be)
339 if (cnt) 320 if (cnt)
340 nv44_sgdma_fill(pgt, NULL, pte, cnt); 321 nv44_sgdma_fill(pgt, NULL, pte, cnt);
341 322
342 nv44_sgdma_flush(nvbe); 323 nv44_sgdma_flush(ttm);
343 nvbe->bound = false; 324 nouveau_sgdma_dma_unmap(ttm);
344 return 0; 325 return 0;
345} 326}
346 327
347static struct ttm_backend_func nv44_sgdma_backend = { 328static struct ttm_backend_func nv44_sgdma_backend = {
348 .populate = nouveau_sgdma_populate,
349 .clear = nouveau_sgdma_clear,
350 .bind = nv44_sgdma_bind, 329 .bind = nv44_sgdma_bind,
351 .unbind = nv44_sgdma_unbind, 330 .unbind = nv44_sgdma_unbind,
352 .destroy = nouveau_sgdma_destroy 331 .destroy = nouveau_sgdma_destroy
353}; 332};
354 333
355static int 334static int
356nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 335nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
357{ 336{
358 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
359 struct nouveau_mem *node = mem->mm_node; 337 struct nouveau_mem *node = mem->mm_node;
338 int r;
339
360 /* noop: bound in move_notify() */ 340 /* noop: bound in move_notify() */
361 node->pages = nvbe->pages; 341 r = nouveau_sgdma_dma_map(ttm);
362 nvbe->pages = (dma_addr_t *)node; 342 if (r) {
363 nvbe->bound = true; 343 return r;
344 }
345 node->pages = ttm->dma_address;
364 return 0; 346 return 0;
365} 347}
366 348
367static int 349static int
368nv50_sgdma_unbind(struct ttm_backend *be) 350nv50_sgdma_unbind(struct ttm_tt *ttm)
369{ 351{
370 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
371 struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
372 /* noop: unbound in move_notify() */ 352 /* noop: unbound in move_notify() */
373 nvbe->pages = node->pages; 353 nouveau_sgdma_dma_unmap(ttm);
374 node->pages = NULL;
375 nvbe->bound = false;
376 return 0; 354 return 0;
377} 355}
378 356
379static struct ttm_backend_func nv50_sgdma_backend = { 357static struct ttm_backend_func nv50_sgdma_backend = {
380 .populate = nouveau_sgdma_populate,
381 .clear = nouveau_sgdma_clear,
382 .bind = nv50_sgdma_bind, 358 .bind = nv50_sgdma_bind,
383 .unbind = nv50_sgdma_unbind, 359 .unbind = nv50_sgdma_unbind,
384 .destroy = nouveau_sgdma_destroy 360 .destroy = nouveau_sgdma_destroy
385}; 361};
386 362
387struct ttm_backend * 363struct ttm_tt *
388nouveau_sgdma_init_ttm(struct drm_device *dev) 364nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
365 unsigned long size, uint32_t page_flags,
366 struct page *dummy_read_page)
389{ 367{
390 struct drm_nouveau_private *dev_priv = dev->dev_private; 368 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
369 struct drm_device *dev = dev_priv->dev;
391 struct nouveau_sgdma_be *nvbe; 370 struct nouveau_sgdma_be *nvbe;
392 371
393 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); 372 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -395,9 +374,12 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
395 return NULL; 374 return NULL;
396 375
397 nvbe->dev = dev; 376 nvbe->dev = dev;
377 nvbe->ttm.func = dev_priv->gart_info.func;
398 378
399 nvbe->backend.func = dev_priv->gart_info.func; 379 if (ttm_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
400 return &nvbe->backend; 380 return NULL;
381 }
382 return &nvbe->ttm;
401} 383}
402 384
403int 385int
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 97c76aeae42e..af4d5f258afb 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
114 } 114 }
115} 115}
116 116
117struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
118
119static struct ttm_backend*
120radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
121{
122 struct radeon_device *rdev;
123
124 rdev = radeon_get_rdev(bdev);
125#if __OS_HAS_AGP
126 if (rdev->flags & RADEON_IS_AGP) {
127 return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
128 } else
129#endif
130 {
131 return radeon_ttm_backend_create(rdev);
132 }
133}
134
135static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) 117static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
136{ 118{
137 return 0; 119 return 0;
@@ -515,8 +497,90 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
515 return radeon_fence_signaled((struct radeon_fence *)sync_obj); 497 return radeon_fence_signaled((struct radeon_fence *)sync_obj);
516} 498}
517 499
500/*
501 * TTM backend functions.
502 */
503struct radeon_ttm_tt {
504 struct ttm_tt ttm;
505 struct radeon_device *rdev;
506 u64 offset;
507};
508
509static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
510 struct ttm_mem_reg *bo_mem)
511{
512 struct radeon_ttm_tt *gtt;
513 int r;
514
515 gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
516 gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
517 if (!ttm->num_pages) {
518 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
519 ttm->num_pages, bo_mem, ttm);
520 }
521 r = radeon_gart_bind(gtt->rdev, gtt->offset,
522 ttm->num_pages, ttm->pages, ttm->dma_address);
523 if (r) {
524 DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
525 ttm->num_pages, (unsigned)gtt->offset);
526 return r;
527 }
528 return 0;
529}
530
531static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
532{
533 struct radeon_ttm_tt *gtt;
534
535 gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
536 radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
537 return 0;
538}
539
540static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
541{
542 struct radeon_ttm_tt *gtt;
543
544 gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
545 kfree(gtt);
546}
547
548static struct ttm_backend_func radeon_backend_func = {
549 .bind = &radeon_ttm_backend_bind,
550 .unbind = &radeon_ttm_backend_unbind,
551 .destroy = &radeon_ttm_backend_destroy,
552};
553
554struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
555 unsigned long size, uint32_t page_flags,
556 struct page *dummy_read_page)
557{
558 struct radeon_device *rdev;
559 struct radeon_ttm_tt *gtt;
560
561 rdev = radeon_get_rdev(bdev);
562#if __OS_HAS_AGP
563 if (rdev->flags & RADEON_IS_AGP) {
564 return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
565 size, page_flags, dummy_read_page);
566 }
567#endif
568
569 gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
570 if (gtt == NULL) {
571 return NULL;
572 }
573 gtt->ttm.func = &radeon_backend_func;
574 gtt->rdev = rdev;
575 if (ttm_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
576 return NULL;
577 }
578 return &gtt->ttm;
579}
580
581
518static struct ttm_bo_driver radeon_bo_driver = { 582static struct ttm_bo_driver radeon_bo_driver = {
519 .create_ttm_backend_entry = &radeon_create_ttm_backend_entry, 583 .ttm_tt_create = &radeon_ttm_tt_create,
520 .invalidate_caches = &radeon_invalidate_caches, 584 .invalidate_caches = &radeon_invalidate_caches,
521 .init_mem_type = &radeon_init_mem_type, 585 .init_mem_type = &radeon_init_mem_type,
522 .evict_flags = &radeon_evict_flags, 586 .evict_flags = &radeon_evict_flags,
@@ -680,123 +744,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
680} 744}
681 745
682 746
683/*
684 * TTM backend functions.
685 */
686struct radeon_ttm_backend {
687 struct ttm_backend backend;
688 struct radeon_device *rdev;
689 unsigned long num_pages;
690 struct page **pages;
691 struct page *dummy_read_page;
692 dma_addr_t *dma_addrs;
693 bool populated;
694 bool bound;
695 unsigned offset;
696};
697
698static int radeon_ttm_backend_populate(struct ttm_backend *backend,
699 unsigned long num_pages,
700 struct page **pages,
701 struct page *dummy_read_page,
702 dma_addr_t *dma_addrs)
703{
704 struct radeon_ttm_backend *gtt;
705
706 gtt = container_of(backend, struct radeon_ttm_backend, backend);
707 gtt->pages = pages;
708 gtt->dma_addrs = dma_addrs;
709 gtt->num_pages = num_pages;
710 gtt->dummy_read_page = dummy_read_page;
711 gtt->populated = true;
712 return 0;
713}
714
715static void radeon_ttm_backend_clear(struct ttm_backend *backend)
716{
717 struct radeon_ttm_backend *gtt;
718
719 gtt = container_of(backend, struct radeon_ttm_backend, backend);
720 gtt->pages = NULL;
721 gtt->dma_addrs = NULL;
722 gtt->num_pages = 0;
723 gtt->dummy_read_page = NULL;
724 gtt->populated = false;
725 gtt->bound = false;
726}
727
728
729static int radeon_ttm_backend_bind(struct ttm_backend *backend,
730 struct ttm_mem_reg *bo_mem)
731{
732 struct radeon_ttm_backend *gtt;
733 int r;
734
735 gtt = container_of(backend, struct radeon_ttm_backend, backend);
736 gtt->offset = bo_mem->start << PAGE_SHIFT;
737 if (!gtt->num_pages) {
738 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
739 gtt->num_pages, bo_mem, backend);
740 }
741 r = radeon_gart_bind(gtt->rdev, gtt->offset,
742 gtt->num_pages, gtt->pages, gtt->dma_addrs);
743 if (r) {
744 DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
745 gtt->num_pages, gtt->offset);
746 return r;
747 }
748 gtt->bound = true;
749 return 0;
750}
751
752static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
753{
754 struct radeon_ttm_backend *gtt;
755
756 gtt = container_of(backend, struct radeon_ttm_backend, backend);
757 radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
758 gtt->bound = false;
759 return 0;
760}
761
762static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
763{
764 struct radeon_ttm_backend *gtt;
765
766 gtt = container_of(backend, struct radeon_ttm_backend, backend);
767 if (gtt->bound) {
768 radeon_ttm_backend_unbind(backend);
769 }
770 kfree(gtt);
771}
772
773static struct ttm_backend_func radeon_backend_func = {
774 .populate = &radeon_ttm_backend_populate,
775 .clear = &radeon_ttm_backend_clear,
776 .bind = &radeon_ttm_backend_bind,
777 .unbind = &radeon_ttm_backend_unbind,
778 .destroy = &radeon_ttm_backend_destroy,
779};
780
781struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
782{
783 struct radeon_ttm_backend *gtt;
784
785 gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
786 if (gtt == NULL) {
787 return NULL;
788 }
789 gtt->backend.bdev = &rdev->mman.bdev;
790 gtt->backend.func = &radeon_backend_func;
791 gtt->rdev = rdev;
792 gtt->pages = NULL;
793 gtt->num_pages = 0;
794 gtt->dummy_read_page = NULL;
795 gtt->populated = false;
796 gtt->bound = false;
797 return &gtt->backend;
798}
799
800#define RADEON_DEBUGFS_MEM_TYPES 2 747#define RADEON_DEBUGFS_MEM_TYPES 2
801 748
802#if defined(CONFIG_DEBUG_FS) 749#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 1c4a72f681c1..14ebd3650aa9 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -40,45 +40,33 @@
40#include <asm/agp.h> 40#include <asm/agp.h>
41 41
42struct ttm_agp_backend { 42struct ttm_agp_backend {
43 struct ttm_backend backend; 43 struct ttm_tt ttm;
44 struct agp_memory *mem; 44 struct agp_memory *mem;
45 struct agp_bridge_data *bridge; 45 struct agp_bridge_data *bridge;
46}; 46};
47 47
48static int ttm_agp_populate(struct ttm_backend *backend, 48static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
49 unsigned long num_pages, struct page **pages,
50 struct page *dummy_read_page,
51 dma_addr_t *dma_addrs)
52{ 49{
53 struct ttm_agp_backend *agp_be = 50 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
54 container_of(backend, struct ttm_agp_backend, backend); 51 struct drm_mm_node *node = bo_mem->mm_node;
55 struct page **cur_page, **last_page = pages + num_pages;
56 struct agp_memory *mem; 52 struct agp_memory *mem;
53 int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
54 unsigned i;
57 55
58 mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY); 56 mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
59 if (unlikely(mem == NULL)) 57 if (unlikely(mem == NULL))
60 return -ENOMEM; 58 return -ENOMEM;
61 59
62 mem->page_count = 0; 60 mem->page_count = 0;
63 for (cur_page = pages; cur_page < last_page; ++cur_page) { 61 for (i = 0; i < ttm->num_pages; i++) {
64 struct page *page = *cur_page; 62 struct page *page = ttm->pages[i];
63
65 if (!page) 64 if (!page)
66 page = dummy_read_page; 65 page = ttm->dummy_read_page;
67 66
68 mem->pages[mem->page_count++] = page; 67 mem->pages[mem->page_count++] = page;
69 } 68 }
70 agp_be->mem = mem; 69 agp_be->mem = mem;
71 return 0;
72}
73
74static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
75{
76 struct ttm_agp_backend *agp_be =
77 container_of(backend, struct ttm_agp_backend, backend);
78 struct drm_mm_node *node = bo_mem->mm_node;
79 struct agp_memory *mem = agp_be->mem;
80 int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
81 int ret;
82 70
83 mem->is_flushed = 1; 71 mem->is_flushed = 1;
84 mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY; 72 mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
@@ -90,50 +78,38 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
90 return ret; 78 return ret;
91} 79}
92 80
93static int ttm_agp_unbind(struct ttm_backend *backend) 81static int ttm_agp_unbind(struct ttm_tt *ttm)
94{ 82{
95 struct ttm_agp_backend *agp_be = 83 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
96 container_of(backend, struct ttm_agp_backend, backend);
97
98 if (agp_be->mem->is_bound)
99 return agp_unbind_memory(agp_be->mem);
100 else
101 return 0;
102}
103 84
104static void ttm_agp_clear(struct ttm_backend *backend) 85 if (agp_be->mem) {
105{ 86 if (agp_be->mem->is_bound)
106 struct ttm_agp_backend *agp_be = 87 return agp_unbind_memory(agp_be->mem);
107 container_of(backend, struct ttm_agp_backend, backend); 88 agp_free_memory(agp_be->mem);
108 struct agp_memory *mem = agp_be->mem; 89 agp_be->mem = NULL;
109
110 if (mem) {
111 ttm_agp_unbind(backend);
112 agp_free_memory(mem);
113 } 90 }
114 agp_be->mem = NULL; 91 return 0;
115} 92}
116 93
117static void ttm_agp_destroy(struct ttm_backend *backend) 94static void ttm_agp_destroy(struct ttm_tt *ttm)
118{ 95{
119 struct ttm_agp_backend *agp_be = 96 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
120 container_of(backend, struct ttm_agp_backend, backend);
121 97
122 if (agp_be->mem) 98 if (agp_be->mem)
123 ttm_agp_clear(backend); 99 ttm_agp_unbind(ttm);
124 kfree(agp_be); 100 kfree(agp_be);
125} 101}
126 102
127static struct ttm_backend_func ttm_agp_func = { 103static struct ttm_backend_func ttm_agp_func = {
128 .populate = ttm_agp_populate,
129 .clear = ttm_agp_clear,
130 .bind = ttm_agp_bind, 104 .bind = ttm_agp_bind,
131 .unbind = ttm_agp_unbind, 105 .unbind = ttm_agp_unbind,
132 .destroy = ttm_agp_destroy, 106 .destroy = ttm_agp_destroy,
133}; 107};
134 108
135struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev, 109struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
136 struct agp_bridge_data *bridge) 110 struct agp_bridge_data *bridge,
111 unsigned long size, uint32_t page_flags,
112 struct page *dummy_read_page)
137{ 113{
138 struct ttm_agp_backend *agp_be; 114 struct ttm_agp_backend *agp_be;
139 115
@@ -143,10 +119,14 @@ struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
143 119
144 agp_be->mem = NULL; 120 agp_be->mem = NULL;
145 agp_be->bridge = bridge; 121 agp_be->bridge = bridge;
146 agp_be->backend.func = &ttm_agp_func; 122 agp_be->ttm.func = &ttm_agp_func;
147 agp_be->backend.bdev = bdev; 123
148 return &agp_be->backend; 124 if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
125 return NULL;
126 }
127
128 return &agp_be->ttm;
149} 129}
150EXPORT_SYMBOL(ttm_agp_backend_init); 130EXPORT_SYMBOL(ttm_agp_tt_create);
151 131
152#endif 132#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4bde3356ecb2..cb7352712750 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -337,8 +337,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
337 if (zero_alloc) 337 if (zero_alloc)
338 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC; 338 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
339 case ttm_bo_type_kernel: 339 case ttm_bo_type_kernel:
340 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, 340 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
341 page_flags, glob->dummy_read_page); 341 page_flags, glob->dummy_read_page);
342 if (unlikely(bo->ttm == NULL)) 342 if (unlikely(bo->ttm == NULL))
343 ret = -ENOMEM; 343 ret = -ENOMEM;
344 break; 344 break;
@@ -1437,10 +1437,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
1437 goto out_no_shrink; 1437 goto out_no_shrink;
1438 } 1438 }
1439 1439
1440 glob->ttm_bo_extra_size = 1440 glob->ttm_bo_extra_size = ttm_round_pot(sizeof(struct ttm_tt));
1441 ttm_round_pot(sizeof(struct ttm_tt)) +
1442 ttm_round_pot(sizeof(struct ttm_backend));
1443
1444 glob->ttm_bo_size = glob->ttm_bo_extra_size + 1441 glob->ttm_bo_size = glob->ttm_bo_extra_size +
1445 ttm_round_pot(sizeof(struct ttm_buffer_object)); 1442 ttm_round_pot(sizeof(struct ttm_buffer_object));
1446 1443
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 6e079dedfc4f..fbc90dce1de8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -105,7 +105,6 @@ int ttm_tt_populate(struct ttm_tt *ttm)
105{ 105{
106 struct page *page; 106 struct page *page;
107 unsigned long i; 107 unsigned long i;
108 struct ttm_backend *be;
109 int ret; 108 int ret;
110 109
111 if (ttm->state != tt_unpopulated) 110 if (ttm->state != tt_unpopulated)
@@ -117,16 +116,11 @@ int ttm_tt_populate(struct ttm_tt *ttm)
117 return ret; 116 return ret;
118 } 117 }
119 118
120 be = ttm->be;
121
122 for (i = 0; i < ttm->num_pages; ++i) { 119 for (i = 0; i < ttm->num_pages; ++i) {
123 page = __ttm_tt_get_page(ttm, i); 120 page = __ttm_tt_get_page(ttm, i);
124 if (!page) 121 if (!page)
125 return -ENOMEM; 122 return -ENOMEM;
126 } 123 }
127
128 be->func->populate(be, ttm->num_pages, ttm->pages,
129 ttm->dummy_read_page, ttm->dma_address);
130 ttm->state = tt_unbound; 124 ttm->state = tt_unbound;
131 return 0; 125 return 0;
132} 126}
@@ -235,11 +229,8 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
235 229
236static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) 230static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
237{ 231{
238 struct ttm_backend *be = ttm->be;
239 unsigned i; 232 unsigned i;
240 233
241 if (be)
242 be->func->clear(be);
243 for (i = 0; i < ttm->num_pages; ++i) { 234 for (i = 0; i < ttm->num_pages; ++i) {
244 if (ttm->pages[i]) { 235 if (ttm->pages[i]) {
245 ttm_mem_global_free_page(ttm->glob->mem_glob, 236 ttm_mem_global_free_page(ttm->glob->mem_glob,
@@ -253,20 +244,15 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
253 244
254void ttm_tt_destroy(struct ttm_tt *ttm) 245void ttm_tt_destroy(struct ttm_tt *ttm)
255{ 246{
256 struct ttm_backend *be;
257
258 if (unlikely(ttm == NULL)) 247 if (unlikely(ttm == NULL))
259 return; 248 return;
260 249
261 be = ttm->be; 250 if (ttm->state == tt_bound) {
262 if (likely(be != NULL)) { 251 ttm_tt_unbind(ttm);
263 be->func->destroy(be);
264 ttm->be = NULL;
265 } 252 }
266 253
267 if (likely(ttm->pages != NULL)) { 254 if (likely(ttm->pages != NULL)) {
268 ttm_tt_free_alloced_pages(ttm); 255 ttm_tt_free_alloced_pages(ttm);
269
270 ttm_tt_free_page_directory(ttm); 256 ttm_tt_free_page_directory(ttm);
271 } 257 }
272 258
@@ -274,52 +260,38 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
274 ttm->swap_storage) 260 ttm->swap_storage)
275 fput(ttm->swap_storage); 261 fput(ttm->swap_storage);
276 262
277 kfree(ttm); 263 ttm->swap_storage = NULL;
264 ttm->func->destroy(ttm);
278} 265}
279 266
280struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, 267int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
281 uint32_t page_flags, struct page *dummy_read_page) 268 unsigned long size, uint32_t page_flags,
269 struct page *dummy_read_page)
282{ 270{
283 struct ttm_bo_driver *bo_driver = bdev->driver; 271 ttm->bdev = bdev;
284 struct ttm_tt *ttm;
285
286 if (!bo_driver)
287 return NULL;
288
289 ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
290 if (!ttm)
291 return NULL;
292
293 ttm->glob = bdev->glob; 272 ttm->glob = bdev->glob;
294 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 273 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
295 ttm->caching_state = tt_cached; 274 ttm->caching_state = tt_cached;
296 ttm->page_flags = page_flags; 275 ttm->page_flags = page_flags;
297
298 ttm->dummy_read_page = dummy_read_page; 276 ttm->dummy_read_page = dummy_read_page;
277 ttm->state = tt_unpopulated;
299 278
300 ttm_tt_alloc_page_directory(ttm); 279 ttm_tt_alloc_page_directory(ttm);
301 if (!ttm->pages || !ttm->dma_address) { 280 if (!ttm->pages || !ttm->dma_address) {
302 ttm_tt_destroy(ttm); 281 ttm_tt_destroy(ttm);
303 printk(KERN_ERR TTM_PFX "Failed allocating page table\n"); 282 printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
304 return NULL; 283 return -ENOMEM;
305 }
306 ttm->be = bo_driver->create_ttm_backend_entry(bdev);
307 if (!ttm->be) {
308 ttm_tt_destroy(ttm);
309 printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
310 return NULL;
311 } 284 }
312 ttm->state = tt_unpopulated; 285 return 0;
313 return ttm;
314} 286}
287EXPORT_SYMBOL(ttm_tt_init);
315 288
316void ttm_tt_unbind(struct ttm_tt *ttm) 289void ttm_tt_unbind(struct ttm_tt *ttm)
317{ 290{
318 int ret; 291 int ret;
319 struct ttm_backend *be = ttm->be;
320 292
321 if (ttm->state == tt_bound) { 293 if (ttm->state == tt_bound) {
322 ret = be->func->unbind(be); 294 ret = ttm->func->unbind(ttm);
323 BUG_ON(ret); 295 BUG_ON(ret);
324 ttm->state = tt_unbound; 296 ttm->state = tt_unbound;
325 } 297 }
@@ -328,7 +300,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
328int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) 300int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
329{ 301{
330 int ret = 0; 302 int ret = 0;
331 struct ttm_backend *be;
332 303
333 if (!ttm) 304 if (!ttm)
334 return -EINVAL; 305 return -EINVAL;
@@ -336,13 +307,11 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
336 if (ttm->state == tt_bound) 307 if (ttm->state == tt_bound)
337 return 0; 308 return 0;
338 309
339 be = ttm->be;
340
341 ret = ttm_tt_populate(ttm); 310 ret = ttm_tt_populate(ttm);
342 if (ret) 311 if (ret)
343 return ret; 312 return ret;
344 313
345 ret = be->func->bind(be, bo_mem); 314 ret = ttm->func->bind(ttm, bo_mem);
346 if (unlikely(ret != 0)) 315 if (unlikely(ret != 0))
347 return ret; 316 return ret;
348 317
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 5a72ed908232..cc7243592425 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -139,85 +139,61 @@ struct ttm_placement vmw_srf_placement = {
139 .busy_placement = gmr_vram_placement_flags 139 .busy_placement = gmr_vram_placement_flags
140}; 140};
141 141
142struct vmw_ttm_backend { 142struct vmw_ttm_tt {
143 struct ttm_backend backend; 143 struct ttm_tt ttm;
144 struct page **pages;
145 unsigned long num_pages;
146 struct vmw_private *dev_priv; 144 struct vmw_private *dev_priv;
147 int gmr_id; 145 int gmr_id;
148}; 146};
149 147
150static int vmw_ttm_populate(struct ttm_backend *backend, 148static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
151 unsigned long num_pages, struct page **pages,
152 struct page *dummy_read_page,
153 dma_addr_t *dma_addrs)
154{ 149{
155 struct vmw_ttm_backend *vmw_be = 150 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
156 container_of(backend, struct vmw_ttm_backend, backend);
157
158 vmw_be->pages = pages;
159 vmw_be->num_pages = num_pages;
160
161 return 0;
162}
163
164static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
165{
166 struct vmw_ttm_backend *vmw_be =
167 container_of(backend, struct vmw_ttm_backend, backend);
168 151
169 vmw_be->gmr_id = bo_mem->start; 152 vmw_be->gmr_id = bo_mem->start;
170 153
171 return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages, 154 return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
172 vmw_be->num_pages, vmw_be->gmr_id); 155 ttm->num_pages, vmw_be->gmr_id);
173} 156}
174 157
175static int vmw_ttm_unbind(struct ttm_backend *backend) 158static int vmw_ttm_unbind(struct ttm_tt *ttm)
176{ 159{
177 struct vmw_ttm_backend *vmw_be = 160 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
178 container_of(backend, struct vmw_ttm_backend, backend);
179 161
180 vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); 162 vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
181 return 0; 163 return 0;
182} 164}
183 165
184static void vmw_ttm_clear(struct ttm_backend *backend) 166static void vmw_ttm_destroy(struct ttm_tt *ttm)
185{ 167{
186 struct vmw_ttm_backend *vmw_be = 168 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
187 container_of(backend, struct vmw_ttm_backend, backend);
188
189 vmw_be->pages = NULL;
190 vmw_be->num_pages = 0;
191}
192
193static void vmw_ttm_destroy(struct ttm_backend *backend)
194{
195 struct vmw_ttm_backend *vmw_be =
196 container_of(backend, struct vmw_ttm_backend, backend);
197 169
198 kfree(vmw_be); 170 kfree(vmw_be);
199} 171}
200 172
201static struct ttm_backend_func vmw_ttm_func = { 173static struct ttm_backend_func vmw_ttm_func = {
202 .populate = vmw_ttm_populate,
203 .clear = vmw_ttm_clear,
204 .bind = vmw_ttm_bind, 174 .bind = vmw_ttm_bind,
205 .unbind = vmw_ttm_unbind, 175 .unbind = vmw_ttm_unbind,
206 .destroy = vmw_ttm_destroy, 176 .destroy = vmw_ttm_destroy,
207}; 177};
208 178
209struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev) 179struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
180 unsigned long size, uint32_t page_flags,
181 struct page *dummy_read_page)
210{ 182{
211 struct vmw_ttm_backend *vmw_be; 183 struct vmw_ttm_tt *vmw_be;
212 184
213 vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL); 185 vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
214 if (!vmw_be) 186 if (!vmw_be)
215 return NULL; 187 return NULL;
216 188
217 vmw_be->backend.func = &vmw_ttm_func; 189 vmw_be->ttm.func = &vmw_ttm_func;
218 vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); 190 vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
219 191
220 return &vmw_be->backend; 192 if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
193 return NULL;
194 }
195
196 return &vmw_be->ttm;
221} 197}
222 198
223int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) 199int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -357,7 +333,7 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
357} 333}
358 334
359struct ttm_bo_driver vmw_bo_driver = { 335struct ttm_bo_driver vmw_bo_driver = {
360 .create_ttm_backend_entry = vmw_ttm_backend_init, 336 .ttm_tt_create = &vmw_ttm_tt_create,
361 .invalidate_caches = vmw_invalidate_caches, 337 .invalidate_caches = vmw_invalidate_caches,
362 .init_mem_type = vmw_init_mem_type, 338 .init_mem_type = vmw_init_mem_type,
363 .evict_flags = vmw_evict_flags, 339 .evict_flags = vmw_evict_flags,