Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_mob.c')
 -rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_mob.c   659
 1 file changed, 659 insertions, 0 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
new file mode 100644
index 000000000000..ad29651a4302
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -0,0 +1,659 @@
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "vmwgfx_drv.h" | ||
29 | |||
30 | /* | ||
31 | * If we set up the screen target otable, screen objects stop working. | ||
32 | */ | ||
33 | |||
34 | #define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1) | ||
35 | |||
36 | #ifdef CONFIG_64BIT | ||
37 | #define VMW_PPN_SIZE 8 | ||
38 | #define vmw_cmd_set_otable_base SVGA3dCmdSetOTableBase64 | ||
39 | #define VMW_ID_SET_OTABLE_BASE SVGA_3D_CMD_SET_OTABLE_BASE64 | ||
40 | #define vmw_cmd_define_gb_mob SVGA3dCmdDefineGBMob64 | ||
41 | #define VMW_ID_DEFINE_GB_MOB SVGA_3D_CMD_DEFINE_GB_MOB64 | ||
42 | #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0 | ||
43 | #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1 | ||
44 | #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2 | ||
45 | #else | ||
46 | #define VMW_PPN_SIZE 4 | ||
47 | #define vmw_cmd_set_otable_base SVGA3dCmdSetOTableBase | ||
48 | #define VMW_ID_SET_OTABLE_BASE SVGA_3D_CMD_SET_OTABLE_BASE | ||
49 | #define vmw_cmd_define_gb_mob SVGA3dCmdDefineGBMob | ||
50 | #define VMW_ID_DEFINE_GB_MOB SVGA_3D_CMD_DEFINE_GB_MOB | ||
51 | #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0 | ||
52 | #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1 | ||
53 | #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2 | ||
54 | #endif | ||
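
/*
 * Rough arithmetic behind the two PPN formats, assuming 4 KiB pages:
 * with 64-bit PPNs a page table entry is 8 bytes, so a single page
 * table page holds PAGE_SIZE / 8 = 512 entries and maps 2 MiB of data;
 * with 32-bit PPNs an entry is 4 bytes, giving 1024 entries and 4 MiB
 * of data per page table page.
 */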
55 | |||
/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo: Buffer object holding the page table.
 * @num_pages: Number of pages that make up the page table.
 * @pt_level: The indirection level of the page table. 0-2.
 * @pt_root_page: DMA address of the level 0 page of the page table.
 * @id: Device id of the MOB.
 */
64 | struct vmw_mob { | ||
65 | struct ttm_buffer_object *pt_bo; | ||
66 | unsigned long num_pages; | ||
67 | unsigned pt_level; | ||
68 | dma_addr_t pt_root_page; | ||
69 | uint32_t id; | ||
70 | }; | ||
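
/*
 * A worked example of the hierarchy above, assuming 4 KiB pages and
 * 64-bit PPNs: at pt_level 0 the buffer is a single page and
 * pt_root_page is simply that page's DMA address; at pt_level 1
 * pt_root_page points to one page of up to 512 PPN entries, covering
 * up to 2 MiB of data; at pt_level 2 a further indirection covers up
 * to 512 * 512 data pages, i.e. 1 GiB. Buffers that form a single
 * contiguous DMA region bypass the hierarchy entirely via
 * SVGA3D_MOBFMT_RANGE (see below).
 */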
71 | |||
72 | /* | ||
73 | * struct vmw_otable - Guest Memory OBject table metadata | ||
74 | * | ||
75 | * @size: Size of the table (page-aligned). | ||
76 | * @page_table: Pointer to a struct vmw_mob holding the page table. | ||
77 | */ | ||
78 | struct vmw_otable { | ||
79 | unsigned long size; | ||
80 | struct vmw_mob *page_table; | ||
81 | }; | ||
82 | |||
83 | static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | ||
84 | struct vmw_mob *mob); | ||
85 | static void vmw_mob_pt_setup(struct vmw_mob *mob, | ||
86 | struct vmw_piter data_iter, | ||
87 | unsigned long num_data_pages); | ||
88 | |||
/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @offset: Offset of the table start within dev_priv::otable_bo
 * @otable: Pointer to otable metadata
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
101 | static int vmw_setup_otable_base(struct vmw_private *dev_priv, | ||
102 | SVGAOTableType type, | ||
103 | unsigned long offset, | ||
104 | struct vmw_otable *otable) | ||
105 | { | ||
106 | struct { | ||
107 | SVGA3dCmdHeader header; | ||
108 | vmw_cmd_set_otable_base body; | ||
109 | } *cmd; | ||
110 | struct vmw_mob *mob; | ||
111 | const struct vmw_sg_table *vsgt; | ||
112 | struct vmw_piter iter; | ||
113 | int ret; | ||
114 | |||
115 | BUG_ON(otable->page_table != NULL); | ||
116 | |||
117 | vsgt = vmw_bo_sg_table(dev_priv->otable_bo); | ||
118 | vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); | ||
119 | WARN_ON(!vmw_piter_next(&iter)); | ||
120 | |||
121 | mob = vmw_mob_create(otable->size >> PAGE_SHIFT); | ||
122 | if (unlikely(mob == NULL)) { | ||
123 | DRM_ERROR("Failed creating OTable page table.\n"); | ||
124 | return -ENOMEM; | ||
125 | } | ||
126 | |||
127 | if (otable->size <= PAGE_SIZE) { | ||
128 | mob->pt_level = VMW_MOBFMT_PTDEPTH_0; | ||
129 | mob->pt_root_page = vmw_piter_dma_addr(&iter); | ||
130 | } else if (vsgt->num_regions == 1) { | ||
131 | mob->pt_level = SVGA3D_MOBFMT_RANGE; | ||
132 | mob->pt_root_page = vmw_piter_dma_addr(&iter); | ||
133 | } else { | ||
134 | ret = vmw_mob_pt_populate(dev_priv, mob); | ||
135 | if (unlikely(ret != 0)) | ||
136 | goto out_no_populate; | ||
137 | |||
138 | vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT); | ||
139 | mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; | ||
140 | } | ||
141 | |||
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }
147 | |||
148 | memset(cmd, 0, sizeof(*cmd)); | ||
149 | cmd->header.id = VMW_ID_SET_OTABLE_BASE; | ||
150 | cmd->header.size = sizeof(cmd->body); | ||
151 | cmd->body.type = type; | ||
152 | cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT; | ||
153 | cmd->body.sizeInBytes = otable->size; | ||
154 | cmd->body.validSizeInBytes = 0; | ||
155 | cmd->body.ptDepth = mob->pt_level; | ||
156 | |||
        /*
         * The device doesn't support this, but the otable size is
         * determined at compile-time, so this BUG shouldn't trigger
         * randomly.
         */
162 | BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2); | ||
163 | |||
164 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
165 | otable->page_table = mob; | ||
166 | |||
167 | return 0; | ||
168 | |||
169 | out_no_fifo: | ||
170 | out_no_populate: | ||
171 | vmw_mob_destroy(mob); | ||
172 | return ret; | ||
173 | } | ||
174 | |||
/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable: Pointer to otable metadata
 */
183 | static void vmw_takedown_otable_base(struct vmw_private *dev_priv, | ||
184 | SVGAOTableType type, | ||
185 | struct vmw_otable *otable) | ||
186 | { | ||
187 | struct { | ||
188 | SVGA3dCmdHeader header; | ||
189 | SVGA3dCmdSetOTableBase body; | ||
190 | } *cmd; | ||
        struct ttm_buffer_object *bo;

        if (otable->page_table == NULL)
                return;

        bo = otable->page_table->pt_bo;
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for OTable "
                          "takedown.\n");
        } else {
                memset(cmd, 0, sizeof(*cmd));
                cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
                cmd->header.size = sizeof(cmd->body);
                cmd->body.type = type;
                cmd->body.baseAddress = 0;
                cmd->body.sizeInBytes = 0;
                cmd->body.validSizeInBytes = 0;
                cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
                vmw_fifo_commit(dev_priv, sizeof(*cmd));
        }
209 | |||
210 | if (bo) { | ||
211 | int ret; | ||
212 | |||
213 | ret = ttm_bo_reserve(bo, false, true, false, false); | ||
214 | BUG_ON(ret != 0); | ||
215 | |||
216 | vmw_fence_single_bo(bo, NULL); | ||
217 | ttm_bo_unreserve(bo); | ||
218 | } | ||
219 | |||
220 | vmw_mob_destroy(otable->page_table); | ||
221 | otable->page_table = NULL; | ||
222 | } | ||
223 | |||
/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Takes care of the device guest backed surface
 * initialization by setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful
 * return means the object tables can be taken down using the
 * vmw_otables_takedown function.
 */
235 | int vmw_otables_setup(struct vmw_private *dev_priv) | ||
236 | { | ||
237 | unsigned long offset; | ||
238 | unsigned long bo_size; | ||
239 | struct vmw_otable *otables; | ||
240 | SVGAOTableType i; | ||
241 | int ret; | ||
242 | |||
243 | otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables), | ||
244 | GFP_KERNEL); | ||
245 | if (unlikely(otables == NULL)) { | ||
246 | DRM_ERROR("Failed to allocate space for otable " | ||
247 | "metadata.\n"); | ||
248 | return -ENOMEM; | ||
249 | } | ||
250 | |||
251 | otables[SVGA_OTABLE_MOB].size = | ||
252 | VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE; | ||
253 | otables[SVGA_OTABLE_SURFACE].size = | ||
254 | VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE; | ||
255 | otables[SVGA_OTABLE_CONTEXT].size = | ||
256 | VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE; | ||
257 | otables[SVGA_OTABLE_SHADER].size = | ||
258 | VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE; | ||
259 | otables[SVGA_OTABLE_SCREEN_TARGET].size = | ||
260 | VMWGFX_NUM_GB_SCREEN_TARGET * | ||
261 | SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE; | ||
262 | |||
263 | bo_size = 0; | ||
264 | for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { | ||
265 | otables[i].size = | ||
266 | (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; | ||
267 | bo_size += otables[i].size; | ||
268 | } | ||
269 | |||
270 | ret = ttm_bo_create(&dev_priv->bdev, bo_size, | ||
271 | ttm_bo_type_device, | ||
272 | &vmw_sys_ne_placement, | ||
273 | 0, false, NULL, | ||
274 | &dev_priv->otable_bo); | ||
275 | |||
276 | if (unlikely(ret != 0)) | ||
277 | goto out_no_bo; | ||
278 | |||
279 | ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, false); | ||
280 | BUG_ON(ret != 0); | ||
281 | ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); | ||
282 | if (unlikely(ret != 0)) | ||
283 | goto out_unreserve; | ||
284 | ret = vmw_bo_map_dma(dev_priv->otable_bo); | ||
285 | if (unlikely(ret != 0)) | ||
286 | goto out_unreserve; | ||
287 | |||
288 | ttm_bo_unreserve(dev_priv->otable_bo); | ||
289 | |||
290 | offset = 0; | ||
291 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { | ||
292 | ret = vmw_setup_otable_base(dev_priv, i, offset, | ||
293 | &otables[i]); | ||
294 | if (unlikely(ret != 0)) | ||
295 | goto out_no_setup; | ||
296 | offset += otables[i].size; | ||
297 | } | ||
298 | |||
299 | dev_priv->otables = otables; | ||
300 | return 0; | ||
301 | |||
302 | out_unreserve: | ||
303 | ttm_bo_unreserve(dev_priv->otable_bo); | ||
304 | out_no_setup: | ||
305 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) | ||
306 | vmw_takedown_otable_base(dev_priv, i, &otables[i]); | ||
307 | |||
308 | ttm_bo_unref(&dev_priv->otable_bo); | ||
309 | out_no_bo: | ||
310 | kfree(otables); | ||
311 | return ret; | ||
312 | } | ||
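
/*
 * Note on the layout produced above, as a concrete example: all object
 * tables live back to back in the single otable_bo, each starting at a
 * page-aligned offset. If, say, the MOB table were 3 pages and the
 * surface table 5 pages, the MOB table would occupy bytes 0..12 KiB and
 * the surface table would start at offset 12 KiB, which is the offset
 * handed to vmw_setup_otable_base() for that table.
 */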
313 | |||
314 | |||
315 | /* | ||
316 | * vmw_otables_takedown - Take down guest backed memory object tables | ||
317 | * | ||
318 | * @dev_priv: Pointer to a device private structure | ||
319 | * | ||
320 | * Take down the Guest Memory Object tables. | ||
321 | */ | ||
322 | void vmw_otables_takedown(struct vmw_private *dev_priv) | ||
323 | { | ||
324 | SVGAOTableType i; | ||
325 | struct ttm_buffer_object *bo = dev_priv->otable_bo; | ||
326 | int ret; | ||
327 | |||
328 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) | ||
329 | vmw_takedown_otable_base(dev_priv, i, | ||
330 | &dev_priv->otables[i]); | ||
331 | |||
332 | ret = ttm_bo_reserve(bo, false, true, false, false); | ||
333 | BUG_ON(ret != 0); | ||
334 | |||
335 | vmw_fence_single_bo(bo, NULL); | ||
336 | ttm_bo_unreserve(bo); | ||
337 | |||
338 | ttm_bo_unref(&dev_priv->otable_bo); | ||
339 | kfree(dev_priv->otables); | ||
340 | dev_priv->otables = NULL; | ||
341 | } | ||
342 | |||
343 | |||
344 | /* | ||
345 | * vmw_mob_calculate_pt_pages - Calculate the number of page table pages | ||
346 | * needed for a guest backed memory object. | ||
347 | * | ||
348 | * @data_pages: Number of data pages in the memory object buffer. | ||
349 | */ | ||
350 | static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages) | ||
351 | { | ||
352 | unsigned long data_size = data_pages * PAGE_SIZE; | ||
353 | unsigned long tot_size = 0; | ||
354 | |||
355 | while (likely(data_size > PAGE_SIZE)) { | ||
356 | data_size = DIV_ROUND_UP(data_size, PAGE_SIZE); | ||
357 | data_size *= VMW_PPN_SIZE; | ||
358 | tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK; | ||
359 | } | ||
360 | |||
361 | return tot_size >> PAGE_SHIFT; | ||
362 | } | ||
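
/*
 * Worked example for the loop above, assuming 4 KiB pages and 64-bit
 * PPNs: a 1 GiB buffer has 262144 data pages. The first pass needs
 * 262144 * 8 bytes = 512 pages of level-1 entries; the second pass
 * needs 512 * 8 bytes = 1 page of level-2 entries, after which
 * data_size no longer exceeds PAGE_SIZE. Total: 513 page table pages.
 */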
363 | |||
364 | /* | ||
365 | * vmw_mob_create - Create a mob, but don't populate it. | ||
366 | * | ||
367 | * @data_pages: Number of data pages of the underlying buffer object. | ||
368 | */ | ||
369 | struct vmw_mob *vmw_mob_create(unsigned long data_pages) | ||
370 | { | ||
371 | struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); | ||
372 | |||
373 | if (unlikely(mob == NULL)) | ||
374 | return NULL; | ||
375 | |||
376 | mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); | ||
377 | |||
378 | return mob; | ||
379 | } | ||
380 | |||
/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @dev_priv: Pointer to a device private structure
 * @mob: Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable, and
 * adjusts TTM memory accounting accordingly. Returns -ENOMEM if the
 * memory resources aren't sufficient, and may cause TTM buffer objects
 * to be swapped out through the TTM memory accounting machinery.
 */
392 | static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | ||
393 | struct vmw_mob *mob) | ||
394 | { | ||
395 | int ret; | ||
396 | BUG_ON(mob->pt_bo != NULL); | ||
397 | |||
398 | ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE, | ||
399 | ttm_bo_type_device, | ||
400 | &vmw_sys_ne_placement, | ||
401 | 0, false, NULL, &mob->pt_bo); | ||
402 | if (unlikely(ret != 0)) | ||
403 | return ret; | ||
404 | |||
405 | ret = ttm_bo_reserve(mob->pt_bo, false, true, false, false); | ||
406 | |||
407 | BUG_ON(ret != 0); | ||
408 | ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm); | ||
409 | if (unlikely(ret != 0)) | ||
410 | goto out_unreserve; | ||
411 | ret = vmw_bo_map_dma(mob->pt_bo); | ||
412 | if (unlikely(ret != 0)) | ||
413 | goto out_unreserve; | ||
414 | |||
415 | ttm_bo_unreserve(mob->pt_bo); | ||
416 | |||
417 | return 0; | ||
418 | |||
419 | out_unreserve: | ||
420 | ttm_bo_unreserve(mob->pt_bo); | ||
421 | ttm_bo_unref(&mob->pt_bo); | ||
422 | |||
423 | return ret; | ||
424 | } | ||
425 | |||
/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The DMA address of the page whose PPN is to be entered.
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
435 | #if (VMW_PPN_SIZE == 8) | ||
436 | static void vmw_mob_assign_ppn(uint32_t **addr, dma_addr_t val) | ||
437 | { | ||
438 | *((uint64_t *) *addr) = val >> PAGE_SHIFT; | ||
439 | *addr += 2; | ||
440 | } | ||
441 | #else | ||
442 | static void vmw_mob_assign_ppn(uint32_t **addr, dma_addr_t val) | ||
443 | { | ||
444 | *(*addr)++ = val >> PAGE_SHIFT; | ||
445 | } | ||
446 | #endif | ||
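
/*
 * Note on the 64-bit variant above: each PPN64 entry is 8 bytes, so the
 * uint32_t cursor is advanced by two slots after the single 64-bit
 * store; the 32-bit variant writes one 4-byte entry and advances by one.
 */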
447 | |||
/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter: Iterator over the DMA addresses of the underlying buffer
 * object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter: Iterator over the page table pages to write.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
459 | static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter, | ||
460 | unsigned long num_data_pages, | ||
461 | struct vmw_piter *pt_iter) | ||
462 | { | ||
463 | unsigned long pt_size = num_data_pages * VMW_PPN_SIZE; | ||
464 | unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); | ||
465 | unsigned long pt_page; | ||
466 | uint32_t *addr, *save_addr; | ||
467 | unsigned long i; | ||
468 | struct page *page; | ||
469 | |||
470 | for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) { | ||
471 | page = vmw_piter_page(pt_iter); | ||
472 | |||
473 | save_addr = addr = kmap_atomic(page); | ||
474 | |||
475 | for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) { | ||
476 | vmw_mob_assign_ppn(&addr, | ||
477 | vmw_piter_dma_addr(data_iter)); | ||
478 | if (unlikely(--num_data_pages == 0)) | ||
479 | break; | ||
480 | WARN_ON(!vmw_piter_next(data_iter)); | ||
481 | } | ||
482 | kunmap_atomic(save_addr); | ||
483 | vmw_piter_next(pt_iter); | ||
484 | } | ||
485 | |||
486 | return num_pt_pages; | ||
487 | } | ||
488 | |||
/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob: Pointer to a mob whose page table needs setting up.
 * @data_iter: Iterator over the DMA addresses of the buffer object's
 * data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Iteratively sets up a multilevel mob page table, one level per pass.
 */
499 | static void vmw_mob_pt_setup(struct vmw_mob *mob, | ||
500 | struct vmw_piter data_iter, | ||
501 | unsigned long num_data_pages) | ||
502 | { | ||
503 | unsigned long num_pt_pages = 0; | ||
504 | struct ttm_buffer_object *bo = mob->pt_bo; | ||
505 | struct vmw_piter save_pt_iter; | ||
506 | struct vmw_piter pt_iter; | ||
507 | const struct vmw_sg_table *vsgt; | ||
508 | int ret; | ||
509 | |||
510 | ret = ttm_bo_reserve(bo, false, true, false, 0); | ||
511 | BUG_ON(ret != 0); | ||
512 | |||
513 | vsgt = vmw_bo_sg_table(bo); | ||
514 | vmw_piter_start(&pt_iter, vsgt, 0); | ||
515 | BUG_ON(!vmw_piter_next(&pt_iter)); | ||
516 | mob->pt_level = 0; | ||
517 | while (likely(num_data_pages > 1)) { | ||
518 | ++mob->pt_level; | ||
519 | BUG_ON(mob->pt_level > 2); | ||
520 | save_pt_iter = pt_iter; | ||
521 | num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages, | ||
522 | &pt_iter); | ||
523 | data_iter = save_pt_iter; | ||
524 | num_data_pages = num_pt_pages; | ||
525 | } | ||
526 | |||
527 | mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter); | ||
528 | ttm_bo_unreserve(bo); | ||
529 | } | ||
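
/*
 * Tracing the loop above for the 1 GiB example (262144 data pages,
 * 64-bit PPNs): the first pass writes 512 level-1 page table pages and
 * leaves pt_level at 1; those 512 level-1 pages then become the "data"
 * for the second pass, which writes them into a single level-2 page and
 * leaves pt_level at 2. save_pt_iter then points at that level-2 page,
 * whose DMA address becomes pt_root_page.
 */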
530 | |||
531 | /* | ||
532 | * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary. | ||
533 | * | ||
534 | * @mob: Pointer to a mob to destroy. | ||
535 | */ | ||
536 | void vmw_mob_destroy(struct vmw_mob *mob) | ||
537 | { | ||
538 | if (mob->pt_bo) | ||
539 | ttm_bo_unref(&mob->pt_bo); | ||
540 | kfree(mob); | ||
541 | } | ||
542 | |||
/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob to unbind.
 */
549 | void vmw_mob_unbind(struct vmw_private *dev_priv, | ||
550 | struct vmw_mob *mob) | ||
551 | { | ||
552 | struct { | ||
553 | SVGA3dCmdHeader header; | ||
554 | SVGA3dCmdDestroyGBMob body; | ||
555 | } *cmd; | ||
556 | int ret; | ||
557 | struct ttm_buffer_object *bo = mob->pt_bo; | ||
558 | |||
559 | if (bo) { | ||
560 | ret = ttm_bo_reserve(bo, false, true, false, 0); | ||
                /*
                 * No one else should be using this buffer.
                 */
564 | BUG_ON(ret != 0); | ||
565 | } | ||
566 | |||
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for Memory "
                          "Object unbinding.\n");
        } else {
                cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
                cmd->header.size = sizeof(cmd->body);
                cmd->body.mobid = mob->id;
                vmw_fifo_commit(dev_priv, sizeof(*cmd));
        }
576 | if (bo) { | ||
577 | vmw_fence_single_bo(bo, NULL); | ||
578 | ttm_bo_unreserve(bo); | ||
579 | } | ||
580 | vmw_3d_resource_dec(dev_priv, false); | ||
581 | } | ||
582 | |||
/*
 * vmw_mob_bind - Make a mob visible to the device after first
 * populating it if necessary.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob we're making visible.
 * @vsgt: Pointer to a struct vmw_sg_table describing the data pages
 * of the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 * object.
 * @mob_id: Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
598 | int vmw_mob_bind(struct vmw_private *dev_priv, | ||
599 | struct vmw_mob *mob, | ||
600 | const struct vmw_sg_table *vsgt, | ||
601 | unsigned long num_data_pages, | ||
602 | int32_t mob_id) | ||
603 | { | ||
604 | int ret; | ||
605 | bool pt_set_up = false; | ||
606 | struct vmw_piter data_iter; | ||
607 | struct { | ||
608 | SVGA3dCmdHeader header; | ||
609 | vmw_cmd_define_gb_mob body; | ||
610 | } *cmd; | ||
611 | |||
612 | mob->id = mob_id; | ||
613 | vmw_piter_start(&data_iter, vsgt, 0); | ||
614 | if (unlikely(!vmw_piter_next(&data_iter))) | ||
615 | return 0; | ||
616 | |||
617 | if (likely(num_data_pages == 1)) { | ||
618 | mob->pt_level = VMW_MOBFMT_PTDEPTH_0; | ||
619 | mob->pt_root_page = vmw_piter_dma_addr(&data_iter); | ||
620 | } else if (vsgt->num_regions == 1) { | ||
621 | mob->pt_level = SVGA3D_MOBFMT_RANGE; | ||
622 | mob->pt_root_page = vmw_piter_dma_addr(&data_iter); | ||
623 | } else if (unlikely(mob->pt_bo == NULL)) { | ||
624 | ret = vmw_mob_pt_populate(dev_priv, mob); | ||
625 | if (unlikely(ret != 0)) | ||
626 | return ret; | ||
627 | |||
628 | vmw_mob_pt_setup(mob, data_iter, num_data_pages); | ||
629 | pt_set_up = true; | ||
630 | mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; | ||
631 | } | ||
632 | |||
633 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
634 | |||
635 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
636 | if (unlikely(cmd == NULL)) { | ||
637 | DRM_ERROR("Failed reserving FIFO space for Memory " | ||
638 | "Object binding.\n"); | ||
639 | goto out_no_cmd_space; | ||
640 | } | ||
641 | |||
642 | cmd->header.id = VMW_ID_DEFINE_GB_MOB; | ||
643 | cmd->header.size = sizeof(cmd->body); | ||
644 | cmd->body.mobid = mob_id; | ||
645 | cmd->body.ptDepth = mob->pt_level; | ||
646 | cmd->body.base = mob->pt_root_page >> PAGE_SHIFT; | ||
647 | cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; | ||
648 | |||
649 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
650 | |||
651 | return 0; | ||
652 | |||
653 | out_no_cmd_space: | ||
654 | vmw_3d_resource_dec(dev_priv, false); | ||
655 | if (pt_set_up) | ||
656 | ttm_bo_unref(&mob->pt_bo); | ||
657 | |||
658 | return -ENOMEM; | ||
659 | } | ||
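
/*
 * Minimal usage sketch of the MOB interface above (illustrative only;
 * the real caller is the ttm_tt backend code mentioned in the
 * vmw_mob_bind() comment, and error handling is trimmed). "bo",
 * "num_data_pages" and "mob_id" are placeholder caller-supplied values.
 *
 *	struct vmw_mob *mob = vmw_mob_create(num_data_pages);
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *
 *	if (mob == NULL)
 *		return -ENOMEM;
 *	if (vmw_mob_bind(dev_priv, mob, vsgt, num_data_pages, mob_id) == 0) {
 *		... the device can now address the buffer as mob_id ...
 *		vmw_mob_unbind(dev_priv, mob);
 *	}
 *	vmw_mob_destroy(mob);
 */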