-rw-r--r--   drivers/gpu/drm/vmwgfx/Makefile        |   2
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c |  13
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_drv.c    |  17
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_drv.h    |  31
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_mob.c    | 570
5 files changed, 632 insertions, 1 deletion
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 9f8b690bcf52..5d892e76117b 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
-	    vmwgfx_surface.o vmwgfx_prime.o
+	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 2d61a2d86bd7..92ae4c5894c5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -40,6 +40,10 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
 	TTM_PL_FLAG_CACHED;
 
+static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
+	TTM_PL_FLAG_CACHED |
+	TTM_PL_FLAG_NO_EVICT;
+
 static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
 	TTM_PL_FLAG_CACHED;
 
@@ -116,6 +120,15 @@ struct ttm_placement vmw_sys_placement = {
 	.busy_placement = &sys_placement_flags
 };
 
+struct ttm_placement vmw_sys_ne_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.placement = &sys_ne_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_ne_placement_flags
+};
+
 static uint32_t evictable_placement_flags[] = {
 	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
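
The new vmw_sys_ne_placement describes cached system memory with TTM_PL_FLAG_NO_EVICT set, so buffers placed with it stay resident; the MOB code added later in this series uses it for object-table and page-table backing store. As a minimal sketch of how such a placement is consumed, mirroring the ttm_bo_create() call made in vmwgfx_mob.c below (the local bo variable and the PAGE_SIZE buffer size are illustrative only; dev_priv is the usual struct vmw_private pointer):

/* Sketch only: allocate a small BO that must stay resident in system memory. */
struct ttm_buffer_object *bo;
int ret;

ret = ttm_bo_create(&dev_priv->bdev, PAGE_SIZE,
		    ttm_bo_type_device,
		    &vmw_sys_ne_placement,	/* cached + no-evict system pages */
		    0, false, NULL, &bo);
if (unlikely(ret != 0))
	return ret;	/* nothing was created, nothing to undo */
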
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6e8cb1481376..24df6d6e0c08 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -327,6 +327,14 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 		return ret;
 	}
 	vmw_fence_fifo_up(dev_priv->fman);
+	if (dev_priv->has_mob) {
+		ret = vmw_otables_setup(dev_priv);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Unable to initialize "
+				  "guest Memory OBjects.\n");
+			goto out_no_mob;
+		}
+	}
 	ret = vmw_dummy_query_bo_create(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_query_bo;
@@ -335,6 +343,9 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 	return 0;
 
 out_no_query_bo:
+	if (dev_priv->has_mob)
+		vmw_otables_takedown(dev_priv);
+out_no_mob:
 	vmw_fence_fifo_down(dev_priv->fman);
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 	return ret;
@@ -350,10 +361,13 @@ static void vmw_release_device(struct vmw_private *dev_priv)
 	BUG_ON(dev_priv->pinned_bo != NULL);
 
 	ttm_bo_unref(&dev_priv->dummy_query_bo);
+	if (dev_priv->has_mob)
+		vmw_otables_takedown(dev_priv);
 	vmw_fence_fifo_down(dev_priv->fman);
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
 
+
 /**
  * Increase the 3d resource refcount.
  * If the count was prevously zero, initialize the fifo, switching to svga
@@ -690,6 +704,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		dev_priv->has_gmr = false;
 	}
 
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+		dev_priv->has_mob = true;
+
 	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
 					       dev_priv->mmio_size);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 486adaf235d0..518f8f5e2612 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -51,6 +51,16 @@
 #define VMWGFX_MAX_DISPLAYS 16
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
 
+/*
+ * Perhaps we should have sysfs entries for these.
+ */
+#define VMWGFX_NUM_GB_CONTEXT 256
+#define VMWGFX_NUM_GB_SHADER 20000
+#define VMWGFX_NUM_GB_SURFACE 32768
+#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
+			VMWGFX_NUM_GB_SHADER +\
+			VMWGFX_NUM_GB_SURFACE)
+
 #define VMW_PL_GMR TTM_PL_PRIV0
 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
 
@@ -295,6 +305,7 @@ struct vmw_private {
 	uint32_t max_gmr_pages;
 	uint32_t memory_size;
 	bool has_gmr;
+	bool has_mob;
 	struct mutex hw_mutex;
 
 	/*
@@ -415,6 +426,12 @@ struct vmw_private {
 	 * DMA mapping stuff.
 	 */
 	enum vmw_dma_map_mode map_mode;
+
+	/*
+	 * Guest Backed stuff
+	 */
+	struct ttm_buffer_object *otable_bo;
+	struct vmw_otable *otables;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -622,6 +639,7 @@ extern struct ttm_placement vmw_vram_sys_placement;
 extern struct ttm_placement vmw_vram_gmr_placement;
 extern struct ttm_placement vmw_vram_gmr_ne_placement;
 extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_sys_ne_placement;
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
@@ -832,6 +850,19 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
 				  uint32_t handle, uint32_t flags,
 				  int *prime_fd);
 
+/*
+ * MemoryOBject management - vmwgfx_mob.c
+ */
+struct vmw_mob;
+extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
+			struct page **data_pages, unsigned long num_data_pages,
+			int32_t mob_id);
+extern void vmw_mob_unbind(struct vmw_private *dev_priv,
+			   struct vmw_mob *mob);
+extern void vmw_mob_destroy(struct vmw_mob *mob);
+extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
+extern int vmw_otables_setup(struct vmw_private *dev_priv);
+extern void vmw_otables_takedown(struct vmw_private *dev_priv);
 
 /**
  * Inline helper functions
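
The prototypes above define the MOB lifecycle: create the metadata, bind it to a device id (which builds the page table and issues the define command), then unbind and destroy. A rough sketch of the intended call sequence, inferred from these declarations and the vmwgfx_mob.c implementation below; the example_bind()/example_unbind() helpers and the mob_id value are hypothetical, and error handling is reduced to the essentials:

/* Sketch only: how a ttm_tt backend might drive the MOB API. */
static int example_bind(struct vmw_private *dev_priv, struct ttm_tt *ttm,
			struct vmw_mob **mob_p, int32_t mob_id)
{
	struct vmw_mob *mob = vmw_mob_create(ttm->num_pages);
	int ret;

	if (unlikely(mob == NULL))
		return -ENOMEM;

	/* Populates the page table if needed and issues DEFINE_GB_MOB. */
	ret = vmw_mob_bind(dev_priv, mob, ttm->pages, ttm->num_pages, mob_id);
	if (unlikely(ret != 0)) {
		vmw_mob_destroy(mob);
		return ret;
	}

	*mob_p = mob;
	return 0;
}

static void example_unbind(struct vmw_private *dev_priv, struct vmw_mob *mob)
{
	vmw_mob_unbind(dev_priv, mob);	/* DESTROY_GB_MOB + fence the PT BO */
	vmw_mob_destroy(mob);		/* free page-table BO and metadata */
}
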
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
new file mode 100644
index 000000000000..34450867d2da
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -0,0 +1,570 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+/*
+ * Currently the MOB interface does not support 64-bit page frame numbers.
+ * This might change in the future to be similar to the GMR2 interface
+ * when virtual machines support memory beyond 16TB.
+ */
+
+#define VMW_PPN_SIZE 4
+
+/*
+ * struct vmw_mob - Structure containing page table and metadata for a
+ * Guest Memory OBject.
+ *
+ * @num_pages: Number of pages that make up the page table.
+ * @pt_level: The indirection level of the page table. 0-2.
+ * @pt_root_page: Pointer to the level 0 page of the page table.
+ */
+struct vmw_mob {
+	struct ttm_buffer_object *pt_bo;
+	unsigned long num_pages;
+	unsigned pt_level;
+	struct page *pt_root_page;
+	uint32_t id;
+};
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size: Size of the table (page-aligned).
+ * @page_table: Pointer to a struct vmw_mob holding the page table.
+ */
+struct vmw_otable {
+	unsigned long size;
+	struct vmw_mob *page_table;
+};
+
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+			       struct vmw_mob *mob);
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+			     struct page **data_pages,
+			     unsigned long num_data_pages);
+
+/*
+ * vmw_setup_otable_base - Issue an object table base setup command to
+ * the device
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @type: Type of object table base
+ * @offset: Start of table offset into dev_priv::otable_bo
+ * @otable: Pointer to otable metadata
+ *
+ * This function returns -ENOMEM if it fails to reserve fifo space,
+ * and may block waiting for fifo space.
+ */
+static int vmw_setup_otable_base(struct vmw_private *dev_priv,
+				 SVGAOTableType type,
+				 unsigned long offset,
+				 struct vmw_otable *otable)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetOTableBase body;
+	} *cmd;
+	struct page **pages = dev_priv->otable_bo->ttm->pages +
+		(offset >> PAGE_SHIFT);
+	struct vmw_mob *mob;
+	int ret;
+
+	BUG_ON(otable->page_table != NULL);
+
+	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
+	if (unlikely(mob == NULL)) {
+		DRM_ERROR("Failed creating OTable page table.\n");
+		return -ENOMEM;
+	}
+
+	if (otable->size <= PAGE_SIZE) {
+		mob->pt_level = 0;
+		mob->pt_root_page = pages[0];
+	} else {
+		ret = vmw_mob_pt_populate(dev_priv, mob);
+		if (unlikely(ret != 0))
+			goto out_no_populate;
+
+		vmw_mob_pt_setup(mob, pages,
+				 otable->size >> PAGE_SHIFT);
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+		goto out_no_fifo;
+	}
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = type;
+	cmd->body.baseAddress = page_to_pfn(mob->pt_root_page);
+	cmd->body.sizeInBytes = otable->size;
+	cmd->body.validSizeInBytes = 0;
+	cmd->body.ptDepth = mob->pt_level;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	otable->page_table = mob;
+
+	return 0;
+
+out_no_fifo:
+out_no_populate:
+	vmw_mob_destroy(mob);
+	return ret;
+}
+
+/*
+ * vmw_takedown_otable_base - Issue an object table base takedown command
+ * to the device
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @type: Type of object table base
+ *
+ */
+static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
+				     SVGAOTableType type,
+				     struct vmw_otable *otable)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetOTableBase body;
+	} *cmd;
+	struct ttm_buffer_object *bo = otable->page_table->pt_bo;
+
+	if (otable->page_table == NULL)
+		return;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = type;
+	cmd->body.baseAddress = 0;
+	cmd->body.sizeInBytes = 0;
+	cmd->body.validSizeInBytes = 0;
+	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	if (bo) {
+		int ret;
+
+		ret = ttm_bo_reserve(bo, false, true, false, false);
+		BUG_ON(ret != 0);
+
+		vmw_fence_single_bo(bo, NULL);
+		ttm_bo_unreserve(bo);
+	}
+
+	vmw_mob_destroy(otable->page_table);
+	otable->page_table = NULL;
+}
+
+/*
+ * vmw_otables_setup - Set up guest backed memory object tables
+ *
+ * @dev_priv: Pointer to a device private structure
+ *
+ * Takes care of the device guest backed surface
+ * initialization, by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
+ */
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+	unsigned long offset;
+	unsigned long bo_size;
+	struct vmw_otable *otables;
+	SVGAOTableType i;
+	int ret;
+
+	otables = kzalloc(SVGA_OTABLE_COUNT * sizeof(*otables),
+			  GFP_KERNEL);
+	if (unlikely(otables == NULL)) {
+		DRM_ERROR("Failed to allocate space for otable "
+			  "metadata.\n");
+		return -ENOMEM;
+	}
+
+	otables[SVGA_OTABLE_MOB].size =
+		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
+	otables[SVGA_OTABLE_SURFACE].size =
+		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
+	otables[SVGA_OTABLE_CONTEXT].size =
+		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
+	otables[SVGA_OTABLE_SHADER].size =
+		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
+
+	bo_size = 0;
+	for (i = 0; i < SVGA_OTABLE_COUNT; ++i) {
+		otables[i].size =
+			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
+		bo_size += otables[i].size;
+	}
+
+	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
+			    ttm_bo_type_device,
+			    &vmw_sys_ne_placement,
+			    0, false, NULL,
+			    &dev_priv->otable_bo);
+
+	if (unlikely(ret != 0))
+		goto out_no_bo;
+
+	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, false);
+	BUG_ON(ret != 0);
+	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+	ttm_bo_unreserve(dev_priv->otable_bo);
+	if (unlikely(ret != 0))
+		goto out_no_setup;
+
+	offset = 0;
+	for (i = 0; i < SVGA_OTABLE_COUNT; ++i) {
+		ret = vmw_setup_otable_base(dev_priv, i, offset,
+					    &otables[i]);
+		if (unlikely(ret != 0))
+			goto out_no_setup;
+		offset += otables[i].size;
+	}
+
+	dev_priv->otables = otables;
+	return 0;
+
+out_no_setup:
+	for (i = 0; i < SVGA_OTABLE_COUNT; ++i)
+		vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+
+	ttm_bo_unref(&dev_priv->otable_bo);
+out_no_bo:
+	kfree(otables);
+	return ret;
+}
+
+
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv: Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+	SVGAOTableType i;
+	struct ttm_buffer_object *bo = dev_priv->otable_bo;
+	int ret;
+
+	for (i = 0; i < SVGA_OTABLE_COUNT; ++i)
+		vmw_takedown_otable_base(dev_priv, i,
+					 &dev_priv->otables[i]);
+
+	ret = ttm_bo_reserve(bo, false, true, false, false);
+	BUG_ON(ret != 0);
+
+	vmw_fence_single_bo(bo, NULL);
+	ttm_bo_unreserve(bo);
+
+	ttm_bo_unref(&dev_priv->otable_bo);
+	kfree(dev_priv->otables);
+	dev_priv->otables = NULL;
+}
+
+
+/*
+ * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
+ * needed for a guest backed memory object.
+ *
+ * @data_pages:  Number of data pages in the memory object buffer.
+ */
+static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
+{
+	unsigned long data_size = data_pages * PAGE_SIZE;
+	unsigned long tot_size = 0;
+
+	while (likely(data_size > PAGE_SIZE)) {
+		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
+		data_size *= VMW_PPN_SIZE;
+		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
+	}
+
+	return tot_size >> PAGE_SHIFT;
+}
+
+/*
+ * vmw_mob_create - Create a mob, but don't populate it.
+ *
+ * @data_pages:  Number of data pages of the underlying buffer object.
+ */
+struct vmw_mob *vmw_mob_create(unsigned long data_pages)
+{
+	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
+
+	if (unlikely(mob == NULL))
+		return NULL;
+
+	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
+
+	return mob;
+}
+
+/*
+ * vmw_mob_pt_populate - Populate the mob pagetable
+ *
+ * @mob:         Pointer to the mob whose pagetable we want to
+ *               populate.
+ *
+ * This function allocates memory to be used for the pagetable, and
+ * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
+ * memory resources aren't sufficient and may cause TTM buffer objects
+ * to be swapped out by using the TTM memory accounting function.
+ */
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+			       struct vmw_mob *mob)
+{
+	int ret;
+	BUG_ON(mob->pt_bo != NULL);
+
+	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
+			    ttm_bo_type_device,
+			    &vmw_sys_ne_placement,
+			    0, false, NULL, &mob->pt_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(mob->pt_bo, false, true, false, false);
+
+	BUG_ON(ret != 0);
+	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+	ttm_bo_unreserve(mob->pt_bo);
+	if (unlikely(ret != 0))
+		ttm_bo_unref(&mob->pt_bo);
+
+	return ret;
+}
+
+
+/*
+ * vmw_mob_build_pt - Build a pagetable
+ *
+ * @data_pages:      Array of page pointers to the underlying buffer
+ *                   object's data pages.
+ * @num_data_pages:  Number of buffer object data pages.
+ * @pt_pages:        Array of page pointers to the page table pages.
+ *
+ * Returns the number of page table pages actually used.
+ * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
+ */
+static unsigned long vmw_mob_build_pt(struct page **data_pages,
+				      unsigned long num_data_pages,
+				      struct page **pt_pages)
+{
+	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
+	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
+	unsigned long pt_page, data_page;
+	uint32_t *addr, *save_addr;
+	unsigned long i;
+
+	data_page = 0;
+	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
+		save_addr = addr = kmap_atomic(pt_pages[pt_page]);
+
+		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
+			*addr++ = page_to_pfn(data_pages[data_page++]);
+			if (unlikely(data_page >= num_data_pages))
+				break;
+		}
+		kunmap_atomic(save_addr);
+	}
+
+	return num_pt_pages;
+}
+
+/*
+ * vmw_mob_pt_setup - Set up a multilevel mob pagetable
+ *
+ * @mob:             Pointer to a mob whose page table needs setting up.
+ * @data_pages:      Array of page pointers to the buffer object's data
+ *                   pages.
+ * @num_data_pages:  Number of buffer object data pages.
+ *
+ * Uses tail recursion to set up a multilevel mob page table.
+ */
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+			     struct page **data_pages,
+			     unsigned long num_data_pages)
+{
+	struct page **pt_pages;
+	unsigned long num_pt_pages = 0;
+	struct ttm_buffer_object *bo = mob->pt_bo;
+	int ret;
+
+	ret = ttm_bo_reserve(bo, false, true, false, 0);
+	BUG_ON(ret != 0);
+
+	pt_pages = bo->ttm->pages;
+	mob->pt_level = 0;
+	while (likely(num_data_pages > 1)) {
+		++mob->pt_level;
+		BUG_ON(mob->pt_level > 2);
+
+		pt_pages += num_pt_pages;
+		num_pt_pages = vmw_mob_build_pt(data_pages, num_data_pages,
+						pt_pages);
+		data_pages = pt_pages;
+		num_data_pages = num_pt_pages;
+	}
+
+	mob->pt_root_page = *pt_pages;
+	ttm_bo_unreserve(bo);
+}
+
+/*
+ * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
+ *
+ * @mob:            Pointer to a mob to destroy.
+ */
+void vmw_mob_destroy(struct vmw_mob *mob)
+{
+	if (mob->pt_bo)
+		ttm_bo_unref(&mob->pt_bo);
+	kfree(mob);
+}
+
+/*
+ * vmw_mob_unbind - Hide a mob from the device.
+ *
+ * @dev_priv:       Pointer to a device private.
+ * @mob_id:         Device id of the mob to unbind.
+ */
+void vmw_mob_unbind(struct vmw_private *dev_priv,
+		    struct vmw_mob *mob)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBMob body;
+	} *cmd;
+	int ret;
+	struct ttm_buffer_object *bo = mob->pt_bo;
+
+	if (bo) {
+		ret = ttm_bo_reserve(bo, false, true, false, 0);
+		/*
+		 * No one else should be using this buffer.
+		 */
+		BUG_ON(ret != 0);
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for Memory "
+			  "Object unbinding.\n");
+	}
+	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.mobid = mob->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	if (bo) {
+		vmw_fence_single_bo(bo, NULL);
+		ttm_bo_unreserve(bo);
+	}
+	vmw_3d_resource_dec(dev_priv, false);
+}
+
+/*
+ * vmw_mob_bind - Make a mob visible to the device after first
+ * populating it if necessary.
+ *
+ * @dev_priv:       Pointer to a device private.
+ * @mob:            Pointer to the mob we're making visible.
+ * @data_pages:     Array of pointers to the data pages of the underlying
+ *                  buffer object.
+ * @num_data_pages: Number of data pages of the underlying buffer
+ *                  object.
+ * @mob_id:         Device id of the mob to bind
+ *
+ * This function is intended to be interfaced with the ttm_tt backend
+ * code.
+ */
+int vmw_mob_bind(struct vmw_private *dev_priv,
+		 struct vmw_mob *mob,
+		 struct page **data_pages,
+		 unsigned long num_data_pages,
+		 int32_t mob_id)
+{
+	int ret;
+	bool pt_set_up = false;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBMob body;
+	} *cmd;
+
+	mob->id = mob_id;
+	if (likely(num_data_pages == 1)) {
+		mob->pt_level = 0;
+		mob->pt_root_page = *data_pages;
+	} else if (unlikely(mob->pt_bo == NULL)) {
+		ret = vmw_mob_pt_populate(dev_priv, mob);
+		if (unlikely(ret != 0))
+			return ret;
+
+		vmw_mob_pt_setup(mob, data_pages, num_data_pages);
+		pt_set_up = true;
+	}
+
+	(void) vmw_3d_resource_inc(dev_priv, false);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for Memory "
+			  "Object binding.\n");
+		goto out_no_cmd_space;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.mobid = mob_id;
+	cmd->body.ptDepth = mob->pt_level;
+	cmd->body.base = page_to_pfn(mob->pt_root_page);
+	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+
+out_no_cmd_space:
+	vmw_3d_resource_dec(dev_priv, false);
+	if (pt_set_up)
+		ttm_bo_unref(&mob->pt_bo);
+
+	return -ENOMEM;
+}
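
For a sense of the page-table sizing (assuming 4 KiB pages): each entry is VMW_PPN_SIZE = 4 bytes, so one page-table page addresses PAGE_SIZE / VMW_PPN_SIZE = 1024 data pages (4 MiB). A single data page needs no table at all (pt_level 0, the page itself is the root), up to 1024 data pages need one table page (pt_level 1), and up to 1024 * 1024 data pages (4 GiB) need 1024 + 1 table pages across two levels (pt_level 2), which matches the BUG_ON(mob->pt_level > 2) above. The following standalone userspace program is not driver code; it just replays the arithmetic of vmw_mob_calculate_pt_pages() under those assumptions:

#include <stdio.h>

#define PAGE_SIZE_EX 4096ULL
#define PPN_SIZE_EX  4ULL

static unsigned long long pt_pages_needed(unsigned long long data_pages)
{
	unsigned long long size = data_pages * PAGE_SIZE_EX;
	unsigned long long pt_pages = 0;

	/* Each level shrinks the problem by PAGE_SIZE / PPN_SIZE = 1024x. */
	while (size > PAGE_SIZE_EX) {
		unsigned long long entries =
			(size + PAGE_SIZE_EX - 1) / PAGE_SIZE_EX;

		size = entries * PPN_SIZE_EX;
		pt_pages += (size + PAGE_SIZE_EX - 1) / PAGE_SIZE_EX;
	}
	return pt_pages;
}

int main(void)
{
	/* Prints "1 1025": 4 MiB needs one PT page, 4 GiB needs 1024 + 1. */
	printf("%llu %llu\n", pt_pages_needed(1024), pt_pages_needed(1048576));
	return 0;
}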