Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_drv.c')
 -rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 783
 1 file changed, 783 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
new file mode 100644
index 000000000000..0c9c0811f42d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -0,0 +1,783 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_FIFO_DEBUG				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG,		\
		 struct drm_vmw_fifo_debug_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)


/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
		      DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
		      DRM_AUTH | DRM_UNLOCKED)
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};

static char *vmw_devname = "vmwgfx";

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
}

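/**
 * Take control of the device: save the current VGA state so it can be
 * restored later, and bring up the command FIFO.
 */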
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	vmw_kms_save_vga(dev_priv);

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}

	return 0;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	vmw_kms_restore_vga(dev_priv);
}


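/**
 * Driver load: allocate and initialize the device-private structure,
 * negotiate the SVGA device id, read capabilities and memory layout from
 * the device registers, set up TTM and the MMIO/FIFO mapping, and finally
 * initialize KMS, overlay, fbdev and the power-management notifier.
 */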
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}
	memset(dev_priv, 0, sizeof(*dev_priv));

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_sequence = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	ida_init(&dev_priv->gmr_ida);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	atomic_set(&dev_priv->fence_queue_waiters, 0);
	atomic_set(&dev_priv->fifo_queue_waiters, 0);
	INIT_LIST_HEAD(&dev_priv->gmr_lru);

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;


	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	if (!dev->devname)
		dev->devname = vmw_devname;

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}
	ret = vmw_request_device(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_device;
	vmw_kms_init(dev_priv);
	vmw_overlay_init(dev_priv);
	vmw_fb_init(dev_priv);

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");

	return 0;

out_no_device:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev->devname == vmw_devname)
		dev->devname = NULL;
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	ida_destroy(&dev_priv->gmr_ida);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}

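/**
 * Driver unload: tear everything down in roughly the reverse order of
 * vmw_driver_load().
 */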
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");

	unregister_pm_notifier(&dev_priv->pm_nb);

	vmw_fb_close(dev_priv);
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_release_device(dev_priv);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev->devname == vmw_devname)
		dev->devname = NULL;
	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	ida_destroy(&dev_priv->gmr_ida);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

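/**
 * Per-file cleanup: release the TTM object file and any master reference
 * still held from a VT switch.
 */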
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

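/**
 * Per-file setup: allocate the file-private structure and its TTM object
 * file, and record the address space used for buffer-object mmaps.
 */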
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
		dev_priv->bdev.dev_mapping =
			file_priv->filp->f_path.dentry->d_inode->i_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

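/**
 * Ioctl dispatch wrapper: verify the command encoding of driver-private
 * ioctls before handing off to drm_ioctl().
 */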
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	dev_priv->is_opened = true;

	return 0;
}

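/**
 * On last close, turn off all CRTCs by setting an empty mode on each.
 */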
static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/**
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}

}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	DRM_INFO("Master create.\n");
	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	ttm_lock_init(&vmaster->lock);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	DRM_INFO("Master destroy.\n");
	master->driver_priv = NULL;
	kfree(vmaster);
}


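/**
 * A new master takes over: lock out the fbdev master and evict VRAM,
 * then make the new master's lock the active one.
 */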
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	DRM_INFO("Master set.\n");

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	vmw_release_device(dev_priv);
	return ret;
}

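/**
 * The current master is dropped (e.g. at VT switch): lock out its clients
 * and hand the device back to the fbdev master and framebuffer.
 */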
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	DRM_INFO("Master drop.\n");

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);

	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	vmw_fb_on(dev_priv);
}


static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

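/**
 * Power-management notifier: before suspend or hibernation, take the
 * suspend lock and swap out all buffer objects; release the lock again
 * on resume.
 */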
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);
	struct vmw_master *vmaster = dev_priv->active_master;

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&vmaster->lock);

		/**
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		ttm_bo_swapout_all(&dev_priv->bdev);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ttm_suspend_unlock(&vmaster->lock);
		break;
	case PM_RESTORE_PREPARE:
		break;
	case PM_POST_RESTORE:
		break;
	default:
		break;
	}
	return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.reclaim_buffers_locked = NULL,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .unlocked_ioctl = vmw_unlocked_ioctl,
		 .mmap = vmw_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		 .compat_ioctl = drm_compat_ioctl,
#endif
		 },
	.pci_driver = {
		       .name = VMWGFX_DRIVER_NAME,
		       .id_table = vmw_pci_id_list,
		       .probe = vmw_probe,
		       .remove = vmw_remove,
		       .suspend = vmw_pci_suspend,
		       .resume = vmw_pci_resume
		       },
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;
	ret = drm_init(&driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_exit(&driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");