author     Thomas Hellstrom <thellstrom@vmware.com>    2011-10-10 06:23:26 -0400
committer  Dave Airlie <airlied@redhat.com>            2011-10-10 10:46:55 -0400
commit     57c5ee79acba9582762c09c269e0e2ae1adf1b31
tree       26b2148b81e7a7c1785d9d07d45c68bb1117bfb5
parent     8bf445cee3127de3779a395d08d1ada2ad70161e
vmwgfx: Add fence events
Add a way to send DRM events down the gpu fifo by attaching them to
fence objects. This may be useful for Xserver swapbuffer throttling and
page-flip done notifications.
Bump version to 2.2 to signal the availability of the FENCE_EVENT ioctl.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
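
[Editor's illustration, not part of the patch.] The new ioctl could be driven from user space roughly as follows. This sketch assumes libdrm's drmCommandWrite(), the DRM_VMW_FENCE_EVENT command index and the structures from the vmwgfx_drm.h additions below; the helper name wait_fence_event() is invented here and error handling is abbreviated.

#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int wait_fence_event(int fd, uint64_t user_data)
{
        struct drm_vmw_fence_event_arg arg;
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        char buf[1024];
        ssize_t len;
        int ret;

        memset(&arg, 0, sizeof(arg));
        arg.handle = 0;          /* 0: fence the commands submitted so far */
        arg.fence_rep = 0ULL;    /* no user-space fence reference wanted */
        arg.flags = DRM_VMW_FE_FLAG_REQ_TIME;
        arg.user_data = user_data;

        ret = drmCommandWrite(fd, DRM_VMW_FENCE_EVENT, &arg, sizeof(arg));
        if (ret)
                return ret;

        /* Block until the kernel delivers the event, then read it back. */
        poll(&pfd, 1, -1);
        len = read(fd, buf, sizeof(buf));

        for (char *p = buf; p < buf + len; ) {
                struct drm_event *e = (struct drm_event *) p;

                if (e->type == DRM_VMW_EVENT_FENCE_SIGNALED) {
                        struct drm_vmw_event_fence *ev =
                                (struct drm_vmw_event_fence *) e;
                        printf("fence signaled at %u.%06u, cookie %llu\n",
                               ev->tv_sec, ev->tv_usec,
                               (unsigned long long) ev->user_data);
                }
                p += e->length;
        }
        return 0;
}

Passing handle == 0 asks the driver to create a fence covering the commands submitted so far (see vmw_fence_event_ioctl() below), which matches the swapbuffer-throttling use case mentioned in the commit message.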
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c     |   6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     |  16
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c   | 533
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h   |  12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c    |  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c     |  67
-rw-r--r--  include/drm/vmwgfx_drm.h                |  47
8 files changed, 644 insertions(+), 49 deletions(-)
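
[Editor's note, not part of the patch.] One detail that recurs in the fence code below: 32-bit seqnos are compared with unsigned subtraction against VMW_FENCE_WRAP, so the ordering test keeps working after the hardware counter wraps. A minimal stand-alone illustration with hypothetical values (the real VMW_FENCE_WRAP constant is defined in vmwgfx_drv.h and is not shown in this patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* Stand-in for VMW_FENCE_WRAP: how far "ahead" a seqno may be
         * before we treat it as not yet passed. */
        const uint32_t window = UINT32_C(1) << 30;

        uint32_t seqno  = UINT32_C(0xfffffff0); /* fence emitted just before wrap */
        uint32_t passed = UINT32_C(0x00000010); /* device counter after wrapping  */
        uint32_t future = passed + 100;         /* fence not reached yet          */

        /* Same shape as "seqno - fence->seqno < VMW_FENCE_WRAP" in
         * vmw_fences_update(): unsigned subtraction makes the test wrap-safe. */
        assert(passed - seqno < window);        /* already signaled */
        assert(!(passed - future < window));    /* not signaled yet */
        return 0;
}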
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ddb5abd6ac56..5e1994a8ffca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -94,6 +94,9 @@
 #define DRM_IOCTL_VMW_FENCE_UNREF \
         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
                 struct drm_vmw_fence_arg)
+#define DRM_IOCTL_VMW_FENCE_EVENT \
+        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
+                struct drm_vmw_fence_event_arg)
 #define DRM_IOCTL_VMW_PRESENT \
         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
                 struct drm_vmw_present_arg)
@@ -150,6 +153,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
                       DRM_AUTH | DRM_UNLOCKED),
         VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                       DRM_AUTH | DRM_UNLOCKED),
+        VMW_IOCTL_DEF(VMW_FENCE_EVENT,
+                      vmw_fence_event_ioctl,
+                      DRM_AUTH | DRM_UNLOCKED),
         VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                       DRM_AUTH | DRM_UNLOCKED),
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8cce73e7d18c..83b2563e684a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
 #include "ttm/ttm_module.h"
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20110927"
+#define VMWGFX_DRIVER_DATE "20111008"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 1
+#define VMWGFX_DRIVER_MINOR 2
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -264,10 +264,12 @@ struct vmw_private {
         wait_queue_head_t fence_queue;
         wait_queue_head_t fifo_queue;
         int fence_queue_waiters; /* Protected by hw_mutex */
+        int goal_queue_waiters; /* Protected by hw_mutex */
         atomic_t fifo_queue_waiters;
         uint32_t last_read_seqno;
         spinlock_t irq_lock;
         struct vmw_fence_manager *fman;
+        uint32_t irq_mask;
 
         /*
          * Device state
@@ -532,7 +534,13 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
                                       struct vmw_private *dev_priv,
                                       struct vmw_fence_obj **p_fence,
                                       uint32_t *p_handle);
-
+extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+                                        struct vmw_fpriv *vmw_fp,
+                                        int ret,
+                                        struct drm_vmw_fence_rep __user
+                                        *user_fence_rep,
+                                        struct vmw_fence_obj *fence,
+                                        uint32_t fence_handle);
 
 /**
  * IRQs and wating - vmwgfx_irq.c
@@ -557,6 +565,8 @@ extern void vmw_update_seqno(struct vmw_private *dev_priv,
                              struct vmw_fifo_state *fifo_state);
 extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
+extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
 
 /**
  * Rudimentary fence-like objects currently used only for throttling -
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f53b99d3c2e0..d4a1d8b06336 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1057,7 +1057,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
  * object so we wait for it immediately, and then unreference the
  * user-space reference.
  */
-static void
+void
 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                             struct vmw_fpriv *vmw_fp,
                             int ret,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 5f60be76166e..35d5f61fc7e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -34,13 +34,18 @@ struct vmw_fence_manager {
         int num_fence_objects;
         struct vmw_private *dev_priv;
         spinlock_t lock;
-        u32 next_seqno;
         struct list_head fence_list;
         struct work_struct work;
         u32 user_fence_size;
         u32 fence_size;
+        u32 event_fence_action_size;
         bool fifo_down;
         struct list_head cleanup_list;
+        uint32_t pending_actions[VMW_ACTION_MAX];
+        struct mutex goal_irq_mutex;
+        bool goal_irq_on; /* Protected by @goal_irq_mutex */
+        bool seqno_valid; /* Protected by @lock, and may not be set to true
+                             without the @goal_irq_mutex held. */
 };
 
 struct vmw_user_fence {
@@ -49,8 +54,51 @@ struct vmw_user_fence {
 };
 
 /**
- * vmw_fence_destroy_locked
+ * struct vmw_event_fence_action - fence action that delivers a drm event.
  *
+ * @e: A struct drm_pending_event that controls the event delivery.
+ * @action: A struct vmw_fence_action to hook up to a fence.
+ * @fence: A referenced pointer to the fence to keep it alive while @action
+ * hangs on it.
+ * @dev: Pointer to a struct drm_device so we can access the event stuff.
+ * @kref: Both @e and @action has destructors, so we need to refcount.
+ * @size: Size accounted for this object.
+ * @tv_sec: If non-null, the variable pointed to will be assigned
+ * current time tv_sec val when the fence signals.
+ * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
+ * be assigned the current time tv_usec val when the fence signals.
+ */
+struct vmw_event_fence_action {
+        struct drm_pending_event e;
+        struct vmw_fence_action action;
+        struct vmw_fence_obj *fence;
+        struct drm_device *dev;
+        struct kref kref;
+        uint32_t size;
+        uint32_t *tv_sec;
+        uint32_t *tv_usec;
+};
+
+/**
+ * Note on fencing subsystem usage of irqs:
+ * Typically the vmw_fences_update function is called
+ *
+ * a) When a new fence seqno has been submitted by the fifo code.
+ * b) On-demand when we have waiters. Sleeping waiters will switch on the
+ * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
+ * irq is received. When the last fence waiter is gone, that IRQ is masked
+ * away.
+ *
+ * In situations where there are no waiters and we don't submit any new fences,
+ * fence objects may not be signaled. This is perfectly OK, since there are
+ * no consumers of the signaled data, but that is NOT ok when there are fence
+ * actions attached to a fence. The fencing subsystem then makes use of the
+ * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
+ * which has an action attached, and each time vmw_fences_update is called,
+ * the subsystem makes sure the fence goal seqno is updated.
+ *
+ * The fence goal seqno irq is on as long as there are unsignaled fence
+ * objects with actions attached to them.
  */
 
 static void vmw_fence_obj_destroy_locked(struct kref *kref)
@@ -85,24 +133,36 @@ static void vmw_fence_work_func(struct work_struct *work)
                 container_of(work, struct vmw_fence_manager, work);
         struct list_head list;
         struct vmw_fence_action *action, *next_action;
+        bool seqno_valid;
 
         do {
                 INIT_LIST_HEAD(&list);
+                mutex_lock(&fman->goal_irq_mutex);
+
                 spin_lock_irq(&fman->lock);
                 list_splice_init(&fman->cleanup_list, &list);
+                seqno_valid = fman->seqno_valid;
                 spin_unlock_irq(&fman->lock);
 
+                if (!seqno_valid && fman->goal_irq_on) {
+                        fman->goal_irq_on = false;
+                        vmw_goal_waiter_remove(fman->dev_priv);
+                }
+                mutex_unlock(&fman->goal_irq_mutex);
+
                 if (list_empty(&list))
                         return;
 
                 /*
                  * At this point, only we should be able to manipulate the
                  * list heads of the actions we have on the private list.
+                 * hence fman::lock not held.
                  */
 
                 list_for_each_entry_safe(action, next_action, &list, head) {
                         list_del_init(&action->head);
-                        action->cleanup(action);
+                        if (action->cleanup)
+                                action->cleanup(action);
                 }
         } while (1);
 }
@@ -122,6 +182,9 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
         fman->fifo_down = true;
         fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
         fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
+        fman->event_fence_action_size =
+                ttm_round_pot(sizeof(struct vmw_event_fence_action));
+        mutex_init(&fman->goal_irq_mutex);
 
         return fman;
 }
@@ -214,6 +277,7 @@ void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
 
         list_for_each_entry_safe(action, next_action, list, head) {
                 list_del_init(&action->head);
+                fman->pending_actions[action->type]--;
                 if (action->seq_passed != NULL)
                         action->seq_passed(action);
 
@@ -222,17 +286,101 @@
                  * it will be performed by a worker task.
                  */
 
-                if (action->cleanup != NULL)
-                        list_add_tail(&action->head, &fman->cleanup_list);
+                list_add_tail(&action->head, &fman->cleanup_list);
+        }
+}
+
+/**
+ * vmw_fence_goal_new_locked - Figure out a new device fence goal
+ * seqno if needed.
+ *
+ * @fman: Pointer to a fence manager.
+ * @passed_seqno: The seqno the device currently signals as passed.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when we have a new passed_seqno, and
+ * we might need to update the fence goal. It checks to see whether
+ * the current fence goal has already passed, and, in that case,
+ * scans through all unsignaled fences to get the next fence object with an
+ * action attached, and sets the seqno of that fence as a new fence goal.
+ *
+ * returns true if the device goal seqno was updated. False otherwise.
+ */
+static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+                                      u32 passed_seqno)
+{
+        u32 goal_seqno;
+        __le32 __iomem *fifo_mem;
+        struct vmw_fence_obj *fence;
+
+        if (likely(!fman->seqno_valid))
+                return false;
+
+        fifo_mem = fman->dev_priv->mmio_virt;
+        goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+        if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
+                return false;
+
+        fman->seqno_valid = false;
+        list_for_each_entry(fence, &fman->fence_list, head) {
+                if (!list_empty(&fence->seq_passed_actions)) {
+                        fman->seqno_valid = true;
+                        iowrite32(fence->seqno,
+                                  fifo_mem + SVGA_FIFO_FENCE_GOAL);
+                        break;
+                }
         }
+
+        return true;
+}
+
+
+/**
+ * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
+ * needed.
+ *
+ * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
+ * considered as a device fence goal.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when an action has been attached to a fence to
+ * check whether the seqno of that fence should be used for a fence
+ * goal interrupt. This is typically needed if the current fence goal is
+ * invalid, or has a higher seqno than that of the current fence object.
+ *
+ * returns true if the device goal seqno was updated. False otherwise.
+ */
+static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
+{
+        u32 goal_seqno;
+        __le32 __iomem *fifo_mem;
+
+        if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
+                return false;
+
+        fifo_mem = fence->fman->dev_priv->mmio_virt;
+        goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+        if (likely(fence->fman->seqno_valid &&
+                   goal_seqno - fence->seqno < VMW_FENCE_WRAP))
+                return false;
+
+        iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+        fence->fman->seqno_valid = true;
+
+        return true;
 }
 
-void vmw_fences_update(struct vmw_fence_manager *fman, u32 seqno)
+void vmw_fences_update(struct vmw_fence_manager *fman)
 {
         unsigned long flags;
         struct vmw_fence_obj *fence, *next_fence;
         struct list_head action_list;
+        bool needs_rerun;
+        uint32_t seqno, new_seqno;
+        __le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
 
+        seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+rerun:
         spin_lock_irqsave(&fman->lock, flags);
         list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                 if (seqno - fence->seqno < VMW_FENCE_WRAP) {
@@ -243,14 +391,30 @@ void vmw_fences_update(struct vmw_fence_manager *fman, u32 seqno)
                                          &action_list);
                         vmw_fences_perform_actions(fman, &action_list);
                         wake_up_all(&fence->queue);
-                }
-
+                } else
+                        break;
         }
+
+        needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
+
         if (!list_empty(&fman->cleanup_list))
                 (void) schedule_work(&fman->work);
         spin_unlock_irqrestore(&fman->lock, flags);
-}
 
+        /*
+         * Rerun if the fence goal seqno was updated, and the
+         * hardware might have raced with that update, so that
+         * we missed a fence_goal irq.
+         */
+
+        if (unlikely(needs_rerun)) {
+                new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+                if (new_seqno != seqno) {
+                        seqno = new_seqno;
+                        goto rerun;
+                }
+        }
+}
 
 bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
                             uint32_t flags)
@@ -267,14 +431,8 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
         if ((signaled & flags) == flags)
                 return 1;
 
-        if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0) {
-                struct vmw_private *dev_priv = fman->dev_priv;
-                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-                u32 seqno;
-
-                seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
-                vmw_fences_update(fman, seqno);
-        }
+        if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
+                vmw_fences_update(fman);
 
         spin_lock_irqsave(&fman->lock, irq_flags);
         signaled = fence->signaled;
@@ -624,3 +782,344 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                                          arg->handle,
                                          TTM_REF_USAGE);
 }
+
+/**
+ * vmw_event_fence_action_destroy
+ *
+ * @kref: The struct kref embedded in a struct vmw_event_fence_action.
+ *
+ * The vmw_event_fence_action destructor that may be called either after
+ * the fence action cleanup, or when the event is delivered.
+ * It frees both the vmw_event_fence_action struct and the actual
+ * event structure copied to user-space.
+ */
+static void vmw_event_fence_action_destroy(struct kref *kref)
+{
+        struct vmw_event_fence_action *eaction =
+                container_of(kref, struct vmw_event_fence_action, kref);
+        struct ttm_mem_global *mem_glob =
+                vmw_mem_glob(vmw_priv(eaction->dev));
+        uint32_t size = eaction->size;
+
+        kfree(eaction->e.event);
+        kfree(eaction);
+        ttm_mem_global_free(mem_glob, size);
+}
+
+
+/**
+ * vmw_event_fence_action_delivered
+ *
+ * @e: The struct drm_pending_event embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * The struct drm_pending_event destructor that is called by drm
+ * once the event is delivered. Since we don't know whether this function
+ * will be called before or after the fence action destructor, we
+ * free a refcount and destroy if it becomes zero.
+ */
+static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
+{
+        struct vmw_event_fence_action *eaction =
+                container_of(e, struct vmw_event_fence_action, e);
+
+        kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+}
+
+
+/**
+ * vmw_event_fence_action_seq_passed
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is called when the seqno of the fence where @action is
+ * attached has passed. It queues the event on the submitter's event list.
+ * This function is always called from atomic context, and may be called
+ * from irq context. It ups a refcount reflecting that we now have two
+ * destructors.
+ */
+static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+{
+        struct vmw_event_fence_action *eaction =
+                container_of(action, struct vmw_event_fence_action, action);
+        struct drm_device *dev = eaction->dev;
+        struct drm_file *file_priv = eaction->e.file_priv;
+        unsigned long irq_flags;
+
+        kref_get(&eaction->kref);
+        spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+        if (likely(eaction->tv_sec != NULL)) {
+                struct timeval tv;
+
+                do_gettimeofday(&tv);
+                *eaction->tv_sec = tv.tv_sec;
+                *eaction->tv_usec = tv.tv_usec;
+        }
+
+        list_add_tail(&eaction->e.link, &file_priv->event_list);
+        wake_up_all(&file_priv->event_wait);
+        spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+}
+
+/**
+ * vmw_event_fence_action_cleanup
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is the struct vmw_fence_action destructor. It's typically
+ * called from a workqueue.
+ */
+static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
+{
+        struct vmw_event_fence_action *eaction =
+                container_of(action, struct vmw_event_fence_action, action);
+
+        vmw_fence_obj_unreference(&eaction->fence);
+        kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+}
+
+
+/**
+ * vmw_fence_obj_add_action - Add an action to a fence object.
+ *
+ * @fence - The fence object.
+ * @action - The action to add.
+ *
+ * Note that the action callbacks may be executed before this function
+ * returns.
+ */
+void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+                              struct vmw_fence_action *action)
+{
+        struct vmw_fence_manager *fman = fence->fman;
+        unsigned long irq_flags;
+        bool run_update = false;
+
+        mutex_lock(&fman->goal_irq_mutex);
+        spin_lock_irqsave(&fman->lock, irq_flags);
+
+        fman->pending_actions[action->type]++;
+        if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
+                struct list_head action_list;
+
+                INIT_LIST_HEAD(&action_list);
+                list_add_tail(&action->head, &action_list);
+                vmw_fences_perform_actions(fman, &action_list);
+        } else {
+                list_add_tail(&action->head, &fence->seq_passed_actions);
+
+                /*
+                 * This function may set fman::seqno_valid, so it must
+                 * be run with the goal_irq_mutex held.
+                 */
+                run_update = vmw_fence_goal_check_locked(fence);
+        }
+
+        spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+        if (run_update) {
+                if (!fman->goal_irq_on) {
+                        fman->goal_irq_on = true;
+                        vmw_goal_waiter_add(fman->dev_priv);
+                }
+                vmw_fences_update(fman);
+        }
+        mutex_unlock(&fman->goal_irq_mutex);
+
+}
+
+/**
+ * vmw_event_fence_action_create - Post an event for sending when a fence
+ * object seqno has passed.
+ *
+ * @file_priv: The file connection on which the event should be posted.
+ * @fence: The fence object on which to post the event.
+ * @event: Event to be posted. This event should've been alloced
+ * using k[mz]alloc, and should've been completely initialized.
+ * @interruptible: Interruptible waits if possible.
+ *
+ * As a side effect, the object pointed to by @event may have been
+ * freed when this function returns. If this function returns with
+ * an error code, the caller needs to free that object.
+ */
+
+int vmw_event_fence_action_create(struct drm_file *file_priv,
+                                  struct vmw_fence_obj *fence,
+                                  struct drm_event *event,
+                                  uint32_t *tv_sec,
+                                  uint32_t *tv_usec,
+                                  bool interruptible)
+{
+        struct vmw_event_fence_action *eaction =
+                kzalloc(sizeof(*eaction), GFP_KERNEL);
+        struct ttm_mem_global *mem_glob =
+                vmw_mem_glob(fence->fman->dev_priv);
+        struct vmw_fence_manager *fman = fence->fman;
+        uint32_t size = fman->event_fence_action_size +
+                ttm_round_pot(event->length);
+        int ret;
+
+        /*
+         * Account for internal structure size as well as the
+         * event size itself.
+         */
+
+        ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
+        if (unlikely(ret != 0))
+                return ret;
+
+        eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
+        if (unlikely(eaction == NULL)) {
+                ttm_mem_global_free(mem_glob, size);
+                return -ENOMEM;
+        }
+
+        eaction->e.event = event;
+        eaction->e.file_priv = file_priv;
+        eaction->e.destroy = vmw_event_fence_action_delivered;
+
+        eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
+        eaction->action.cleanup = vmw_event_fence_action_cleanup;
+        eaction->action.type = VMW_ACTION_EVENT;
+
+        eaction->fence = vmw_fence_obj_reference(fence);
+        eaction->dev = fman->dev_priv->dev;
+        eaction->size = size;
+        eaction->tv_sec = tv_sec;
+        eaction->tv_usec = tv_usec;
+
+        kref_init(&eaction->kref);
+        vmw_fence_obj_add_action(fence, &eaction->action);
+
+        return 0;
+}
+
+int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv)
+{
+        struct vmw_private *dev_priv = vmw_priv(dev);
+        struct drm_vmw_fence_event_arg *arg =
+                (struct drm_vmw_fence_event_arg *) data;
+        struct vmw_fence_obj *fence = NULL;
+        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+        struct drm_vmw_fence_rep __user *user_fence_rep =
+                (struct drm_vmw_fence_rep __user *)(unsigned long)
+                arg->fence_rep;
+        uint32_t handle;
+        unsigned long irq_flags;
+        struct drm_vmw_event_fence *event;
+        int ret;
+
+        /*
+         * Look up an existing fence object,
+         * and if user-space wants a new reference,
+         * add one.
+         */
+        if (arg->handle) {
+                struct ttm_base_object *base =
+                        ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+
+                if (unlikely(base == NULL)) {
+                        DRM_ERROR("Fence event invalid fence object handle "
+                                  "0x%08lx.\n",
+                                  (unsigned long)arg->handle);
+                        return -EINVAL;
+                }
+                fence = &(container_of(base, struct vmw_user_fence,
+                                       base)->fence);
+                (void) vmw_fence_obj_reference(fence);
+
+                if (user_fence_rep != NULL) {
+                        bool existed;
+
+                        ret = ttm_ref_object_add(vmw_fp->tfile, base,
+                                                 TTM_REF_USAGE, &existed);
+                        if (unlikely(ret != 0)) {
+                                DRM_ERROR("Failed to reference a fence "
+                                          "object.\n");
+                                goto out_no_ref_obj;
+                        }
+                        handle = base->hash.key;
+                }
+                ttm_base_object_unref(&base);
+        }
+
+        /*
+         * Create a new fence object.
+         */
+        if (!fence) {
+                ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+                                                 &fence,
+                                                 (user_fence_rep) ?
+                                                 &handle : NULL);
+                if (unlikely(ret != 0)) {
+                        DRM_ERROR("Fence event failed to create fence.\n");
+                        return ret;
+                }
+        }
+
+        BUG_ON(fence == NULL);
+
+        spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+        ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
+        if (likely(ret == 0))
+                file_priv->event_space -= sizeof(*event);
+
+        spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+
+        if (unlikely(ret != 0)) {
+                DRM_ERROR("Failed to allocate event space for this file.\n");
+                goto out_no_event_space;
+        }
+
+        event = kzalloc(sizeof(*event), GFP_KERNEL);
+        if (unlikely(event == NULL)) {
+                DRM_ERROR("Failed to allocate an event.\n");
+                goto out_no_event;
+        }
+
+        event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+        event->base.length = sizeof(*event);
+        event->user_data = arg->user_data;
+
+        if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
+                ret = vmw_event_fence_action_create(file_priv, fence,
+                                                    &event->base,
+                                                    &event->tv_sec,
+                                                    &event->tv_usec,
+                                                    true);
+        else
+                ret = vmw_event_fence_action_create(file_priv, fence,
+                                                    &event->base,
+                                                    NULL,
+                                                    NULL,
+                                                    true);
+
+        if (unlikely(ret != 0)) {
+                if (ret != -ERESTARTSYS)
+                        DRM_ERROR("Failed to attach event to fence.\n");
+                goto out_no_attach;
+        }
+
+        vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+                                    handle);
+        vmw_fence_obj_unreference(&fence);
+        return 0;
+out_no_attach:
+        kfree(event);
+out_no_event:
+        spin_lock_irqsave(&dev->event_lock, irq_flags);
+        file_priv->event_space += sizeof(*event);
+        spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+out_no_event_space:
+        if (user_fence_rep != NULL)
+                ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+                                          handle, TTM_REF_USAGE);
+out_no_ref_obj:
+        vmw_fence_obj_unreference(&fence);
+        return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 93074064aaf3..0854a2096b55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -37,8 +37,14 @@ struct vmw_fence_manager;
  *
  *
  */
+enum vmw_action_type {
+        VMW_ACTION_EVENT = 0,
+        VMW_ACTION_MAX
+};
+
 struct vmw_fence_action {
         struct list_head head;
+        enum vmw_action_type type;
         void (*seq_passed) (struct vmw_fence_action *action);
         void (*cleanup) (struct vmw_fence_action *action);
 };
@@ -66,8 +72,7 @@ extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
 extern struct vmw_fence_obj *
 vmw_fence_obj_reference(struct vmw_fence_obj *fence);
 
-extern void vmw_fences_update(struct vmw_fence_manager *fman,
-                              u32 sequence);
+extern void vmw_fences_update(struct vmw_fence_manager *fman);
 
 extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
                                    uint32_t flags);
@@ -102,4 +107,7 @@ extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
 
 extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
+extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv);
+
 #endif /* _VMWGFX_FENCE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 62d6377b8ee8..03bbc2a6f9a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -247,9 +247,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
                 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                 outl(SVGA_IRQFLAG_FIFO_PROGRESS,
                      dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-                vmw_write(dev_priv, SVGA_REG_IRQMASK,
-                          vmw_read(dev_priv, SVGA_REG_IRQMASK) |
-                          SVGA_IRQFLAG_FIFO_PROGRESS);
+                dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
+                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
         }
         mutex_unlock(&dev_priv->hw_mutex);
@@ -271,9 +270,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
         mutex_lock(&dev_priv->hw_mutex);
         if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
                 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-                vmw_write(dev_priv, SVGA_REG_IRQMASK,
-                          vmw_read(dev_priv, SVGA_REG_IRQMASK) &
-                          ~SVGA_IRQFLAG_FIFO_PROGRESS);
+                dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
+                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
         }
         mutex_unlock(&dev_priv->hw_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index a005292a8908..cabc95f7517e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -34,28 +34,30 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
 {
         struct drm_device *dev = (struct drm_device *)arg;
         struct vmw_private *dev_priv = vmw_priv(dev);
-        uint32_t status;
+        uint32_t status, masked_status;
 
         spin_lock(&dev_priv->irq_lock);
         status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+        masked_status = status & dev_priv->irq_mask;
         spin_unlock(&dev_priv->irq_lock);
 
-        if (status & SVGA_IRQFLAG_ANY_FENCE) {
-                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-                uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+        if (likely(status))
+                outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+
+        if (!masked_status)
+                return IRQ_NONE;
 
-                vmw_fences_update(dev_priv->fman, seqno);
+        if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+                             SVGA_IRQFLAG_FENCE_GOAL)) {
+                vmw_fences_update(dev_priv->fman);
                 wake_up_all(&dev_priv->fence_queue);
         }
-        if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
+
+        if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
                 wake_up_all(&dev_priv->fifo_queue);
 
-        if (likely(status)) {
-                outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-                return IRQ_HANDLED;
-        }
 
-        return IRQ_NONE;
+        return IRQ_HANDLED;
 }
 
 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
@@ -78,7 +80,7 @@ void vmw_update_seqno(struct vmw_private *dev_priv,
         if (dev_priv->last_read_seqno != seqno) {
                 dev_priv->last_read_seqno = seqno;
                 vmw_marker_pull(&fifo_state->marker_queue, seqno);
-                vmw_fences_update(dev_priv->fman, seqno);
+                vmw_fences_update(dev_priv->fman);
         }
 }
 
@@ -189,9 +191,8 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
                 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                 outl(SVGA_IRQFLAG_ANY_FENCE,
                      dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-                vmw_write(dev_priv, SVGA_REG_IRQMASK,
-                          vmw_read(dev_priv, SVGA_REG_IRQMASK) |
-                          SVGA_IRQFLAG_ANY_FENCE);
+                dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
+                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
         }
         mutex_unlock(&dev_priv->hw_mutex);
@@ -204,9 +205,39 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
                 unsigned long irq_flags;
 
                 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-                vmw_write(dev_priv, SVGA_REG_IRQMASK,
-                          vmw_read(dev_priv, SVGA_REG_IRQMASK) &
-                          ~SVGA_IRQFLAG_ANY_FENCE);
+                dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
+                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+        }
+        mutex_unlock(&dev_priv->hw_mutex);
+}
+
+
+void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+{
+        mutex_lock(&dev_priv->hw_mutex);
+        if (dev_priv->goal_queue_waiters++ == 0) {
+                unsigned long irq_flags;
+
+                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+                outl(SVGA_IRQFLAG_FENCE_GOAL,
+                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+                dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
+                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+        }
+        mutex_unlock(&dev_priv->hw_mutex);
+}
+
+void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+{
+        mutex_lock(&dev_priv->hw_mutex);
+        if (--dev_priv->goal_queue_waiters == 0) {
+                unsigned long irq_flags;
+
+                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+                dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
+                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
         }
         mutex_unlock(&dev_priv->hw_mutex);
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 5b5b0a891f36..cd7cd8162ed6 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -685,6 +685,51 @@ struct drm_vmw_fence_arg {
 
 /*************************************************************************/
 /**
+ * DRM_VMW_FENCE_EVENT
+ *
+ * Queues an event on a fence to be delivered on the drm character device
+ * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
+ * Optionally the approximate time when the fence signaled is
+ * given by the event.
+ */
+
+/*
+ * The event type
+ */
+#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000
+
+struct drm_vmw_event_fence {
+        struct drm_event base;
+        uint64_t user_data;
+        uint32_t tv_sec;
+        uint32_t tv_usec;
+};
+
+/*
+ * Flags that may be given to the command.
+ */
+/* Request fence signaled time on the event. */
+#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)
+
+/**
+ * struct drm_vmw_fence_event_arg
+ *
+ * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if
+ * the fence is not supposed to be referenced by user-space.
+ * @user_info: Info to be delivered with the event.
+ * @handle: Attach the event to this fence only.
+ * @flags: A set of flags as defined above.
+ */
+struct drm_vmw_fence_event_arg {
+        uint64_t fence_rep;
+        uint64_t user_data;
+        uint32_t handle;
+        uint32_t flags;
+};
+
+
+/*************************************************************************/
+/**
  * DRM_VMW_PRESENT
  *
  * Executes an SVGA present on a given fb for a given surface. The surface
@@ -743,6 +788,4 @@ struct drm_vmw_present_readback_arg {
         uint64_t clips_ptr;
         uint64_t fence_rep;
 };
-
-
 #endif