path: root/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
author	Thomas Hellstrom <thellstrom@vmware.com>	2011-10-10 06:23:26 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-10-10 10:46:55 -0400
commit	57c5ee79acba9582762c09c269e0e2ae1adf1b31 (patch)
tree	26b2148b81e7a7c1785d9d07d45c68bb1117bfb5 /drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
parent	8bf445cee3127de3779a395d08d1ada2ad70161e (diff)
vmwgfx: Add fence events
Add a way to send DRM events down the gpu fifo by attaching them to
fence objects. This may be useful for Xserver swapbuffer throttling and
page-flip done notifications.

Bump version to 2.2 to signal the availability of the FENCE_EVENT ioctl.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
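For context, a minimal user-space sketch of how the new ioctl is intended to be driven. This is not part of the patch; the argument and event layouts are assumed to match the companion vmwgfx_drm.h change in this series, and the helper name below is made up.

/*
 * Hypothetical user-space sketch (not part of this patch): request a
 * DRM_VMW_EVENT_FENCE_SIGNALED event on an existing fence handle and
 * read it back from the drm fd once the fence signals.  Verify the
 * struct layouts against the vmwgfx_drm.h shipped with your kernel.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"	/* location depends on your libdrm install */

static int example_wait_fence_event(int drm_fd, uint32_t fence_handle)
{
	struct drm_vmw_fence_event_arg arg;
	struct drm_vmw_event_fence ev;	/* base + user_data + tv_sec/tv_usec */
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.handle = fence_handle;		/* 0 asks the kernel to create a new fence */
	arg.flags = DRM_VMW_FE_FLAG_REQ_TIME;	/* also fill in tv_sec/tv_usec */
	arg.user_data = 0x1234;			/* echoed back in the event */

	ret = drmCommandWrite(drm_fd, DRM_VMW_FENCE_EVENT, &arg, sizeof(arg));
	if (ret)
		return ret;

	/* The event becomes readable on the drm fd when the fence signals. */
	if (read(drm_fd, &ev, sizeof(ev)) != (ssize_t) sizeof(ev))
		return -1;

	return (ev.base.type == DRM_VMW_EVENT_FENCE_SIGNALED &&
		ev.user_data == 0x1234) ? 0 : -1;
}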
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_fence.c')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fence.c	533
1 files changed, 516 insertions, 17 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 5f60be76166e..35d5f61fc7e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -34,13 +34,18 @@ struct vmw_fence_manager {
 	int num_fence_objects;
 	struct vmw_private *dev_priv;
 	spinlock_t lock;
-	u32 next_seqno;
 	struct list_head fence_list;
 	struct work_struct work;
 	u32 user_fence_size;
 	u32 fence_size;
+	u32 event_fence_action_size;
 	bool fifo_down;
 	struct list_head cleanup_list;
+	uint32_t pending_actions[VMW_ACTION_MAX];
+	struct mutex goal_irq_mutex;
+	bool goal_irq_on; /* Protected by @goal_irq_mutex */
+	bool seqno_valid; /* Protected by @lock, and may not be set to true
+			     without the @goal_irq_mutex held. */
 };
 
 struct vmw_user_fence {
@@ -49,8 +54,51 @@ struct vmw_user_fence {
 };
 
 /**
- * vmw_fence_destroy_locked
+ * struct vmw_event_fence_action - fence action that delivers a drm event.
  *
+ * @e: A struct drm_pending_event that controls the event delivery.
+ * @action: A struct vmw_fence_action to hook up to a fence.
+ * @fence: A referenced pointer to the fence to keep it alive while @action
+ * hangs on it.
+ * @dev: Pointer to a struct drm_device so we can access the event stuff.
+ * @kref: Both @e and @action has destructors, so we need to refcount.
+ * @size: Size accounted for this object.
+ * @tv_sec: If non-null, the variable pointed to will be assigned
+ * current time tv_sec val when the fence signals.
+ * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
+ * be assigned the current time tv_usec val when the fence signals.
+ */
+struct vmw_event_fence_action {
+	struct drm_pending_event e;
+	struct vmw_fence_action action;
+	struct vmw_fence_obj *fence;
+	struct drm_device *dev;
+	struct kref kref;
+	uint32_t size;
+	uint32_t *tv_sec;
+	uint32_t *tv_usec;
+};
+
+/**
+ * Note on fencing subsystem usage of irqs:
+ * Typically the vmw_fences_update function is called
+ *
+ * a) When a new fence seqno has been submitted by the fifo code.
+ * b) On-demand when we have waiters. Sleeping waiters will switch on the
+ * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
+ * irq is received. When the last fence waiter is gone, that IRQ is masked
+ * away.
+ *
+ * In situations where there are no waiters and we don't submit any new fences,
+ * fence objects may not be signaled. This is perfectly OK, since there are
+ * no consumers of the signaled data, but that is NOT ok when there are fence
+ * actions attached to a fence. The fencing subsystem then makes use of the
+ * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
+ * which has an action attached, and each time vmw_fences_update is called,
+ * the subsystem makes sure the fence goal seqno is updated.
+ *
+ * The fence goal seqno irq is on as long as there are unsignaled fence
+ * objects with actions attached to them.
  */
 
 static void vmw_fence_obj_destroy_locked(struct kref *kref)
@@ -85,24 +133,36 @@ static void vmw_fence_work_func(struct work_struct *work)
 		container_of(work, struct vmw_fence_manager, work);
 	struct list_head list;
 	struct vmw_fence_action *action, *next_action;
+	bool seqno_valid;
 
 	do {
 		INIT_LIST_HEAD(&list);
+		mutex_lock(&fman->goal_irq_mutex);
+
 		spin_lock_irq(&fman->lock);
 		list_splice_init(&fman->cleanup_list, &list);
+		seqno_valid = fman->seqno_valid;
 		spin_unlock_irq(&fman->lock);
 
+		if (!seqno_valid && fman->goal_irq_on) {
+			fman->goal_irq_on = false;
+			vmw_goal_waiter_remove(fman->dev_priv);
+		}
+		mutex_unlock(&fman->goal_irq_mutex);
+
 		if (list_empty(&list))
 			return;
 
 		/*
 		 * At this point, only we should be able to manipulate the
 		 * list heads of the actions we have on the private list.
+		 * hence fman::lock not held.
 		 */
 
 		list_for_each_entry_safe(action, next_action, &list, head) {
 			list_del_init(&action->head);
-			action->cleanup(action);
+			if (action->cleanup)
+				action->cleanup(action);
 		}
 	} while (1);
 }
@@ -122,6 +182,9 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 	fman->fifo_down = true;
 	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
 	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
+	fman->event_fence_action_size =
+		ttm_round_pot(sizeof(struct vmw_event_fence_action));
+	mutex_init(&fman->goal_irq_mutex);
 
 	return fman;
 }
@@ -214,6 +277,7 @@ void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
 
 	list_for_each_entry_safe(action, next_action, list, head) {
 		list_del_init(&action->head);
+		fman->pending_actions[action->type]--;
 		if (action->seq_passed != NULL)
 			action->seq_passed(action);
 
@@ -222,17 +286,101 @@ void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
 		 * it will be performed by a worker task.
 		 */
 
-		if (action->cleanup != NULL)
-			list_add_tail(&action->head, &fman->cleanup_list);
+		list_add_tail(&action->head, &fman->cleanup_list);
+	}
+}
+
+/**
+ * vmw_fence_goal_new_locked - Figure out a new device fence goal
+ * seqno if needed.
+ *
+ * @fman: Pointer to a fence manager.
+ * @passed_seqno: The seqno the device currently signals as passed.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when we have a new passed_seqno, and
+ * we might need to update the fence goal. It checks to see whether
+ * the current fence goal has already passed, and, in that case,
+ * scans through all unsignaled fences to get the next fence object with an
+ * action attached, and sets the seqno of that fence as a new fence goal.
+ *
+ * returns true if the device goal seqno was updated. False otherwise.
+ */
+static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+				      u32 passed_seqno)
+{
+	u32 goal_seqno;
+	__le32 __iomem *fifo_mem;
+	struct vmw_fence_obj *fence;
+
+	if (likely(!fman->seqno_valid))
+		return false;
+
+	fifo_mem = fman->dev_priv->mmio_virt;
+	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
+		return false;
+
+	fman->seqno_valid = false;
+	list_for_each_entry(fence, &fman->fence_list, head) {
+		if (!list_empty(&fence->seq_passed_actions)) {
+			fman->seqno_valid = true;
+			iowrite32(fence->seqno,
+				  fifo_mem + SVGA_FIFO_FENCE_GOAL);
+			break;
+		}
 	}
+
+	return true;
+}
+
+
+/**
+ * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
+ * needed.
+ *
+ * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
+ * considered as a device fence goal.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when an action has been attached to a fence to
+ * check whether the seqno of that fence should be used for a fence
+ * goal interrupt. This is typically needed if the current fence goal is
+ * invalid, or has a higher seqno than that of the current fence object.
+ *
+ * returns true if the device goal seqno was updated. False otherwise.
+ */
+static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
+{
+	u32 goal_seqno;
+	__le32 __iomem *fifo_mem;
+
+	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
+		return false;
+
+	fifo_mem = fence->fman->dev_priv->mmio_virt;
+	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	if (likely(fence->fman->seqno_valid &&
+		   goal_seqno - fence->seqno < VMW_FENCE_WRAP))
+		return false;
+
+	iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	fence->fman->seqno_valid = true;
+
+	return true;
 }
 
-void vmw_fences_update(struct vmw_fence_manager *fman, u32 seqno)
+void vmw_fences_update(struct vmw_fence_manager *fman)
 {
 	unsigned long flags;
 	struct vmw_fence_obj *fence, *next_fence;
 	struct list_head action_list;
+	bool needs_rerun;
+	uint32_t seqno, new_seqno;
+	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
 
+	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+rerun:
 	spin_lock_irqsave(&fman->lock, flags);
 	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
 		if (seqno - fence->seqno < VMW_FENCE_WRAP) {
@@ -243,14 +391,30 @@ void vmw_fences_update(struct vmw_fence_manager *fman, u32 seqno)
 					 &action_list);
 			vmw_fences_perform_actions(fman, &action_list);
 			wake_up_all(&fence->queue);
-		}
-
+		} else
+			break;
 	}
+
+	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
+
 	if (!list_empty(&fman->cleanup_list))
 		(void) schedule_work(&fman->work);
 	spin_unlock_irqrestore(&fman->lock, flags);
-}
 
+	/*
+	 * Rerun if the fence goal seqno was updated, and the
+	 * hardware might have raced with that update, so that
+	 * we missed a fence_goal irq.
+	 */
+
+	if (unlikely(needs_rerun)) {
+		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+		if (new_seqno != seqno) {
+			seqno = new_seqno;
+			goto rerun;
+		}
+	}
+}
 
 bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
 			    uint32_t flags)
@@ -267,14 +431,8 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
 	if ((signaled & flags) == flags)
 		return 1;
 
-	if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0) {
-		struct vmw_private *dev_priv = fman->dev_priv;
-		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-		u32 seqno;
-
-		seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
-		vmw_fences_update(fman, seqno);
-	}
+	if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
+		vmw_fences_update(fman);
 
 	spin_lock_irqsave(&fman->lock, irq_flags);
 	signaled = fence->signaled;
@@ -624,3 +782,344 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
 					 arg->handle,
 					 TTM_REF_USAGE);
 }
+
+/**
+ * vmw_event_fence_action_destroy
+ *
+ * @kref: The struct kref embedded in a struct vmw_event_fence_action.
+ *
+ * The vmw_event_fence_action destructor that may be called either after
+ * the fence action cleanup, or when the event is delivered.
+ * It frees both the vmw_event_fence_action struct and the actual
+ * event structure copied to user-space.
+ */
+static void vmw_event_fence_action_destroy(struct kref *kref)
+{
+	struct vmw_event_fence_action *eaction =
+		container_of(kref, struct vmw_event_fence_action, kref);
+	struct ttm_mem_global *mem_glob =
+		vmw_mem_glob(vmw_priv(eaction->dev));
+	uint32_t size = eaction->size;
+
+	kfree(eaction->e.event);
+	kfree(eaction);
+	ttm_mem_global_free(mem_glob, size);
+}
+
+
+/**
+ * vmw_event_fence_action_delivered
+ *
+ * @e: The struct drm_pending_event embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * The struct drm_pending_event destructor that is called by drm
+ * once the event is delivered. Since we don't know whether this function
+ * will be called before or after the fence action destructor, we
+ * free a refcount and destroy if it becomes zero.
+ */
+static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
+{
+	struct vmw_event_fence_action *eaction =
+		container_of(e, struct vmw_event_fence_action, e);
+
+	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+}
+
+
+/**
+ * vmw_event_fence_action_seq_passed
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is called when the seqno of the fence where @action is
+ * attached has passed. It queues the event on the submitter's event list.
+ * This function is always called from atomic context, and may be called
+ * from irq context. It ups a refcount reflecting that we now have two
+ * destructors.
+ */
+static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+{
+	struct vmw_event_fence_action *eaction =
+		container_of(action, struct vmw_event_fence_action, action);
+	struct drm_device *dev = eaction->dev;
+	struct drm_file *file_priv = eaction->e.file_priv;
+	unsigned long irq_flags;
+
+	kref_get(&eaction->kref);
+	spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+	if (likely(eaction->tv_sec != NULL)) {
+		struct timeval tv;
+
+		do_gettimeofday(&tv);
+		*eaction->tv_sec = tv.tv_sec;
+		*eaction->tv_usec = tv.tv_usec;
+	}
+
+	list_add_tail(&eaction->e.link, &file_priv->event_list);
+	wake_up_all(&file_priv->event_wait);
+	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+}
+
+/**
+ * vmw_event_fence_action_cleanup
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is the struct vmw_fence_action destructor. It's typically
+ * called from a workqueue.
+ */
+static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
+{
+	struct vmw_event_fence_action *eaction =
+		container_of(action, struct vmw_event_fence_action, action);
+
+	vmw_fence_obj_unreference(&eaction->fence);
+	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+}
+
+
+/**
+ * vmw_fence_obj_add_action - Add an action to a fence object.
+ *
+ * @fence - The fence object.
+ * @action - The action to add.
+ *
+ * Note that the action callbacks may be executed before this function
+ * returns.
+ */
+void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+			      struct vmw_fence_action *action)
+{
+	struct vmw_fence_manager *fman = fence->fman;
+	unsigned long irq_flags;
+	bool run_update = false;
+
+	mutex_lock(&fman->goal_irq_mutex);
+	spin_lock_irqsave(&fman->lock, irq_flags);
+
+	fman->pending_actions[action->type]++;
+	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
+		struct list_head action_list;
+
+		INIT_LIST_HEAD(&action_list);
+		list_add_tail(&action->head, &action_list);
+		vmw_fences_perform_actions(fman, &action_list);
+	} else {
+		list_add_tail(&action->head, &fence->seq_passed_actions);
+
+		/*
+		 * This function may set fman::seqno_valid, so it must
+		 * be run with the goal_irq_mutex held.
+		 */
+		run_update = vmw_fence_goal_check_locked(fence);
+	}
+
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+	if (run_update) {
+		if (!fman->goal_irq_on) {
+			fman->goal_irq_on = true;
+			vmw_goal_waiter_add(fman->dev_priv);
+		}
+		vmw_fences_update(fman);
+	}
+	mutex_unlock(&fman->goal_irq_mutex);
+
+}
+
+/**
+ * vmw_event_fence_action_create - Post an event for sending when a fence
+ * object seqno has passed.
+ *
+ * @file_priv: The file connection on which the event should be posted.
+ * @fence: The fence object on which to post the event.
+ * @event: Event to be posted. This event should've been alloced
+ * using k[mz]alloc, and should've been completely initialized.
+ * @interruptible: Interruptible waits if possible.
+ *
+ * As a side effect, the object pointed to by @event may have been
+ * freed when this function returns. If this function returns with
+ * an error code, the caller needs to free that object.
+ */
+
+int vmw_event_fence_action_create(struct drm_file *file_priv,
+				  struct vmw_fence_obj *fence,
+				  struct drm_event *event,
+				  uint32_t *tv_sec,
+				  uint32_t *tv_usec,
+				  bool interruptible)
+{
+	struct vmw_event_fence_action *eaction =
+		kzalloc(sizeof(*eaction), GFP_KERNEL);
+	struct ttm_mem_global *mem_glob =
+		vmw_mem_glob(fence->fman->dev_priv);
+	struct vmw_fence_manager *fman = fence->fman;
+	uint32_t size = fman->event_fence_action_size +
+		ttm_round_pot(event->length);
+	int ret;
+
+	/*
+	 * Account for internal structure size as well as the
+	 * event size itself.
+	 */
+
+	ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
+	if (unlikely(eaction == NULL)) {
+		ttm_mem_global_free(mem_glob, size);
+		return -ENOMEM;
+	}
+
+	eaction->e.event = event;
+	eaction->e.file_priv = file_priv;
+	eaction->e.destroy = vmw_event_fence_action_delivered;
+
+	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
+	eaction->action.cleanup = vmw_event_fence_action_cleanup;
+	eaction->action.type = VMW_ACTION_EVENT;
+
+	eaction->fence = vmw_fence_obj_reference(fence);
+	eaction->dev = fman->dev_priv->dev;
+	eaction->size = size;
+	eaction->tv_sec = tv_sec;
+	eaction->tv_usec = tv_usec;
+
+	kref_init(&eaction->kref);
+	vmw_fence_obj_add_action(fence, &eaction->action);
+
+	return 0;
+}
+
+int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_fence_event_arg *arg =
+		(struct drm_vmw_fence_event_arg *) data;
+	struct vmw_fence_obj *fence = NULL;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	struct drm_vmw_fence_rep __user *user_fence_rep =
+		(struct drm_vmw_fence_rep __user *)(unsigned long)
+		arg->fence_rep;
+	uint32_t handle;
+	unsigned long irq_flags;
+	struct drm_vmw_event_fence *event;
+	int ret;
+
+	/*
+	 * Look up an existing fence object,
+	 * and if user-space wants a new reference,
+	 * add one.
+	 */
+	if (arg->handle) {
+		struct ttm_base_object *base =
+			ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+
+		if (unlikely(base == NULL)) {
+			DRM_ERROR("Fence event invalid fence object handle "
+				  "0x%08lx.\n",
+				  (unsigned long)arg->handle);
+			return -EINVAL;
+		}
+		fence = &(container_of(base, struct vmw_user_fence,
+				       base)->fence);
+		(void) vmw_fence_obj_reference(fence);
+
+		if (user_fence_rep != NULL) {
+			bool existed;
+
+			ret = ttm_ref_object_add(vmw_fp->tfile, base,
+						 TTM_REF_USAGE, &existed);
+			if (unlikely(ret != 0)) {
+				DRM_ERROR("Failed to reference a fence "
+					  "object.\n");
+				goto out_no_ref_obj;
+			}
+			handle = base->hash.key;
+		}
+		ttm_base_object_unref(&base);
+	}
+
+	/*
+	 * Create a new fence object.
+	 */
+	if (!fence) {
+		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+						 &fence,
+						 (user_fence_rep) ?
+						 &handle : NULL);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Fence event failed to create fence.\n");
+			return ret;
+		}
+	}
+
+	BUG_ON(fence == NULL);
+
+	spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+	ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
+	if (likely(ret == 0))
+		file_priv->event_space -= sizeof(*event);
+
+	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate event space for this file.\n");
+		goto out_no_event_space;
+	}
+
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
+	if (unlikely(event == NULL)) {
+		DRM_ERROR("Failed to allocate an event.\n");
+		goto out_no_event;
+	}
+
+	event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+	event->base.length = sizeof(*event);
+	event->user_data = arg->user_data;
+
+	if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
+		ret = vmw_event_fence_action_create(file_priv, fence,
+						    &event->base,
+						    &event->tv_sec,
+						    &event->tv_usec,
+						    true);
+	else
+		ret = vmw_event_fence_action_create(file_priv, fence,
+						    &event->base,
+						    NULL,
+						    NULL,
+						    true);
+
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Failed to attach event to fence.\n");
+		goto out_no_attach;
+	}
+
+	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+				    handle);
+	vmw_fence_obj_unreference(&fence);
+	return 0;
+out_no_attach:
+	kfree(event);
+out_no_event:
+	spin_lock_irqsave(&dev->event_lock, irq_flags);
+	file_priv->event_space += sizeof(*event);
+	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+out_no_event_space:
+	if (user_fence_rep != NULL)
+		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					  handle, TTM_REF_USAGE);
+out_no_ref_obj:
+	vmw_fence_obj_unreference(&fence);
+	return ret;
+}
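Inside the kernel, anything that can produce a struct drm_event and a fence can reuse the same plumbing; the commit message mentions swapbuffer throttling and page-flip completion as candidates. A hypothetical in-driver caller (not part of this patch; the example_* name is made up) would look roughly like this, mirroring what vmw_fence_event_ioctl() does after it has allocated and charged the event:

/*
 * Hypothetical sketch only: attach an already-allocated, already-charged
 * drm_vmw_event_fence to a fence so that it is delivered to @file_priv
 * when the fence signals.
 */
static int example_queue_flip_done_event(struct drm_file *file_priv,
					  struct vmw_fence_obj *fence,
					  struct drm_vmw_event_fence *event)
{
	/* Ask for a timestamp in the event payload when the fence signals. */
	return vmw_event_fence_action_create(file_priv, fence, &event->base,
					     &event->tv_sec, &event->tv_usec,
					     true);
}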