/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

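/*
 * Seqnos wrap around as unsigned 32-bit values. A seqno "a" is treated as
 * having passed seqno "b" whenever (a - b) < VMW_FENCE_WRAP in unsigned
 * arithmetic, i.e. whenever "a" is less than half the seqno space ahead
 * of "b". For example, a == 0x00000002 has passed b == 0xfffffffe since
 * 0x00000002 - 0xfffffffe == 4 < VMW_FENCE_WRAP, even though the counter
 * has wrapped. This idiom is used in vmw_fences_update() and the fence
 * goal helpers below.
 */
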
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @e: A struct drm_pending_event that controls the event delivery.
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @kref: Both @e and @action have destructors, so we need to refcount.
 * @size: Size accounted for this object.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct drm_pending_event e;
	struct vmw_fence_action action;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;
	struct kref kref;
	uint32_t size;
	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

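/*
 * Lifecycle sketch of @kref, inferred from the code below: it starts at
 * one from kref_init(), and vmw_event_fence_action_seq_passed() takes a
 * second reference before queuing the event. The drm event destructor
 * (vmw_event_fence_action_delivered) and the fence action destructor
 * (vmw_event_fence_action_cleanup) each drop one reference, in whichever
 * order they happen to run, and the last put frees the object.
 */
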
/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

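/*
 * Lock ordering note, inferred from the functions below: when both are
 * needed, fman->goal_irq_mutex is taken outside fman->lock, and
 * fman->seqno_valid may only be set to true with the mutex held (see the
 * vmw_fence_goal_check_locked() caller in vmw_fence_obj_add_action()).
 */
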
static void vmw_fence_obj_destroy_locked(struct kref *kref)
{
	struct vmw_fence_obj *fence =
		container_of(kref, struct vmw_fence_obj, kref);

	struct vmw_fence_manager *fman = fence->fman;
	unsigned int num_fences;

	list_del_init(&fence->head);
	num_fences = --fman->num_fence_objects;
	spin_unlock_irq(&fman->lock);
	if (fence->destroy)
		fence->destroy(fence);
	else
		kfree(fence);

	spin_lock_irq(&fman->lock);
}

/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock_irq(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock_irq(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman::lock is not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(fman == NULL))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock_irqsave(&fman->lock, irq_flags);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence,
			      u32 seqno,
			      uint32_t mask,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	unsigned long irq_flags;
	unsigned int num_fences;
	int ret = 0;

	fence->seqno = seqno;
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->fman = fman;
	fence->signaled = 0;
	fence->signal_mask = mask;
	kref_init(&fence->kref);
	fence->destroy = destroy;
	init_waitqueue_head(&fence->queue);

	spin_lock_irqsave(&fman->lock, irq_flags);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	num_fences = ++fman->num_fence_objects;

out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	return ret;
}

struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
	if (unlikely(fence == NULL))
		return NULL;

	kref_get(&fence->kref);
	return fence;
}

/**
 * vmw_fence_obj_unreference
 *
 * Note that this function may not be entered with disabled irqs since
 * it may re-enable them in the destroy function.
 */
void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
{
	struct vmw_fence_obj *fence = *fence_p;
	struct vmw_fence_manager *fman;

	if (unlikely(fence == NULL))
		return;

	fman = fence->fman;
	*fence_p = NULL;
	spin_lock_irq(&fman->lock);
	BUG_ON(atomic_read(&fence->kref.refcount) == 0);
	kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
	spin_unlock_irq(&fman->lock);
}

void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			iowrite32(fence->seqno,
				  fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;

	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
		return false;

	fifo_mem = fence->fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fence->fman->seqno_valid &&
		   goal_seqno - fence->seqno < VMW_FENCE_WRAP))
		return false;

	iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fence->fman->seqno_valid = true;

	return true;
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	unsigned long flags;
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	spin_lock_irqsave(&fman->lock, flags);
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
			wake_up_all(&fence->queue);
		} else
			break;
	}

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
	spin_unlock_irqrestore(&fman->lock, flags);

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	if (unlikely(needs_rerun)) {
		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
			    uint32_t flags)
{
	struct vmw_fence_manager *fman = fence->fman;
	unsigned long irq_flags;
	uint32_t signaled;

	spin_lock_irqsave(&fman->lock, irq_flags);
	signaled = fence->signaled;
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	flags &= fence->signal_mask;
	if ((signaled & flags) == flags)
		return 1;

	if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
		vmw_fences_update(fman);

	spin_lock_irqsave(&fman->lock, irq_flags);
	signaled = fence->signaled;
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	return ((signaled & flags) == flags);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
		       uint32_t flags, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	struct vmw_private *dev_priv = fence->fman->dev_priv;
	long ret;

	if (likely(vmw_fence_obj_signaled(fence, flags)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	if (interruptible)
		ret = wait_event_interruptible_timeout
			(fence->queue,
			 vmw_fence_obj_signaled(fence, flags),
			 timeout);
	else
		ret = wait_event_timeout
			(fence->queue,
			 vmw_fence_obj_signaled(fence, flags),
			 timeout);

	vmw_seqno_waiter_remove(dev_priv);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fence->fman->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	kfree(fence);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     uint32_t mask,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(fence == NULL))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno, mask,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fence->fman;

	kfree(ufence);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  uint32_t mask,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(ufence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 mask, vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference.
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		kref_get(&fence->kref);
		spin_unlock_irq(&fman->lock);

		ret = vmw_fence_obj_wait(fence, fence->signal_mask,
					 false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
			wake_up_all(&fence->queue);
		}

		spin_lock_irq(&fman->lock);

		BUG_ON(!list_empty(&fence->head));
		kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
	}
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = false;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		       (wait_timeout >> 26);

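	/*
	 * The shift sum above approximates a divide by 10^6:
	 * 2^-20 + 2^-24 - 2^-26 = (64 + 4 - 1) / 2^26 = 67 / 67108864,
	 * which is roughly 1 / 1001625, i.e. within about 0.17% of the
	 * exact 1 / 1000000.
	 */
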
	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Wait invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Fence signaled invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fence->fman;

	arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
	spin_lock_irq(&fman->lock);

	arg->signaled_flags = fence->signaled;
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock_irq(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_destroy
 *
 * @kref: The struct kref embedded in a struct vmw_event_fence_action.
 *
 * The vmw_event_fence_action destructor that may be called either after
 * the fence action cleanup, or when the event is delivered.
 * It frees both the vmw_event_fence_action struct and the actual
 * event structure copied to user-space.
 */
static void vmw_event_fence_action_destroy(struct kref *kref)
{
	struct vmw_event_fence_action *eaction =
		container_of(kref, struct vmw_event_fence_action, kref);
	struct ttm_mem_global *mem_glob =
		vmw_mem_glob(vmw_priv(eaction->dev));
	uint32_t size = eaction->size;

	kfree(eaction->e.event);
	kfree(eaction);
	ttm_mem_global_free(mem_glob, size);
}

/**
 * vmw_event_fence_action_delivered
 *
 * @e: The struct drm_pending_event embedded in a struct
 * vmw_event_fence_action.
 *
 * The struct drm_pending_event destructor that is called by drm
 * once the event is delivered. Since we don't know whether this function
 * will be called before or after the fence action destructor, we
 * drop a refcount and destroy the object when it reaches zero.
 */
static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
{
	struct vmw_event_fence_action *eaction =
		container_of(e, struct vmw_event_fence_action, e);

	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context, and may be called
 * from irq context. It ups a refcount reflecting that we now have two
 * destructors.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_file *file_priv = eaction->e.file_priv;
	unsigned long irq_flags;

	kref_get(&eaction->kref);
	spin_lock_irqsave(&dev->event_lock, irq_flags);

	if (likely(eaction->tv_sec != NULL)) {
		struct timeval tv;

		do_gettimeofday(&tv);
		*eaction->tv_sec = tv.tv_sec;
		*eaction->tv_usec = tv.tv_usec;
	}

	list_add_tail(&eaction->e.link, &file_priv->event_list);
	wake_up_all(&file_priv->event_wait);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
			      struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fence->fman;
	unsigned long irq_flags;
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock_irqsave(&fman->lock, irq_flags);

	fman->pending_actions[action->type]++;
	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock_irqrestore(&fman->lock, irq_flags);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

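/*
 * A minimal sketch (not part of the driver) of how a custom action could
 * be wired up with the helper above; my_seq_passed (runs in atomic/irq
 * context) and my_cleanup (runs from the workqueue) are hypothetical
 * callbacks:
 *
 *	struct vmw_fence_action my_action = {
 *		.seq_passed = my_seq_passed,
 *		.cleanup = my_cleanup,
 *		.type = VMW_ACTION_EVENT,
 *	};
 *	vmw_fence_obj_add_action(fence, &my_action);
 */
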
/**
 * vmw_event_fence_action_create - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to
 * will be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  struct drm_event *event,
				  uint32_t *tv_sec,
				  uint32_t *tv_usec,
				  bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct ttm_mem_global *mem_glob =
		vmw_mem_glob(fence->fman->dev_priv);
	struct vmw_fence_manager *fman = fence->fman;
	uint32_t size = fman->event_fence_action_size +
		ttm_round_pot(event->length);
	int ret;

	/*
	 * Account for internal structure size as well as the
	 * event size itself.
	 */

	ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
	if (unlikely(ret != 0))
		return ret;

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(eaction == NULL)) {
		ttm_mem_global_free(mem_glob, size);
		return -ENOMEM;
	}

	eaction->e.event = event;
	eaction->e.file_priv = file_priv;
	eaction->e.destroy = vmw_event_fence_action_delivered;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->size = size;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	kref_init(&eaction->kref);
	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	unsigned long irq_flags;
	struct drm_vmw_event_fence *event;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			ttm_base_object_lookup(vmw_fp->tfile, arg->handle);

		if (unlikely(base == NULL)) {
			DRM_ERROR("Fence event invalid fence object handle "
				  "0x%08lx.\n",
				  (unsigned long)arg->handle);
			return -EINVAL;
		}
		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			bool existed;

			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, &existed);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	spin_lock_irqsave(&dev->event_lock, irq_flags);

	ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
	if (likely(ret == 0))
		file_priv->event_space -= sizeof(*event);

	spin_unlock_irqrestore(&dev->event_lock, irq_flags);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		goto out_no_event_space;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_event;
	}

	event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->base.length = sizeof(*event);
	event->user_data = arg->user_data;

	if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_create(file_priv, fence,
						    &event->base,
						    &event->tv_sec,
						    &event->tv_usec,
						    true);
	else
		ret = vmw_event_fence_action_create(file_priv, fence,
						    &event->base,
						    NULL,
						    NULL,
						    true);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_attach;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_attach:
	kfree(event);
out_no_event:
	spin_lock_irqsave(&dev->event_lock, irq_flags);
	file_priv->event_space += sizeof(*event);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_event_space:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					  handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}