/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
struct vmw_fence_manager {
        int num_fence_objects;
        struct vmw_private *dev_priv;
        spinlock_t lock;
        struct list_head fence_list;
        struct work_struct work;
        u32 user_fence_size;
        u32 fence_size;
        bool fifo_down;
        struct list_head cleanup_list;
};
struct vmw_user_fence {
        struct ttm_base_object base;
        struct vmw_fence_obj fence;
};
/**
 * vmw_fence_obj_destroy_locked - Final kref callback for a fence object.
 *
 * Called with the fence manager lock held. The lock is dropped around
 * the destroy callback and reacquired before returning, since callers
 * invoke kref_put() with the lock held.
 */
static void vmw_fence_obj_destroy_locked(struct kref *kref)
{
        struct vmw_fence_obj *fence =
                container_of(kref, struct vmw_fence_obj, kref);

        struct vmw_fence_manager *fman = fence->fman;
        unsigned int num_fences;

        list_del_init(&fence->head);
        num_fences = --fman->num_fence_objects;
        spin_unlock_irq(&fman->lock);
        fence->destroy(fence);
        spin_lock_irq(&fman->lock);
}
/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
        struct vmw_fence_manager *fman =
                container_of(work, struct vmw_fence_manager, work);
        struct list_head list;
        struct vmw_fence_action *action, *next_action;

        do {
                INIT_LIST_HEAD(&list);
                spin_lock_irq(&fman->lock);
                list_splice_init(&fman->cleanup_list, &list);
                spin_unlock_irq(&fman->lock);

                if (list_empty(&list))
                        return;

                /*
                 * At this point, only we should be able to manipulate the
                 * list heads of the actions we have on the private list.
                 */
                list_for_each_entry_safe(action, next_action, &list, head) {
                        list_del_init(&action->head);
                        action->cleanup(action);
                }
        } while (1);
}
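/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns the new fence manager, or NULL if allocation fails.
 */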
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
        struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

        if (unlikely(fman == NULL))
                return NULL;

        fman->dev_priv = dev_priv;
        spin_lock_init(&fman->lock);
        INIT_LIST_HEAD(&fman->fence_list);
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
        fman->fifo_down = true;
        fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
        fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));

        return fman;
}
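/**
 * vmw_fence_manager_takedown - Tear down a fence manager.
 * @fman: Pointer to the fence manager.
 *
 * Flushes the cleanup worker and requires that all fence objects have
 * already been destroyed; both the fence list and the cleanup list must
 * be empty at this point.
 */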
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;
        bool lists_empty;

        (void) cancel_work_sync(&fman->work);

        spin_lock_irqsave(&fman->lock, irq_flags);
        lists_empty = list_empty(&fman->fence_list) &&
                      list_empty(&fman->cleanup_list);
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        BUG_ON(!lists_empty);
        kfree(fman);
}
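/**
 * vmw_fence_obj_init - Initialize a fence object and add it to the
 * fence manager's list of fences.
 *
 * Returns -EBUSY if the fifo is down and no new fence objects may be
 * created, 0 otherwise.
 */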
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
                              struct vmw_fence_obj *fence,
                              u32 seqno,
                              uint32_t mask,
                              void (*destroy) (struct vmw_fence_obj *fence))
{
        unsigned long irq_flags;
        unsigned int num_fences;
        int ret = 0;

        fence->seqno = seqno;
        INIT_LIST_HEAD(&fence->seq_passed_actions);
        fence->fman = fman;
        fence->signaled = 0;
        fence->signal_mask = mask;
        kref_init(&fence->kref);
        fence->destroy = destroy;
        init_waitqueue_head(&fence->queue);

        spin_lock_irqsave(&fman->lock, irq_flags);
        if (unlikely(fman->fifo_down)) {
                ret = -EBUSY;
                goto out_unlock;
        }
        list_add_tail(&fence->head, &fman->fence_list);
        num_fences = ++fman->num_fence_objects;

out_unlock:
        spin_unlock_irqrestore(&fman->lock, irq_flags);
        return ret;
}
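/**
 * vmw_fence_obj_reference - Take a reference on a fence object.
 */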
struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
        kref_get(&fence->kref);
        return fence;
}
/**
 * vmw_fence_obj_unreference - Drop a reference on a fence object.
 *
 * Note that this function may not be entered with disabled irqs since
 * it may re-enable them in the destroy function.
 */
void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
{
        struct vmw_fence_obj *fence = *fence_p;
        struct vmw_fence_manager *fman = fence->fman;

        *fence_p = NULL;
        spin_lock_irq(&fman->lock);
        BUG_ON(atomic_read(&fence->kref.refcount) == 0);
        kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
        spin_unlock_irq(&fman->lock);
}
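/**
 * vmw_fences_perform_actions - Execute the seq_passed callback of each
 * action on @list, and move actions that need cleanup to the fence
 * manager's cleanup list, where the cleanup worker disposes of them.
 */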
void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
                                struct list_head *list)
{
        struct vmw_fence_action *action, *next_action;

        list_for_each_entry_safe(action, next_action, list, head) {
                list_del_init(&action->head);
                if (action->seq_passed != NULL)
                        action->seq_passed(action);

                /*
                 * Add the cleanup action to the cleanup list so that
                 * it will be performed by a worker task.
                 */
                if (action->cleanup != NULL)
                        list_add_tail(&action->head, &fman->cleanup_list);
        }
}
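/**
 * vmw_fences_update - Signal all fence objects whose seqno has been
 * passed by @seqno, run their actions and wake up waiters. Schedules
 * the cleanup worker if any cleanup actions were queued.
 */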
void vmw_fences_update(struct vmw_fence_manager *fman, u32 seqno)
{
        unsigned long flags;
        struct vmw_fence_obj *fence, *next_fence;
        struct list_head action_list;

        spin_lock_irqsave(&fman->lock, flags);
        list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                /* Wrap-safe comparison: true if fence->seqno <= seqno. */
                if (seqno - fence->seqno < VMW_FENCE_WRAP) {
                        list_del_init(&fence->head);
                        fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                        wake_up_all(&fence->queue);
                }
        }
        if (!list_empty(&fman->cleanup_list))
                (void) schedule_work(&fman->work);
        spin_unlock_irqrestore(&fman->lock, flags);
}
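/**
 * vmw_fence_obj_signaled - Check whether a fence object has signaled
 * the flags in @flags. If the EXEC flag has not yet been seen, the
 * device seqno register is read and the fence list updated first.
 */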
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
                            uint32_t flags)
{
        struct vmw_fence_manager *fman = fence->fman;
        unsigned long irq_flags;
        uint32_t signaled;

        spin_lock_irqsave(&fman->lock, irq_flags);
        signaled = fence->signaled;
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        flags &= fence->signal_mask;
        if ((signaled & flags) == flags)
                return true;

        if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0) {
                struct vmw_private *dev_priv = fman->dev_priv;
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
                u32 seqno;

                seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
                vmw_fences_update(fman, seqno);
        }

        spin_lock_irqsave(&fman->lock, irq_flags);
        signaled = fence->signaled;
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        return ((signaled & flags) == flags);
}
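/**
 * vmw_fence_obj_wait - Wait for a fence object to signal @flags.
 *
 * Returns 0 on success, -EBUSY on timeout, or the error returned by the
 * interruptible wait (typically -ERESTARTSYS) if a signal was received.
 */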
int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
                       uint32_t flags, bool lazy,
                       bool interruptible, unsigned long timeout)
{
        struct vmw_private *dev_priv = fence->fman->dev_priv;
        long ret;

        if (likely(vmw_fence_obj_signaled(fence, flags)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        vmw_seqno_waiter_add(dev_priv);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                        (fence->queue,
                         vmw_fence_obj_signaled(fence, flags),
                         timeout);
        else
                ret = wait_event_timeout
                        (fence->queue,
                         vmw_fence_obj_signaled(fence, flags),
                         timeout);

        vmw_seqno_waiter_remove(dev_priv);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        return ret;
}
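/**
 * vmw_fence_obj_flush - Ping the host so that outstanding fence
 * commands are processed.
 */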
void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
        struct vmw_private *dev_priv = fence->fman->dev_priv;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fence->fman;

        kfree(fence);
        /*
         * Free kernel space accounting.
         */
        ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
                            fman->fence_size);
}
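/**
 * vmw_fence_create - Allocate and initialize a kernel-internal fence
 * object, accounting its size against the TTM global memory limit.
 */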
int vmw_fence_create(struct vmw_fence_manager *fman,
                     uint32_t seqno,
                     uint32_t mask,
                     struct vmw_fence_obj **p_fence)
{
        struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        struct vmw_fence_obj *fence;
        int ret;

        ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
                                   false, false);
        if (unlikely(ret != 0))
                return ret;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (unlikely(fence == NULL)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, fence, seqno, mask,
                                 vmw_fence_destroy);
        if (unlikely(ret != 0))
                goto out_err_init;

        *p_fence = fence;
        return 0;

out_err_init:
        kfree(fence);
out_no_object:
        ttm_mem_global_free(mem_glob, fman->fence_size);
        return ret;
}
static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_user_fence *ufence =
                container_of(fence, struct vmw_user_fence, fence);
        struct vmw_fence_manager *fman = fence->fman;

        kfree(ufence);
        /*
         * Free kernel space accounting.
         */
        ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
                            fman->user_fence_size);
}
static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_fence *ufence =
                container_of(base, struct vmw_user_fence, base);
        struct vmw_fence_obj *fence = &ufence->fence;

        *p_base = NULL;
        vmw_fence_obj_unreference(&fence);
}
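/**
 * vmw_user_fence_create - Allocate a fence object together with a ttm
 * base object, and return both the object and its user-space handle.
 */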
int vmw_user_fence_create(struct drm_file *file_priv,
                          struct vmw_fence_manager *fman,
                          uint32_t seqno,
                          uint32_t mask,
                          struct vmw_fence_obj **p_fence,
                          uint32_t *p_handle)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_fence *ufence;
        struct vmw_fence_obj *tmp;
        struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        int ret;

        /*
         * Kernel memory space accounting, since this object may
         * be created by a user-space request.
         */
        ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
                                   false, false);
        if (unlikely(ret != 0))
                return ret;

        ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
        if (unlikely(ufence == NULL)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
                                 mask, vmw_user_fence_destroy);
        if (unlikely(ret != 0)) {
                kfree(ufence);
                goto out_no_object;
        }

        /*
         * The base object holds a reference which is freed in
         * vmw_user_fence_base_release.
         */
        tmp = vmw_fence_obj_reference(&ufence->fence);
        ret = ttm_base_object_init(tfile, &ufence->base, false,
                                   VMW_RES_FENCE,
                                   &vmw_user_fence_base_release, NULL);

        if (unlikely(ret != 0)) {
                /*
                 * Free the base object's reference
                 */
                vmw_fence_obj_unreference(&tmp);
                goto out_err;
        }

        *p_fence = &ufence->fence;
        *p_handle = ufence->base.hash.key;

        return 0;
out_err:
        tmp = &ufence->fence;
        vmw_fence_obj_unreference(&tmp);
out_no_object:
        ttm_mem_global_free(mem_glob, fman->user_fence_size);
        return ret;
}
/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;
        struct list_head action_list;
        int ret;

        /*
         * The list may be altered while we traverse it, so always
         * restart when we've released the fman->lock.
         */
        spin_lock_irqsave(&fman->lock, irq_flags);
        fman->fifo_down = true;
        while (!list_empty(&fman->fence_list)) {
                struct vmw_fence_obj *fence =
                        list_entry(fman->fence_list.prev, struct vmw_fence_obj,
                                   head);
                kref_get(&fence->kref);
                spin_unlock_irq(&fman->lock);

                ret = vmw_fence_obj_wait(fence, fence->signal_mask,
                                         false, false,
                                         VMW_FENCE_WAIT_TIMEOUT);

                if (unlikely(ret != 0)) {
                        list_del_init(&fence->head);
                        fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                        wake_up_all(&fence->queue);
                }

                spin_lock_irq(&fman->lock);

                BUG_ON(!list_empty(&fence->head));
                kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
        }
        spin_unlock_irqrestore(&fman->lock, irq_flags);
}
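/**
 * vmw_fence_fifo_up - Allow new fence objects to be created again.
 */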
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&fman->lock, irq_flags);
        fman->fifo_down = false;
        spin_unlock_irqrestore(&fman->lock, irq_flags);
}
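/**
 * vmw_fence_obj_wait_ioctl - Ioctl interface for waiting on a user-space
 * fence object handle. The wait deadline is kept in a kernel cookie so
 * that a restarted ioctl does not extend the total wait time.
 */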
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_fence_wait_arg *arg =
                (struct drm_vmw_fence_wait_arg *)data;
        unsigned long timeout;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

        /*
         * 64-bit division is not present on 32-bit systems, so do an
         * approximate divide by 1000000 using shifts:
         * 2^-20 + 2^-24 - 2^-26 is close to 10^-6.
         */
        wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
                (wait_timeout >> 26);

        if (!arg->cookie_valid) {
                arg->cookie_valid = 1;
                arg->kernel_cookie = jiffies + wait_timeout;
        }

        base = ttm_base_object_lookup(tfile, arg->handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Wait invalid fence object handle "
                       "0x%08lx.\n",
                       (unsigned long)arg->handle);
                return -EINVAL;
        }

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);

        timeout = jiffies;
        if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
                ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
                       0 : -EBUSY);
                goto out;
        }

        timeout = (unsigned long)arg->kernel_cookie - timeout;

        ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);

out:
        ttm_base_object_unref(&base);

        /*
         * Optionally unref the fence object.
         */
        if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
                return ttm_ref_object_base_unref(tfile, arg->handle,
                                                 TTM_REF_USAGE);
        return ret;
}
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_vmw_fence_signaled_arg *arg =
                (struct drm_vmw_fence_signaled_arg *) data;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct vmw_fence_manager *fman;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);

        base = ttm_base_object_lookup(tfile, arg->handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Fence signaled invalid fence object handle "
                       "0x%08lx.\n",
                       (unsigned long)arg->handle);
                return -EINVAL;
        }

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
        fman = fence->fman;

        arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
        spin_lock_irq(&fman->lock);

        arg->signaled_flags = fence->signaled;
        arg->passed_seqno = dev_priv->last_read_seqno;
        spin_unlock_irq(&fman->lock);

        ttm_base_object_unref(&base);

        return 0;
}
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_fence_arg *arg =
                (struct drm_vmw_fence_arg *) data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}