vmwgfx: Implement fence objects
pandora-kernel.git: drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

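/**
 * struct vmw_fence_manager - per-device fence object bookkeeping.
 *
 * The spinlock protects the fence list, the cleanup list and the
 * signaled state of individual fence objects. Cleanup of fence
 * actions is deferred to a work item so that it can run outside
 * atomic context.
 */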
struct vmw_fence_manager {
        int num_fence_objects;
        struct vmw_private *dev_priv;
        spinlock_t lock;
        u32 next_seqno;
        struct list_head fence_list;
        struct work_struct work;
        u32 user_fence_size;
        u32 fence_size;
        bool fifo_down;
        struct list_head cleanup_list;
};

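/**
 * struct vmw_user_fence - a fence object exported to user-space as a
 * ttm base object with its own handle.
 */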
struct vmw_user_fence {
        struct ttm_base_object base;
        struct vmw_fence_obj fence;
};

/**
 * vmw_fence_obj_destroy_locked - kref release function for fence objects.
 *
 * Called with the fence manager's lock held. The lock is dropped around
 * the destroy callback and re-acquired before returning.
 */

static void vmw_fence_obj_destroy_locked(struct kref *kref)
{
        struct vmw_fence_obj *fence =
                container_of(kref, struct vmw_fence_obj, kref);

        struct vmw_fence_manager *fman = fence->fman;
        unsigned int num_fences;

        list_del_init(&fence->head);
        num_fences = --fman->num_fence_objects;
        spin_unlock_irq(&fman->lock);
        if (fence->destroy)
                fence->destroy(fence);
        else
                kfree(fence);

        spin_lock_irq(&fman->lock);
}


/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
        struct vmw_fence_manager *fman =
                container_of(work, struct vmw_fence_manager, work);
        struct list_head list;
        struct vmw_fence_action *action, *next_action;

        do {
                INIT_LIST_HEAD(&list);
                spin_lock_irq(&fman->lock);
                list_splice_init(&fman->cleanup_list, &list);
                spin_unlock_irq(&fman->lock);

                if (list_empty(&list))
                        return;

                /*
                 * At this point, only we should be able to manipulate the
                 * list heads of the actions we have on the private list.
                 */

                list_for_each_entry_safe(action, next_action, &list, head) {
                        list_del_init(&action->head);
                        action->cleanup(action);
                }
        } while (1);
}

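/**
 * vmw_fence_manager_init - allocate and initialize a fence manager.
 *
 * Returns NULL on allocation failure. The manager starts with
 * fifo_down set, so fence creation fails with -EBUSY until
 * vmw_fence_fifo_up() has been called.
 */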
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
        struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

        if (unlikely(fman == NULL))
                return NULL;

        fman->dev_priv = dev_priv;
        spin_lock_init(&fman->lock);
        INIT_LIST_HEAD(&fman->fence_list);
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
        fman->fifo_down = true;
        fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
        fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));

        return fman;
}

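/**
 * vmw_fence_manager_takedown - flush pending work and free a fence
 * manager. Both the fence list and the cleanup list must be empty.
 */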
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;
        bool lists_empty;

        (void) cancel_work_sync(&fman->work);

        spin_lock_irqsave(&fman->lock, irq_flags);
        lists_empty = list_empty(&fman->fence_list) &&
                list_empty(&fman->cleanup_list);
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        BUG_ON(!lists_empty);
        kfree(fman);
}

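/**
 * vmw_fence_obj_init - initialize a fence object and add it to the
 * fence manager's fence list.
 *
 * Returns -EBUSY if the fifo is down and new fences may not be added.
 */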
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
                              struct vmw_fence_obj *fence,
                              u32 seqno,
                              uint32_t mask,
                              void (*destroy) (struct vmw_fence_obj *fence))
{
        unsigned long irq_flags;
        unsigned int num_fences;
        int ret = 0;

        fence->seqno = seqno;
        INIT_LIST_HEAD(&fence->seq_passed_actions);
        fence->fman = fman;
        fence->signaled = 0;
        fence->signal_mask = mask;
        kref_init(&fence->kref);
        fence->destroy = destroy;
        init_waitqueue_head(&fence->queue);

        spin_lock_irqsave(&fman->lock, irq_flags);
        if (unlikely(fman->fifo_down)) {
                ret = -EBUSY;
                goto out_unlock;
        }
        list_add_tail(&fence->head, &fman->fence_list);
        num_fences = ++fman->num_fence_objects;

out_unlock:
        spin_unlock_irqrestore(&fman->lock, irq_flags);
        return ret;
}

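/**
 * vmw_fence_obj_reference - take a reference on a fence object.
 */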
struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
        kref_get(&fence->kref);
        return fence;
}

/**
 * vmw_fence_obj_unreference - drop a reference on a fence object.
 *
 * Note that this function must not be called with irqs disabled, since
 * the destroy callback may re-enable them.
 */
void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
{
        struct vmw_fence_obj *fence = *fence_p;
        struct vmw_fence_manager *fman = fence->fman;

        *fence_p = NULL;
        spin_lock_irq(&fman->lock);
        BUG_ON(atomic_read(&fence->kref.refcount) == 0);
        kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
        spin_unlock_irq(&fman->lock);
}

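/**
 * vmw_fences_perform_actions - call the seq_passed callback of each
 * action on @list, and move actions that have a cleanup callback to
 * the fence manager's cleanup list for the worker to process.
 */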
void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
                                struct list_head *list)
{
        struct vmw_fence_action *action, *next_action;

        list_for_each_entry_safe(action, next_action, list, head) {
                list_del_init(&action->head);
                if (action->seq_passed != NULL)
                        action->seq_passed(action);

                /*
                 * Add the cleanup action to the cleanup list so that
                 * it will be performed by a worker task.
                 */

                if (action->cleanup != NULL)
                        list_add_tail(&action->head, &fman->cleanup_list);
        }
}

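/**
 * vmw_fences_update - signal all fence objects whose seqno has been
 * passed by @seqno, perform their actions and wake up waiters.
 * Schedules the cleanup worker if any cleanup actions were queued.
 */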
void vmw_fences_update(struct vmw_fence_manager *fman, u32 seqno)
{
        unsigned long flags;
        struct vmw_fence_obj *fence, *next_fence;
        struct list_head action_list;

        spin_lock_irqsave(&fman->lock, flags);
        list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                if (seqno - fence->seqno < VMW_FENCE_WRAP) {
                        list_del_init(&fence->head);
                        fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                        wake_up_all(&fence->queue);
                }
        }
        if (!list_empty(&fman->cleanup_list))
                (void) schedule_work(&fman->work);
        spin_unlock_irqrestore(&fman->lock, flags);
}


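/**
 * vmw_fence_obj_signaled - check whether a fence object has signaled
 * the given flags. If the EXEC flag has not been seen yet, the device
 * seqno is read from the fifo and vmw_fences_update() is called
 * before re-checking.
 */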
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
                            uint32_t flags)
{
        struct vmw_fence_manager *fman = fence->fman;
        unsigned long irq_flags;
        uint32_t signaled;

        spin_lock_irqsave(&fman->lock, irq_flags);
        signaled = fence->signaled;
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        flags &= fence->signal_mask;
        if ((signaled & flags) == flags)
                return 1;

        if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0) {
                struct vmw_private *dev_priv = fman->dev_priv;
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
                u32 seqno;

                seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
                vmw_fences_update(fman, seqno);
        }

        spin_lock_irqsave(&fman->lock, irq_flags);
        signaled = fence->signaled;
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        return ((signaled & flags) == flags);
}

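/**
 * vmw_fence_obj_wait - wait for a fence object to signal the given
 * flags.
 *
 * Returns 0 on success, -EBUSY on timeout and -ERESTARTSYS if an
 * interruptible wait was interrupted by a signal.
 */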
int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
                       uint32_t flags, bool lazy,
                       bool interruptible, unsigned long timeout)
{
        struct vmw_private *dev_priv = fence->fman->dev_priv;
        long ret;

        if (likely(vmw_fence_obj_signaled(fence, flags)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        vmw_seqno_waiter_add(dev_priv);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                        (fence->queue,
                         vmw_fence_obj_signaled(fence, flags),
                         timeout);
        else
                ret = wait_event_timeout
                        (fence->queue,
                         vmw_fence_obj_signaled(fence, flags),
                         timeout);

        vmw_seqno_waiter_remove(dev_priv);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
        struct vmw_private *dev_priv = fence->fman->dev_priv;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fence->fman;

        kfree(fence);
        /*
         * Free kernel space accounting.
         */
        ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
                            fman->fence_size);
}

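/**
 * vmw_fence_create - create a kernel-only fence object, charging its
 * size to the ttm memory accounting.
 */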
int vmw_fence_create(struct vmw_fence_manager *fman,
                     uint32_t seqno,
                     uint32_t mask,
                     struct vmw_fence_obj **p_fence)
{
        struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        struct vmw_fence_obj *fence;
        int ret;

        ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
                                   false, false);
        if (unlikely(ret != 0))
                return ret;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (unlikely(fence == NULL)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, fence, seqno, mask,
                                 vmw_fence_destroy);
        if (unlikely(ret != 0))
                goto out_err_init;

        *p_fence = fence;
        return 0;

out_err_init:
        kfree(fence);
out_no_object:
        ttm_mem_global_free(mem_glob, fman->fence_size);
        return ret;
}


static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_user_fence *ufence =
                container_of(fence, struct vmw_user_fence, fence);
        struct vmw_fence_manager *fman = fence->fman;

        kfree(ufence);
        /*
         * Free kernel space accounting.
         */
        ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
                            fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_fence *ufence =
                container_of(base, struct vmw_user_fence, base);
        struct vmw_fence_obj *fence = &ufence->fence;

        *p_base = NULL;
        vmw_fence_obj_unreference(&fence);
}

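/**
 * vmw_user_fence_create - create a fence object together with a ttm
 * base object, and return a handle that user-space can wait on or
 * query through the fence ioctls.
 */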
int vmw_user_fence_create(struct drm_file *file_priv,
                          struct vmw_fence_manager *fman,
                          uint32_t seqno,
                          uint32_t mask,
                          struct vmw_fence_obj **p_fence,
                          uint32_t *p_handle)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_fence *ufence;
        struct vmw_fence_obj *tmp;
        struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        int ret;

        /*
         * Kernel memory space accounting, since this object may
         * be created by a user-space request.
         */

        ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
                                   false, false);
        if (unlikely(ret != 0))
                return ret;

        ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
        if (unlikely(ufence == NULL)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
                                 mask, vmw_user_fence_destroy);
        if (unlikely(ret != 0)) {
                kfree(ufence);
                goto out_no_object;
        }

        /*
         * The base object holds a reference which is freed in
         * vmw_user_fence_base_release.
         */
        tmp = vmw_fence_obj_reference(&ufence->fence);
        ret = ttm_base_object_init(tfile, &ufence->base, false,
                                   VMW_RES_FENCE,
                                   &vmw_user_fence_base_release, NULL);

        if (unlikely(ret != 0)) {
                /*
                 * Free the base object's reference
                 */
                vmw_fence_obj_unreference(&tmp);
                goto out_err;
        }

        *p_fence = &ufence->fence;
        *p_handle = ufence->base.hash.key;

        return 0;
out_err:
        tmp = &ufence->fence;
        vmw_fence_obj_unreference(&tmp);
out_no_object:
        ttm_mem_global_free(mem_glob, fman->user_fence_size);
        return ret;
}


/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;
        struct list_head action_list;
        int ret;

        /*
         * The list may be altered while we traverse it, so always
         * restart when we've released the fman->lock.
         */

        spin_lock_irqsave(&fman->lock, irq_flags);
        fman->fifo_down = true;
        while (!list_empty(&fman->fence_list)) {
                struct vmw_fence_obj *fence =
                        list_entry(fman->fence_list.prev, struct vmw_fence_obj,
                                   head);
                kref_get(&fence->kref);
                spin_unlock_irq(&fman->lock);

                ret = vmw_fence_obj_wait(fence, fence->signal_mask,
                                         false, false,
                                         VMW_FENCE_WAIT_TIMEOUT);

                if (unlikely(ret != 0)) {
                        list_del_init(&fence->head);
                        fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                        wake_up_all(&fence->queue);
                }

                spin_lock_irq(&fman->lock);

                BUG_ON(!list_empty(&fence->head));
                kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
        }
        spin_unlock_irqrestore(&fman->lock, irq_flags);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&fman->lock, irq_flags);
        fman->fifo_down = false;
        spin_unlock_irqrestore(&fman->lock, irq_flags);
}


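/**
 * vmw_fence_obj_wait_ioctl - ioctl entry point for waiting on a fence
 * object identified by a user-space handle.
 */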
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_fence_wait_arg *arg =
            (struct drm_vmw_fence_wait_arg *)data;
        unsigned long timeout;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

        /*
         * 64-bit division is not available on 32-bit systems, so
         * approximate the division by 1000000 (microseconds to
         * seconds) with shifts.
         */

        wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
                (wait_timeout >> 26);

        if (!arg->cookie_valid) {
                arg->cookie_valid = 1;
                arg->kernel_cookie = jiffies + wait_timeout;
        }

        base = ttm_base_object_lookup(tfile, arg->handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Wait invalid fence object handle "
                       "0x%08lx.\n",
                       (unsigned long)arg->handle);
                return -EINVAL;
        }

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);

        timeout = jiffies;
        if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
                ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
                       0 : -EBUSY);
                goto out;
        }

        timeout = (unsigned long)arg->kernel_cookie - timeout;

        ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);

out:
        ttm_base_object_unref(&base);

        /*
         * Optionally unref the fence object.
         */

        if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
                return ttm_ref_object_base_unref(tfile, arg->handle,
                                                 TTM_REF_USAGE);
        return ret;
}

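/**
 * vmw_fence_obj_signaled_ioctl - ioctl entry point for querying the
 * signaled state of a fence object identified by a user-space handle.
 */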
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_vmw_fence_signaled_arg *arg =
                (struct drm_vmw_fence_signaled_arg *) data;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct vmw_fence_manager *fman;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);

        base = ttm_base_object_lookup(tfile, arg->handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Fence signaled invalid fence object handle "
                       "0x%08lx.\n",
                       (unsigned long)arg->handle);
                return -EINVAL;
        }

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
        fman = fence->fman;

        arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
        spin_lock_irq(&fman->lock);

        arg->signaled_flags = fence->signaled;
        arg->passed_seqno = dev_priv->last_read_seqno;
        spin_unlock_irq(&fman->lock);

        ttm_base_object_unref(&base);

        return 0;
}


int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_fence_arg *arg =
                (struct drm_vmw_fence_arg *) data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}