reservation: update api and add some helpers
author: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Tue, 1 Jul 2014 10:57:54 +0000 (12:57 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Jul 2014 20:37:35 +0000 (13:37 -0700)
Move the list of shared fences to a struct, and return it in
reservation_object_get_list().
Add reservation_object_get_excl to get the exclusive fence.

Add reservation_object_reserve_shared(), which reserves space
in the reservation_object for 1 more shared fence.

reservation_object_add_shared_fence() and
reservation_object_add_excl_fence() are used to assign a new
fence to a reservation_object pointer, to complete a reservation.

Changes since v1:
- Add reservation_object_get_excl, reorder code a bit.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Documentation/DocBook/device-drivers.tmpl
drivers/dma-buf/dma-buf.c
drivers/dma-buf/reservation.c
include/linux/reservation.h

index ed0ef00..dd3f278 100644 (file)
@@ -133,6 +133,7 @@ X!Edrivers/base/interface.c
 !Edrivers/dma-buf/seqno-fence.c
 !Iinclude/linux/fence.h
 !Iinclude/linux/seqno-fence.h
+!Edrivers/dma-buf/reservation.c
 !Iinclude/linux/reservation.h
 !Edrivers/base/dma-coherent.c
 !Edrivers/base/dma-mapping.c
index 25e8c41..cb8379d 100644 (file)
@@ -134,7 +134,10 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 {
        struct dma_buf *dmabuf;
        struct reservation_object *resv;
+       struct reservation_object_list *fobj;
+       struct fence *fence_excl;
        unsigned long events;
+       unsigned shared_count;
 
        dmabuf = file->private_data;
        if (!dmabuf || !dmabuf->resv)
@@ -150,12 +153,18 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 
        ww_mutex_lock(&resv->lock, NULL);
 
-       if (resv->fence_excl && (!(events & POLLOUT) ||
-                                resv->fence_shared_count == 0)) {
+       fobj = resv->fence;
+       if (!fobj)
+               goto out;
+
+       shared_count = fobj->shared_count;
+       fence_excl = resv->fence_excl;
+
+       if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
                unsigned long pevents = POLLIN;
 
-               if (resv->fence_shared_count == 0)
+               if (shared_count == 0)
                        pevents |= POLLOUT;
 
                spin_lock_irq(&dmabuf->poll.lock);
@@ -167,19 +176,20 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
                spin_unlock_irq(&dmabuf->poll.lock);
 
                if (events & pevents) {
-                       if (!fence_add_callback(resv->fence_excl,
-                                               &dcb->cb, dma_buf_poll_cb))
+                       if (!fence_add_callback(fence_excl, &dcb->cb,
+                                                      dma_buf_poll_cb)) {
                                events &= ~pevents;
-                       else
+                       } else {
                                /*
                                 * No callback queued, wake up any additional
                                 * waiters.
                                 */
                                dma_buf_poll_cb(NULL, &dcb->cb);
+                       }
                }
        }
 
-       if ((events & POLLOUT) && resv->fence_shared_count > 0) {
+       if ((events & POLLOUT) && shared_count > 0) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
                int i;
 
@@ -194,15 +204,18 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
                if (!(events & POLLOUT))
                        goto out;
 
-               for (i = 0; i < resv->fence_shared_count; ++i)
-                       if (!fence_add_callback(resv->fence_shared[i],
-                                               &dcb->cb, dma_buf_poll_cb)) {
+               for (i = 0; i < shared_count; ++i) {
+                       struct fence *fence = fobj->shared[i];
+
+                       if (!fence_add_callback(fence, &dcb->cb,
+                                               dma_buf_poll_cb)) {
                                events &= ~POLLOUT;
                                break;
                        }
+               }
 
                /* No callback queued, wake up any additional waiters. */
-               if (i == resv->fence_shared_count)
+               if (i == shared_count)
                        dma_buf_poll_cb(NULL, &dcb->cb);
        }
 
index a73fbf3..e616672 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012-2013 Canonical Ltd
+ * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
  *
  * Based on bo.c which bears the following copyright notice,
  * but is dual licensed:
 
 DEFINE_WW_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
+
+/*
+ * Reserve space to add a shared fence to a reservation_object,
+ * must be called with obj->lock held.
+ */
+int reservation_object_reserve_shared(struct reservation_object *obj)
+{
+       struct reservation_object_list *fobj, *old;
+       u32 max;
+
+       old = reservation_object_get_list(obj);
+
+       if (old && old->shared_max) {
+               if (old->shared_count < old->shared_max) {
+                       /* perform an in-place update */
+                       kfree(obj->staged);
+                       obj->staged = NULL;
+                       return 0;
+               } else
+                       max = old->shared_max * 2;
+       } else
+               max = 4;
+
+       /*
+        * resize obj->staged or allocate if it doesn't exist,
+        * noop if already correct size
+        */
+       fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
+                       GFP_KERNEL);
+       if (!fobj)
+               return -ENOMEM;
+
+       obj->staged = fobj;
+       fobj->shared_max = max;
+       return 0;
+}
+EXPORT_SYMBOL(reservation_object_reserve_shared);
+
+static void
+reservation_object_add_shared_inplace(struct reservation_object *obj,
+                                     struct reservation_object_list *fobj,
+                                     struct fence *fence)
+{
+       u32 i;
+
+       for (i = 0; i < fobj->shared_count; ++i) {
+               if (fobj->shared[i]->context == fence->context) {
+                       struct fence *old_fence = fobj->shared[i];
+
+                       fence_get(fence);
+
+                       fobj->shared[i] = fence;
+
+                       fence_put(old_fence);
+                       return;
+               }
+       }
+
+       fence_get(fence);
+       fobj->shared[fobj->shared_count] = fence;
+       /*
+        * make the new fence visible before incrementing
+        * fobj->shared_count
+        */
+       smp_wmb();
+       fobj->shared_count++;
+}
+
+static void
+reservation_object_add_shared_replace(struct reservation_object *obj,
+                                     struct reservation_object_list *old,
+                                     struct reservation_object_list *fobj,
+                                     struct fence *fence)
+{
+       unsigned i;
+
+       fence_get(fence);
+
+       if (!old) {
+               fobj->shared[0] = fence;
+               fobj->shared_count = 1;
+               goto done;
+       }
+
+       /*
+        * no need to bump fence refcounts, rcu_read access
+        * requires the use of kref_get_unless_zero, and the
+        * references from the old struct are carried over to
+        * the new.
+        */
+       fobj->shared_count = old->shared_count;
+
+       for (i = 0; i < old->shared_count; ++i) {
+               if (fence && old->shared[i]->context == fence->context) {
+                       fence_put(old->shared[i]);
+                       fobj->shared[i] = fence;
+                       fence = NULL;
+               } else
+                       fobj->shared[i] = old->shared[i];
+       }
+       if (fence)
+               fobj->shared[fobj->shared_count++] = fence;
+
+done:
+       obj->fence = fobj;
+       kfree(old);
+}
+
+/*
+ * Add a fence to a shared slot, obj->lock must be held, and
+ * reservation_object_reserve_shared_fence has been called.
+ */
+void reservation_object_add_shared_fence(struct reservation_object *obj,
+                                        struct fence *fence)
+{
+       struct reservation_object_list *old, *fobj = obj->staged;
+
+       old = reservation_object_get_list(obj);
+       obj->staged = NULL;
+
+       if (!fobj) {
+               BUG_ON(old->shared_count == old->shared_max);
+               reservation_object_add_shared_inplace(obj, old, fence);
+       } else
+               reservation_object_add_shared_replace(obj, old, fobj, fence);
+}
+EXPORT_SYMBOL(reservation_object_add_shared_fence);
+
+void reservation_object_add_excl_fence(struct reservation_object *obj,
+                                      struct fence *fence)
+{
+       struct fence *old_fence = obj->fence_excl;
+       struct reservation_object_list *old;
+       u32 i = 0;
+
+       old = reservation_object_get_list(obj);
+       if (old) {
+               i = old->shared_count;
+               old->shared_count = 0;
+       }
+
+       if (fence)
+               fence_get(fence);
+
+       obj->fence_excl = fence;
+
+       /* inplace update, no shared fences */
+       while (i--)
+               fence_put(old->shared[i]);
+
+       if (old_fence)
+               fence_put(old_fence);
+}
+EXPORT_SYMBOL(reservation_object_add_excl_fence);
index f3f5746..2affe67 100644 (file)
 
 extern struct ww_class reservation_ww_class;
 
+struct reservation_object_list {
+       u32 shared_count, shared_max;
+       struct fence *shared[];
+};
+
 struct reservation_object {
        struct ww_mutex lock;
 
        struct fence *fence_excl;
-       struct fence **fence_shared;
-       u32 fence_shared_count, fence_shared_max;
+       struct reservation_object_list *fence;
+       struct reservation_object_list *staged;
 };
 
+#define reservation_object_assert_held(obj) \
+       lockdep_assert_held(&(obj)->lock.base)
+
 static inline void
 reservation_object_init(struct reservation_object *obj)
 {
        ww_mutex_init(&obj->lock, &reservation_ww_class);
 
-       obj->fence_shared_count = obj->fence_shared_max = 0;
-       obj->fence_shared = NULL;
        obj->fence_excl = NULL;
+       obj->fence = NULL;
+       obj->staged = NULL;
 }
 
 static inline void
 reservation_object_fini(struct reservation_object *obj)
 {
        int i;
+       struct reservation_object_list *fobj;
 
+       /*
+        * This object should be dead and all references must have
+        * been released to it.
+        */
        if (obj->fence_excl)
                fence_put(obj->fence_excl);
-       for (i = 0; i < obj->fence_shared_count; ++i)
-               fence_put(obj->fence_shared[i]);
-       kfree(obj->fence_shared);
+
+       fobj = obj->fence;
+       if (fobj) {
+               for (i = 0; i < fobj->shared_count; ++i)
+                       fence_put(fobj->shared[i]);
+
+               kfree(fobj);
+       }
+       kfree(obj->staged);
 
        ww_mutex_destroy(&obj->lock);
 }
 
+static inline struct reservation_object_list *
+reservation_object_get_list(struct reservation_object *obj)
+{
+       reservation_object_assert_held(obj);
+
+       return obj->fence;
+}
+
+static inline struct fence *
+reservation_object_get_excl(struct reservation_object *obj)
+{
+       reservation_object_assert_held(obj);
+
+       return obj->fence_excl;
+}
+
+int reservation_object_reserve_shared(struct reservation_object *obj);
+void reservation_object_add_shared_fence(struct reservation_object *obj,
+                                        struct fence *fence);
+
+void reservation_object_add_excl_fence(struct reservation_object *obj,
+                                      struct fence *fence);
+
 #endif /* _LINUX_RESERVATION_H */