drm/i915: Use a device flag for non-interruptible phases
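This patch stops threading an "interruptible" boolean through the GEM
call chains and keeps the flag on the device instead; the
i915_gem_object_wait_rendering() hunk below, which loses its boolean
argument, is the visible edge of that change. As a rough standalone
model of the pattern (all names here are illustrative, not the kernel
API), a non-interruptible phase flips the device flag around its work
rather than passing the boolean down every call:

#include <stdbool.h>

/* Simplified userspace model of a per-device "interruptible" flag. */
struct device_state {
        bool interruptible;     /* may waits be aborted by signals? */
};

static int wait_for_gpu(struct device_state *dev)
{
        if (dev->interruptible)
                return 0;       /* ~ wait_event_interruptible(...) */
        return 0;               /* ~ wait_event(...) */
}

/* A phase that must not be interrupted (e.g. a modeset) brackets its
 * work with the flag and restores the previous value on exit. */
static int do_uninterruptible_phase(struct device_state *dev)
{
        bool saved = dev->interruptible;
        int ret;

        dev->interruptible = false;
        ret = wait_for_gpu(dev);
        dev->interruptible = saved;
        return ret;
}

int main(void)
{
        struct device_state dev = { .interruptible = true };
        return do_uninterruptible_phase(&dev);
}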
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dcfdf41..71a4a3b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -282,21 +282,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 
        target_offset = to_intel_bo(target_obj)->gtt_offset;
 
-#if WATCH_RELOC
-       DRM_INFO("%s: obj %p offset %08x target %d "
-                "read %08x write %08x gtt %08x "
-                "presumed %08x delta %08x\n",
-                __func__,
-                obj,
-                (int) reloc->offset,
-                (int) reloc->target_handle,
-                (int) reloc->read_domains,
-                (int) reloc->write_domain,
-                (int) target_offset,
-                (int) reloc->presumed_offset,
-                reloc->delta);
-#endif
-
        /* The target buffer should have appeared before us in the
         * exec_object list, so it should have a GTT space bound by now.
         */
@@ -575,7 +560,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
                        if (has_fenced_gpu_access) {
                                if (need_fence) {
-                                       ret = i915_gem_object_get_fence(obj, ring, 1);
+                                       ret = i915_gem_object_get_fence(obj, ring);
                                        if (ret)
                                                break;
                                } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@@ -690,11 +675,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
        /* reacquire the objects */
        eb_reset(eb);
        for (i = 0; i < count; i++) {
-               struct drm_i915_gem_object *obj;
-
                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
-               if (obj == NULL) {
+               if (&obj->base == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
                        ret = -ENOENT;
@@ -749,8 +732,7 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
        if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
                for (i = 0; i < I915_NUM_RINGS; i++)
                        if (flush_rings & (1 << i)) {
-                               ret = i915_gem_flush_ring(dev,
-                                                         &dev_priv->ring[i],
+                               ret = i915_gem_flush_ring(&dev_priv->ring[i],
                                                          invalidate_domains,
                                                          flush_domains);
                                if (ret)
@@ -774,7 +756,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 
        /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
        if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
-               return i915_gem_object_wait_rendering(obj, true);
+               return i915_gem_object_wait_rendering(obj);
 
        idx = intel_ring_sync_index(from, to);
 
@@ -789,7 +771,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                if (request == NULL)
                        return -ENOMEM;
 
-               ret = i915_add_request(obj->base.dev, NULL, request, from);
+               ret = i915_add_request(from, NULL, request);
                if (ret) {
                        kfree(request);
                        return ret;
@@ -817,12 +799,6 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
 
        if (cd.invalidate_domains | cd.flush_domains) {
-#if WATCH_EXEC
-               DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-                         __func__,
-                        cd.invalidate_domains,
-                        cd.flush_domains);
-#endif
                ret = i915_gem_execbuffer_flush(ring->dev,
                                                cd.invalidate_domains,
                                                cd.flush_domains,
@@ -926,6 +902,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
        struct drm_i915_gem_object *obj;
 
        list_for_each_entry(obj, objects, exec_list) {
+               u32 old_read = obj->base.read_domains;
+               u32 old_write = obj->base.write_domain;
+
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->base.write_domain = obj->base.pending_write_domain;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
@@ -939,9 +919,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
                        intel_mark_busy(ring->dev, obj);
                }
 
-               trace_i915_gem_object_change_domain(obj,
-                                                   obj->base.read_domains,
-                                                   obj->base.write_domain);
+               trace_i915_gem_object_change_domain(obj, old_read, old_write);
        }
 }
 
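The old_read/old_write snapshot added above matters because the
object's domains are overwritten before the tracepoint fires; without
it, the trace reported the already-updated values as both old and new.
A minimal standalone model of the fix, with printf standing in for the
tracepoint:

#include <stdint.h>
#include <stdio.h>

struct obj_model {
        uint32_t read_domains, write_domain;
        uint32_t pending_read_domains, pending_write_domain;
};

static void move_to_active(struct obj_model *o)
{
        uint32_t old_read = o->read_domains;    /* snapshot first */
        uint32_t old_write = o->write_domain;

        o->read_domains = o->pending_read_domains;
        o->write_domain = o->pending_write_domain;

        /* ~ trace_i915_gem_object_change_domain(obj, old_read, old_write) */
        printf("change_domain: read %08x->%08x write %08x->%08x\n",
               old_read, o->read_domains, old_write, o->write_domain);
}

int main(void)
{
        struct obj_model o = { .read_domains = 0x1,
                               .pending_read_domains = 0x2 };
        move_to_active(&o);
        return 0;
}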
@@ -963,14 +941,14 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
        if (INTEL_INFO(dev)->gen >= 4)
                invalidate |= I915_GEM_DOMAIN_SAMPLER;
        if (ring->flush(ring, invalidate, 0)) {
-               i915_gem_next_request_seqno(dev, ring);
+               i915_gem_next_request_seqno(ring);
                return;
        }
 
        /* Add a breadcrumb for the completion of the batch buffer */
        request = kzalloc(sizeof(*request), GFP_KERNEL);
-       if (request == NULL || i915_add_request(dev, file, request, ring)) {
-               i915_gem_next_request_seqno(dev, ring);
+       if (request == NULL || i915_add_request(ring, file, request)) {
+               i915_gem_next_request_seqno(ring);
                kfree(request);
        }
 }
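Two details in the hunk above are easy to misread: both failure paths
still call i915_gem_next_request_seqno(ring), keeping the seqno
bookkeeping consistent even when no request is queued, and kfree(NULL)
is a no-op, which lets the allocation-failure and add-request-failure
branches share one cleanup. A userspace sketch of the ownership rule
(names illustrative; free() standing in for kfree()):

#include <stdlib.h>

struct request_model { unsigned int seqno; };

/* ~ i915_add_request(): takes ownership of rq only when it succeeds. */
static int add_request(struct request_model *rq)
{
        rq->seqno = 1;
        return 0;
}

static void retire_commands(void)
{
        struct request_model *rq = calloc(1, sizeof(*rq));

        if (rq == NULL || add_request(rq)) {
                /* never queued, so we still own it; free(NULL) is safe */
                free(rq);
        }
}

int main(void)
{
        retire_commands();
        return 0;
}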
@@ -1000,10 +978,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-#if WATCH_EXEC
-       DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
        switch (args->flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
        case I915_EXEC_RENDER:
@@ -1113,7 +1087,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
-               if (obj == NULL) {
+               if (&obj->base == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
                        /* prevent error path from reading uninitialized data */
@@ -1174,8 +1148,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret)
                goto err;
 
-       seqno = i915_gem_next_request_seqno(dev, ring);
-       for (i = 0; i < I915_NUM_RINGS-1; i++) {
+       seqno = i915_gem_next_request_seqno(ring);
+       for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
                if (seqno < ring->sync_seqno[i]) {
                        /* The GPU can not handle its semaphore value wrapping,
                         * so every billion or so execbuffers, we need to stall
@@ -1189,6 +1163,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                }
        }
 
+       trace_i915_gem_ring_dispatch(ring, seqno);
+
        exec_start = batch_obj->gtt_offset + args->batch_start_offset;
        exec_len = args->batch_len;
        if (cliprects) {
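Two things change in the loop above: the bound becomes
ARRAY_SIZE(ring->sync_seqno), tying it to the array itself rather than
the related constant I915_NUM_RINGS-1, and the wrap test exploits the
fact that seqnos only grow, so a numerically smaller new seqno means
the 32-bit counter wrapped past a value a semaphore still records. A
small standalone model (array size and values are illustrative):

#include <stddef.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct ring_model {
        uint32_t sync_seqno[3]; /* last seqno synced against each ring */
};

/* Returns nonzero when the new seqno has wrapped below a recorded sync
 * point, i.e. the caller must idle the GPU before reusing semaphores. */
static int seqno_wrapped(const struct ring_model *ring, uint32_t seqno)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
                if (seqno < ring->sync_seqno[i])
                        return 1;
        return 0;
}

int main(void)
{
        struct ring_model ring = { .sync_seqno = { 5, 0, 0 } };
        return seqno_wrapped(&ring, 2) ? 0 : 1; /* 2 < 5: wrapped */
}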
@@ -1245,11 +1221,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret, i;
 
-#if WATCH_EXEC
-       DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
        if (args->buffer_count < 1) {
                DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
@@ -1330,17 +1301,16 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;
 
-#if WATCH_EXEC
-       DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
        if (args->buffer_count < 1) {
                DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }
 
-       exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+       exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
+                            GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+       if (exec2_list == NULL)
+               exec2_list = drm_malloc_ab(sizeof(*exec2_list),
+                                          args->buffer_count);
        if (exec2_list == NULL) {
                DRM_ERROR("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
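The allocation change in the final hunk is a common two-tier pattern:
try a contiguous kmalloc first, flagged __GFP_NOWARN | __GFP_NORETRY so
a large request fails fast and quietly, and only then fall back to
drm_malloc_ab(), which may vmalloc and also guards the count*size
multiplication against overflow. A userspace sketch of the same shape
(malloc()/calloc() standing in for the two kernel allocators):

#include <stdint.h>
#include <stdlib.h>

static void *alloc_exec_list(size_t count, size_t elem)
{
        void *ptr;

        /* drm_malloc_ab() performs this overflow check in the kernel */
        if (elem != 0 && count > SIZE_MAX / elem)
                return NULL;

        ptr = malloc(count * elem);        /* ~ kmalloc(..., __GFP_NORETRY) */
        if (ptr == NULL)
                ptr = calloc(count, elem); /* ~ drm_malloc_ab() fallback */
        return ptr;
}

int main(void)
{
        free(alloc_exec_list(16, 32));
        return 0;
}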