drm/i915/ringbuffer: Use the HEAD auto-reporting mechanism
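With the ring control register programmed with RING_REPORT_64K instead of RING_NO_REPORT, the GPU periodically writes its current HEAD pointer into the hardware status page. intel_wait_ring_buffer() can then consult that auto-reported value before falling back to polling the HEAD register over uncached MMIO, and intel_ring_begin() now returns an error code so callers can back off when the wait times out or the GPU is wedged. A minimal sketch of the fast path, using the dword-4 status-page slot the patch reads (the helper name below is illustrative only, not part of the driver):

        /* Illustrative only: try to satisfy a space request from the HEAD value
         * the GPU auto-reports into dword 4 of the status page, avoiding an
         * uncached MMIO read of the HEAD register on the common path. */
        static bool ring_space_from_report(struct intel_ring_buffer *ring, int n)
        {
                u32 head = intel_read_status_page(ring, 4);

                if (head == 0)
                        return false;   /* no report yet, fall back to polling */

                ring->head = head & HEAD_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;

                return ring->space >= n;
        }

intel_wait_ring_buffer() in the patch below open-codes exactly this check before entering its polling loop.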
[pandora-kernel.git] / drivers/gpu/drm/i915/intel_ringbuffer.c
index d6eba66..390aa21 100644
@@ -112,10 +112,11 @@ render_ring_flush(struct intel_ring_buffer *ring,
 #if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-               intel_ring_begin(ring, 2);
-               intel_ring_emit(ring, cmd);
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_advance(ring);
+               if (intel_ring_begin(ring, 2) == 0) {
+                       intel_ring_emit(ring, cmd);
+                       intel_ring_emit(ring, MI_NOOP);
+                       intel_ring_advance(ring);
+               }
        }
 }
 
@@ -173,11 +174,12 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 
        I915_WRITE_CTL(ring,
                        ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-                       | RING_NO_REPORT | RING_VALID);
+                       | RING_REPORT_64K | RING_VALID);
 
-       head = I915_READ_HEAD(ring) & HEAD_ADDR;
        /* If the head is still not zero, the ring is dead */
-       if (head != 0) {
+       if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+           I915_READ_START(ring) != obj_priv->gtt_offset ||
+           (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
@@ -233,27 +235,28 @@ do {                                                                      \
  *
  * Returned sequence numbers are nonzero on success.
  */
-static u32
+static int
 render_ring_add_request(struct intel_ring_buffer *ring,
-                       u32 flush_domains)
+                       u32 *result)
 {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 seqno;
-
-       seqno = i915_gem_get_seqno(dev);
+       u32 seqno = i915_gem_get_seqno(dev);
+       int ret;
 
        if (IS_GEN6(dev)) {
-               intel_ring_begin(ring, 6);
+               ret = intel_ring_begin(ring, 6);
+               if (ret)
+                       return ret;
+
                intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3);
                intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE |
-                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
-                        PIPE_CONTROL_NOTIFY);
+                               PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
+                               PIPE_CONTROL_NOTIFY);
                intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                intel_ring_emit(ring, seqno);
                intel_ring_emit(ring, 0);
                intel_ring_emit(ring, 0);
-               intel_ring_advance(ring);
        } else if (HAS_PIPE_CONTROL(dev)) {
                u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
@@ -262,9 +265,12 @@ render_ring_add_request(struct intel_ring_buffer *ring,
                 * PIPE_NOTIFY buffers out to memory before requesting
                 * an interrupt.
                 */
-               intel_ring_begin(ring, 32);
+               ret = intel_ring_begin(ring, 32);
+               if (ret)
+                       return ret;
+
                intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+                               PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
                intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                intel_ring_emit(ring, seqno);
                intel_ring_emit(ring, 0);
@@ -280,22 +286,26 @@ render_ring_add_request(struct intel_ring_buffer *ring,
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(ring, scratch_addr);
                intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
-                        PIPE_CONTROL_NOTIFY);
+                               PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+                               PIPE_CONTROL_NOTIFY);
                intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                intel_ring_emit(ring, seqno);
                intel_ring_emit(ring, 0);
-               intel_ring_advance(ring);
        } else {
-               intel_ring_begin(ring, 4);
+               ret = intel_ring_begin(ring, 4);
+               if (ret)
+                       return ret;
+
                intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
                intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                intel_ring_emit(ring, seqno);
 
                intel_ring_emit(ring, MI_USER_INTERRUPT);
-               intel_ring_advance(ring);
        }
-       return seqno;
+
+       intel_ring_advance(ring);
+       *result = seqno;
+       return 0;
 }
 
 static u32
@@ -359,21 +369,26 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
               u32     invalidate_domains,
               u32     flush_domains)
 {
-       intel_ring_begin(ring, 2);
-       intel_ring_emit(ring, MI_FLUSH);
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_advance(ring);
+       if (intel_ring_begin(ring, 2) == 0) {
+               intel_ring_emit(ring, MI_FLUSH);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_advance(ring);
+       }
 }
 
-static u32
+static int
 ring_add_request(struct intel_ring_buffer *ring,
-                u32 flush_domains)
+                u32 *result)
 {
        u32 seqno;
+       int ret;
+
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               return ret;
 
        seqno = i915_gem_get_seqno(ring->dev);
 
-       intel_ring_begin(ring, 4);
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, seqno);
@@ -381,8 +396,8 @@ ring_add_request(struct intel_ring_buffer *ring,
        intel_ring_advance(ring);
 
        DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
-
-       return seqno;
+       *result = seqno;
+       return 0;
 }
 
 static void
@@ -409,10 +424,14 @@ ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                         uint64_t exec_offset)
 {
        uint32_t exec_start;
+       int ret;
 
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 
-       intel_ring_begin(ring, 2);
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+               return ret;
+
        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        (2 << 6) |
@@ -432,8 +451,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = exec->num_cliprects;
-       int i = 0, count;
        uint32_t exec_start, exec_len;
+       int i, count, ret;
 
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;
@@ -441,23 +460,28 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
        trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
 
        count = nbox ? nbox : 1;
-
        for (i = 0; i < count; i++) {
                if (i < nbox) {
-                       int ret = i915_emit_box(dev, cliprects, i,
-                                               exec->DR1, exec->DR4);
+                       ret = i915_emit_box(dev, cliprects, i,
+                                           exec->DR1, exec->DR4);
                        if (ret)
                                return ret;
                }
 
                if (IS_I830(dev) || IS_845G(dev)) {
-                       intel_ring_begin(ring, 4);
+                       ret = intel_ring_begin(ring, 4);
+                       if (ret)
+                               return ret;
+
                        intel_ring_emit(ring, MI_BATCH_BUFFER);
                        intel_ring_emit(ring, exec_start | MI_BATCH_NON_SECURE);
                        intel_ring_emit(ring, exec_start + exec_len - 4);
                        intel_ring_emit(ring, 0);
                } else {
-                       intel_ring_begin(ring, 2);
+                       ret = intel_ring_begin(ring, 2);
+                       if (ret)
+                               return ret;
+
                        if (INTEL_INFO(dev)->gen >= 4) {
                                intel_ring_emit(ring,
                                                MI_BATCH_BUFFER_START | (2 << 6)
@@ -474,12 +498,13 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
        }
 
        if (IS_G4X(dev) || IS_GEN5(dev)) {
-               intel_ring_begin(ring, 2);
-               intel_ring_emit(ring, MI_FLUSH |
-                               MI_NO_WRITE_FLUSH |
-                               MI_INVALIDATE_ISP );
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_advance(ring);
+               if (intel_ring_begin(ring, 2) == 0) {
+                       intel_ring_emit(ring, MI_FLUSH |
+                                       MI_NO_WRITE_FLUSH |
+                                       MI_INVALIDATE_ISP );
+                       intel_ring_emit(ring, MI_NOOP);
+                       intel_ring_advance(ring);
+               }
        }
        /* XXX breadcrumb */
 
@@ -522,7 +547,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
        obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
 
-       ret = i915_gem_object_pin(obj, 4096);
+       ret = i915_gem_object_pin(obj, 4096, true, false);
        if (ret != 0) {
                goto err_unref;
        }
@@ -553,7 +578,6 @@ err:
 int intel_init_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        struct drm_gem_object *obj;
        int ret;
@@ -578,7 +602,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 
        ring->gem_object = obj;
 
-       ret = i915_gem_object_pin(obj, PAGE_SIZE);
+       ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
        if (ret)
                goto err_unref;
 
@@ -601,16 +625,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
        if (ret)
                goto err_unmap;
 
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               i915_kernel_lost_context(dev);
-       else {
-               ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-               ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->size;
-       }
-       return ret;
+       return 0;
 
 err_unmap:
        drm_core_ioremapfree(&ring->map, dev);
@@ -626,9 +641,17 @@ err_hws:
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 {
+       struct drm_i915_private *dev_priv;
+       int ret;
+
        if (ring->gem_object == NULL)
                return;
 
+       /* Disable the ring buffer. The ring must be idle at this point */
+       dev_priv = ring->dev->dev_private;
+       ret = intel_wait_ring_buffer(ring, ring->size - 8);
+       I915_WRITE_CTL(ring, 0);
+
        drm_core_ioremapfree(&ring->map, ring->dev);
 
        i915_gem_object_unpin(ring->gem_object);
@@ -668,6 +691,17 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long end;
+       u32 head;
+
+       head = intel_read_status_page(ring, 4);
+       if (head) {
+               ring->head = head & HEAD_ADDR;
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->size;
+               if (ring->space >= n)
+                       return 0;
+       }
 
        trace_i915_ring_wait_begin (dev);
        end = jiffies + 3 * HZ;
@@ -688,23 +722,33 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
                }
 
                msleep(1);
+               if (atomic_read(&dev_priv->mm.wedged))
+                       return -EAGAIN;
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end (dev);
        return -EBUSY;
 }
 
-void intel_ring_begin(struct intel_ring_buffer *ring,
-                     int num_dwords)
+int intel_ring_begin(struct intel_ring_buffer *ring,
+                    int num_dwords)
 {
        int n = 4*num_dwords;
+       int ret;
 
-       if (unlikely(ring->tail + n > ring->size))
-               intel_wrap_ring_buffer(ring);
+       if (unlikely(ring->tail + n > ring->size)) {
+               ret = intel_wrap_ring_buffer(ring);
+               if (unlikely(ret))
+                       return ret;
+       }
 
-       if (unlikely(ring->space < n))
-               intel_wait_ring_buffer(ring, n);
+       if (unlikely(ring->space < n)) {
+               ret = intel_wait_ring_buffer(ring, n);
+               if (unlikely(ret))
+                       return ret;
+       }
 
        ring->space -= n;
+       return 0;
 }
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -772,12 +816,13 @@ static void gen6_ring_flush(struct intel_ring_buffer *ring,
                            u32 invalidate_domains,
                            u32 flush_domains)
 {
-       intel_ring_begin(ring, 4);
-       intel_ring_emit(ring, MI_FLUSH_DW);
-       intel_ring_emit(ring, 0);
-       intel_ring_emit(ring, 0);
-       intel_ring_emit(ring, 0);
-       intel_ring_advance(ring);
+       if (intel_ring_begin(ring, 4) == 0) {
+               intel_ring_emit(ring, MI_FLUSH_DW);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 0);
+               intel_ring_advance(ring);
+       }
 }
 
 static int
@@ -787,10 +832,14 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                              uint64_t exec_offset)
 {
        uint32_t exec_start;
+       int ret;
 
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 
-       intel_ring_begin(ring, 2);
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+               return ret;
+
        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, exec_start);