Merge tag 'drm-intel-fixes-2014-02-14' of ssh://git.freedesktop.org/git/drm-intel...
author		Dave Airlie <airlied@redhat.com>	Tue, 18 Feb 2014 06:21:49 +0000 (16:21 +1000)
committer	Dave Airlie <airlied@redhat.com>	Tue, 18 Feb 2014 06:21:49 +0000 (16:21 +1000)
3 fixes plus 1 prep patch, all four cc: stable. Jani will take over from
here; the plan is that he'll handle the 3.14 fixes for the entire release,
just to work things out a bit.

* tag 'drm-intel-fixes-2014-02-14' of ssh://git.freedesktop.org/git/drm-intel:
  drm/i915/dp: add native aux defer retry limit
  drm/i915/dp: increase native aux defer retry timeout
  drm/i915: Prevent MI_DISPLAY_FLIP straddling two cachelines on IVB
  drm/i915: Add intel_ring_cacheline_align()

drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9fa2434..4c16728 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8586,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        if (ring->id == RCS)
                len += 6;
 
+       /*
+        * BSpec MI_DISPLAY_FLIP for IVB:
+        * "The full packet must be contained within the same cache line."
+        *
+        * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
+        * cacheline, if we ever start emitting more commands before
+        * the MI_DISPLAY_FLIP we may need to first emit everything else,
+        * then do the cacheline alignment, and finally emit the
+        * MI_DISPLAY_FLIP.
+        */
+       ret = intel_ring_cacheline_align(ring);
+       if (ret)
+               goto err_unpin;
+
        ret = intel_ring_begin(ring, len);
        if (ret)
                goto err_unpin;
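
[Editor's note, not part of the commit: the BSpec constraint quoted in the comment above
reduces to simple offset arithmetic. Below is a minimal userspace sketch of that arithmetic;
the function names are made up, and the 64-byte cacheline size and 10-dword packet length
are assumptions taken from the hunk above. It is a simplified illustration, not the kernel
helper itself.]

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64u

/* Dwords of MI_NOOP padding needed before the next command starts on a
 * fresh cacheline; 0 when tail is already aligned. */
static uint32_t pad_dwords(uint32_t tail)
{
        uint32_t rem = tail & (CACHELINE_BYTES - 1);

        return rem ? (CACHELINE_BYTES - rem) / sizeof(uint32_t) : 0;
}

/* Does a packet of len bytes emitted at tail cross a cacheline boundary? */
static int straddles(uint32_t tail, uint32_t len)
{
        return tail / CACHELINE_BYTES != (tail + len - 1) / CACHELINE_BYTES;
}

int main(void)
{
        uint32_t len = 10 * sizeof(uint32_t);   /* assumed LRI + SRM + flip size */
        uint32_t tail = 0x1038;                 /* example tail, 56 bytes into a cacheline */

        printf("before padding: straddles=%d\n", straddles(tail, len));
        tail += pad_dwords(tail) * sizeof(uint32_t);
        printf("after padding:  straddles=%d\n", straddles(tail, len));
        return 0;
}

[The prep patch adds intel_ring_cacheline_align() (see intel_ringbuffer.c below) so the
flip path can perform exactly this kind of padding before intel_ring_begin().]
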
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2f517b8..57552eb 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -537,6 +537,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
        uint8_t msg[20];
        int msg_bytes;
        uint8_t ack;
+       int retry;
 
        if (WARN_ON(send_bytes > 16))
                return -E2BIG;
@@ -548,19 +549,21 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
        msg[3] = send_bytes - 1;
        memcpy(&msg[4], send, send_bytes);
        msg_bytes = send_bytes + 4;
-       for (;;) {
+       for (retry = 0; retry < 7; retry++) {
                ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
                if (ret < 0)
                        return ret;
                ack >>= 4;
                if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
-                       break;
+                       return send_bytes;
                else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
-                       udelay(100);
+                       usleep_range(400, 500);
                else
                        return -EIO;
        }
-       return send_bytes;
+
+       DRM_ERROR("too many retries, giving up\n");
+       return -EIO;
 }
 
 /* Write a single byte to the aux channel in native mode */
@@ -582,6 +585,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
        int reply_bytes;
        uint8_t ack;
        int ret;
+       int retry;
 
        if (WARN_ON(recv_bytes > 19))
                return -E2BIG;
@@ -595,7 +599,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
        msg_bytes = 4;
        reply_bytes = recv_bytes + 1;
 
-       for (;;) {
+       for (retry = 0; retry < 7; retry++) {
                ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
                                      reply, reply_bytes);
                if (ret == 0)
@@ -608,10 +612,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
                        return ret - 1;
                }
                else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
-                       udelay(100);
+                       usleep_range(400, 500);
                else
                        return -EIO;
        }
+
+       DRM_ERROR("too many retries, giving up\n");
+       return -EIO;
 }
 
 static int
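
[Editor's note, not part of the commit: both AUX hunks above replace an unbounded wait
loop with the same bounded pattern: up to 7 attempts, a 400-500 us sleep on each native
DEFER reply, and a hard -EIO after that. A standalone userspace sketch of the pattern
follows; the names and the callback are hypothetical stand-ins for intel_dp_aux_ch(),
not the kernel API.]

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

enum aux_reply { AUX_ACK, AUX_DEFER, AUX_ERROR };

/* Hypothetical transfer callback standing in for intel_dp_aux_ch(). */
typedef enum aux_reply (*aux_xfer_fn)(void *ctx);

/* Bounded retry loop: up to 7 attempts, backing off on each DEFER reply,
 * mirroring the loops introduced in the hunks above. */
static int aux_native_xfer(aux_xfer_fn xfer, void *ctx)
{
        int retry;

        for (retry = 0; retry < 7; retry++) {
                switch (xfer(ctx)) {
                case AUX_ACK:
                        return 0;
                case AUX_DEFER:
                        usleep(450);    /* rough stand-in for usleep_range(400, 500) */
                        break;
                default:
                        return -EIO;
                }
        }

        fprintf(stderr, "too many retries, giving up\n");
        return -EIO;
}

/* Fake sink that defers twice, then ACKs. */
static enum aux_reply fake_xfer(void *ctx)
{
        int *calls = ctx;

        return (*calls)++ < 2 ? AUX_DEFER : AUX_ACK;
}

int main(void)
{
        int calls = 0;

        printf("result: %d\n", aux_native_xfer(fake_xfer, &calls));
        return 0;
}

[In the kernel, udelay() busy-waits while usleep_range() actually sleeps; the plain
usleep() above is only a userspace stand-in for the latter.]
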
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b7f1742..31b36c5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1653,6 +1653,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
        return 0;
 }
 
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+{
+       int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
+       int ret;
+
+       if (num_dwords == 0)
+               return 0;
+
+       ret = intel_ring_begin(ring, num_dwords);
+       if (ret)
+               return ret;
+
+       while (num_dwords--)
+               intel_ring_emit(ring, MI_NOOP);
+
+       intel_ring_advance(ring);
+
+       return 0;
+}
+
 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 71a73f4..0b243ce 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
 static inline void intel_ring_emit(struct intel_ring_buffer *ring,
                                   u32 data)
 {