Merge tag 'v3.4-rc3' into drm-intel-next-queued
[pandora-kernel.git] drivers/gpu/drm/i915/intel_ringbuffer.c
index f75806e..9d4e5f0 100644
@@ -290,9 +290,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
-       if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
-           I915_READ_START(ring) != obj->gtt_offset ||
-           (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
+       if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+                    I915_READ_START(ring) == obj->gtt_offset &&
+                    (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
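
Note: the hunk above replaces a one-shot register check with a polled one. Instead of declaring the ring dead the instant RING_VALID, the start address, or the head pointer looks wrong, the code now gives the hardware up to 50 ms to settle before reporting the failure. Below is a minimal, self-contained model of that poll-until-true-or-timeout pattern; the macro name, the one-millisecond polling granularity, and the fake register read are illustrative assumptions, not the driver's real wait_for() implementation.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll COND roughly once per millisecond for up to TIMEOUT_MS milliseconds;
 * evaluates to true only if the condition never became true (a timeout),
 * mirroring the "if (wait_for(...))" error path above. */
#define WAIT_FOR(cond, timeout_ms) ({                                      \
        bool __timed_out = true;                                           \
        for (int __ms = 0; __ms < (timeout_ms); __ms++) {                  \
                if (cond) { __timed_out = false; break; }                  \
                nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL); \
        }                                                                  \
        __timed_out;                                                       \
})

/* Hypothetical stand-in for I915_READ_HEAD(): the head drains to zero
 * over a few polls rather than immediately. */
static int fake_head = 64;
static int read_head(void) { return fake_head -= 8; }

int main(void)
{
        if (WAIT_FOR(read_head() == 0, 50))
                fprintf(stderr, "ring initialization failed\n");
        else
                printf("ring started\n");
        return 0;
}
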
@@ -472,21 +472,30 @@ gen6_add_request(struct intel_ring_buffer *ring,
  * @seqno - seqno which the waiter will block on
  */
 static int
-intel_ring_sync(struct intel_ring_buffer *waiter,
-               struct intel_ring_buffer *signaller,
-               int ring,
-               u32 seqno)
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+              struct intel_ring_buffer *signaller,
+              u32 seqno)
 {
        int ret;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;
 
+       /* Throughout all of the GEM code, seqno passed implies our current
+        * seqno is >= the last seqno executed. However for hardware the
+        * comparison is strictly greater than.
+        */
+       seqno -= 1;
+
+       WARN_ON(signaller->semaphore_register[waiter->id] ==
+               MI_SEMAPHORE_SYNC_INVALID);
+
        ret = intel_ring_begin(waiter, 4);
        if (ret)
                return ret;
 
-       intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+       intel_ring_emit(waiter,
+                       dw1 | signaller->semaphore_register[waiter->id]);
        intel_ring_emit(waiter, seqno);
        intel_ring_emit(waiter, 0);
        intel_ring_emit(waiter, MI_NOOP);
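
Note: the new seqno -= 1 above compensates for the semantics mismatch called out in the added comment: GEM considers a request complete once the current seqno is >= the requested one, but the hardware semaphore compare only releases the waiter when the signalled value is strictly greater than the programmed operand. The toy program below works through the off-by-one; gem_done()/hw_released() are illustrative names, and wraparound is handled with the usual signed-difference trick, which may differ from the exact hardware comparison.

#include <assert.h>
#include <stdint.h>

/* GEM convention: request "wanted" is done once current >= wanted. */
static int gem_done(uint32_t current, uint32_t wanted)
{
        return (int32_t)(current - wanted) >= 0;
}

/* Hardware semaphore: released only when signalled > operand. */
static int hw_released(uint32_t signalled, uint32_t operand)
{
        return (int32_t)(signalled - operand) > 0;
}

int main(void)
{
        uint32_t wanted = 100;

        /* Programming "wanted" directly would stall until seqno 101 lands... */
        assert(!hw_released(100, wanted));
        /* ...whereas wanted - 1 releases exactly when seqno 100 does, */
        assert(hw_released(100, wanted - 1));
        /* which is the point at which GEM already considers the request done. */
        assert(gem_done(100, wanted));
        return 0;
}
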
@@ -495,47 +504,6 @@ intel_ring_sync(struct intel_ring_buffer *waiter,
        return 0;
 }
 
-/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
-int
-render_ring_sync_to(struct intel_ring_buffer *waiter,
-                   struct intel_ring_buffer *signaller,
-                   u32 seqno)
-{
-       WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
-       return intel_ring_sync(waiter,
-                              signaller,
-                              RCS,
-                              seqno);
-}
-
-/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
-int
-gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
-                     struct intel_ring_buffer *signaller,
-                     u32 seqno)
-{
-       WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
-       return intel_ring_sync(waiter,
-                              signaller,
-                              VCS,
-                              seqno);
-}
-
-/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
-int
-gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
-                     struct intel_ring_buffer *signaller,
-                     u32 seqno)
-{
-       WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
-       return intel_ring_sync(waiter,
-                              signaller,
-                              BCS,
-                              seqno);
-}
-
-
-
 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                     \
 do {                                                                   \
        intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
@@ -597,27 +565,6 @@ pc_render_add_request(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static int
-render_ring_add_request(struct intel_ring_buffer *ring,
-                       u32 *result)
-{
-       u32 seqno = i915_gem_next_request_seqno(ring);
-       int ret;
-
-       ret = intel_ring_begin(ring, 4);
-       if (ret)
-               return ret;
-
-       intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-       intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, seqno);
-       intel_ring_emit(ring, MI_USER_INTERRUPT);
-       intel_ring_advance(ring);
-
-       *result = seqno;
-       return 0;
-}
-
 static u32
 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
 {
@@ -644,40 +591,43 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
        return pc->cpu_page[0];
 }
 
-static void
-ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
-       dev_priv->gt_irq_mask &= ~mask;
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-       POSTING_READ(GTIMR);
-}
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
-static void
-ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-       dev_priv->gt_irq_mask |= mask;
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-       POSTING_READ(GTIMR);
-}
+       if (!dev->irq_enabled)
+               return false;
 
-static void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-       dev_priv->irq_mask &= ~mask;
-       I915_WRITE(IMR, dev_priv->irq_mask);
-       POSTING_READ(IMR);
+       spin_lock(&ring->irq_lock);
+       if (ring->irq_refcount++ == 0) {
+               dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+               POSTING_READ(GTIMR);
+       }
+       spin_unlock(&ring->irq_lock);
+
+       return true;
 }
 
 static void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
 {
-       dev_priv->irq_mask |= mask;
-       I915_WRITE(IMR, dev_priv->irq_mask);
-       POSTING_READ(IMR);
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       spin_lock(&ring->irq_lock);
+       if (--ring->irq_refcount == 0) {
+               dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+               POSTING_READ(GTIMR);
+       }
+       spin_unlock(&ring->irq_lock);
 }
 
 static bool
-render_ring_get_irq(struct intel_ring_buffer *ring)
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -687,11 +637,9 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
 
        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
-               if (HAS_PCH_SPLIT(dev))
-                       ironlake_enable_irq(dev_priv,
-                                           GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
-               else
-                       i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+               dev_priv->irq_mask &= ~ring->irq_enable_mask;
+               I915_WRITE(IMR, dev_priv->irq_mask);
+               POSTING_READ(IMR);
        }
        spin_unlock(&ring->irq_lock);
 
@@ -699,19 +647,16 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
 }
 
 static void
-render_ring_put_irq(struct intel_ring_buffer *ring)
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
-               if (HAS_PCH_SPLIT(dev))
-                       ironlake_disable_irq(dev_priv,
-                                            GT_USER_INTERRUPT |
-                                            GT_PIPE_NOTIFY);
-               else
-                       i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+               dev_priv->irq_mask |= ring->irq_enable_mask;
+               I915_WRITE(IMR, dev_priv->irq_mask);
+               POSTING_READ(IMR);
        }
        spin_unlock(&ring->irq_lock);
 }
@@ -765,7 +710,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-ring_add_request(struct intel_ring_buffer *ring,
+i9xx_add_request(struct intel_ring_buffer *ring,
                 u32 *result)
 {
        u32 seqno;
@@ -788,7 +733,7 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -803,9 +748,10 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 
        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
-               ring->irq_mask &= ~rflag;
-               I915_WRITE_IMR(ring, ring->irq_mask);
-               ironlake_enable_irq(dev_priv, gflag);
+               I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+               dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+               POSTING_READ(GTIMR);
        }
        spin_unlock(&ring->irq_lock);
 
@@ -813,60 +759,25 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 }
 
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
-               ring->irq_mask |= rflag;
-               I915_WRITE_IMR(ring, ring->irq_mask);
-               ironlake_disable_irq(dev_priv, gflag);
+               I915_WRITE_IMR(ring, ~0);
+               dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+               POSTING_READ(GTIMR);
        }
        spin_unlock(&ring->irq_lock);
 
        gen6_gt_force_wake_put(dev_priv);
 }
 
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-       struct drm_device *dev = ring->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       if (!dev->irq_enabled)
-               return false;
-
-       spin_lock(&ring->irq_lock);
-       if (ring->irq_refcount++ == 0) {
-               if (IS_G4X(dev))
-                       i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-               else
-                       ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-       }
-       spin_unlock(&ring->irq_lock);
-
-       return true;
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-       struct drm_device *dev = ring->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       spin_lock(&ring->irq_lock);
-       if (--ring->irq_refcount == 0) {
-               if (IS_G4X(dev))
-                       i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-               else
-                       ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-       }
-       spin_unlock(&ring->irq_lock);
-}
-
 static int
-ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
        int ret;
 
@@ -884,37 +795,36 @@ ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 }
 
 static int
-render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                u32 offset, u32 len)
 {
-       struct drm_device *dev = ring->dev;
        int ret;
 
-       if (IS_I830(dev) || IS_845G(dev)) {
-               ret = intel_ring_begin(ring, 4);
-               if (ret)
-                       return ret;
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               return ret;
 
-               intel_ring_emit(ring, MI_BATCH_BUFFER);
-               intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-               intel_ring_emit(ring, offset + len - 8);
-               intel_ring_emit(ring, 0);
-       } else {
-               ret = intel_ring_begin(ring, 2);
-               if (ret)
-                       return ret;
+       intel_ring_emit(ring, MI_BATCH_BUFFER);
+       intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+       intel_ring_emit(ring, offset + len - 8);
+       intel_ring_emit(ring, 0);
+       intel_ring_advance(ring);
 
-               if (INTEL_INFO(dev)->gen >= 4) {
-                       intel_ring_emit(ring,
-                                       MI_BATCH_BUFFER_START | (2 << 6) |
-                                       MI_BATCH_NON_SECURE_I965);
-                       intel_ring_emit(ring, offset);
-               } else {
-                       intel_ring_emit(ring,
-                                       MI_BATCH_BUFFER_START | (2 << 6));
-                       intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-               }
-       }
+       return 0;
+}
+
+static int
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                               u32 offset, u32 len)
+{
+       int ret;
+
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, MI_BATCH_BUFFER_START | (2 << 6));
+       intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
        intel_ring_advance(ring);
 
        return 0;
@@ -991,10 +901,10 @@ int intel_init_ring_buffer(struct drm_device *dev,
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);
+       ring->size = 32 * PAGE_SIZE;
 
        init_waitqueue_head(&ring->irq_queue);
        spin_lock_init(&ring->irq_lock);
-       ring->irq_mask = ~0;
 
        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(ring);
@@ -1261,44 +1171,6 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
        ring->write_tail(ring, ring->tail);
 }
 
-static const struct intel_ring_buffer render_ring = {
-       .name                   = "render ring",
-       .id                     = RCS,
-       .mmio_base              = RENDER_RING_BASE,
-       .size                   = 32 * PAGE_SIZE,
-       .init                   = init_render_ring,
-       .write_tail             = ring_write_tail,
-       .flush                  = render_ring_flush,
-       .add_request            = render_ring_add_request,
-       .get_seqno              = ring_get_seqno,
-       .irq_get                = render_ring_get_irq,
-       .irq_put                = render_ring_put_irq,
-       .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
-       .cleanup                = render_ring_cleanup,
-       .sync_to                = render_ring_sync_to,
-       .semaphore_register     = {MI_SEMAPHORE_SYNC_INVALID,
-                                  MI_SEMAPHORE_SYNC_RV,
-                                  MI_SEMAPHORE_SYNC_RB},
-       .signal_mbox            = {GEN6_VRSYNC, GEN6_BRSYNC},
-};
-
-/* ring buffer for bit-stream decoder */
-
-static const struct intel_ring_buffer bsd_ring = {
-       .name                   = "bsd ring",
-       .id                     = VCS,
-       .mmio_base              = BSD_RING_BASE,
-       .size                   = 32 * PAGE_SIZE,
-       .init                   = init_ring_common,
-       .write_tail             = ring_write_tail,
-       .flush                  = bsd_ring_flush,
-       .add_request            = ring_add_request,
-       .get_seqno              = ring_get_seqno,
-       .irq_get                = bsd_ring_get_irq,
-       .irq_put                = bsd_ring_put_irq,
-       .dispatch_execbuffer    = ring_dispatch_execbuffer,
-};
-
 
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                     u32 value)
@@ -1361,77 +1233,8 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
-       return gen6_ring_get_irq(ring,
-                                GT_USER_INTERRUPT,
-                                GEN6_RENDER_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
-       return gen6_ring_put_irq(ring,
-                                GT_USER_INTERRUPT,
-                                GEN6_RENDER_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-       return gen6_ring_get_irq(ring,
-                                GT_GEN6_BSD_USER_INTERRUPT,
-                                GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-       return gen6_ring_put_irq(ring,
-                                GT_GEN6_BSD_USER_INTERRUPT,
-                                GEN6_BSD_USER_INTERRUPT);
-}
-
-/* ring buffer for Video Codec for Gen6+ */
-static const struct intel_ring_buffer gen6_bsd_ring = {
-       .name                   = "gen6 bsd ring",
-       .id                     = VCS,
-       .mmio_base              = GEN6_BSD_RING_BASE,
-       .size                   = 32 * PAGE_SIZE,
-       .init                   = init_ring_common,
-       .write_tail             = gen6_bsd_ring_write_tail,
-       .flush                  = gen6_ring_flush,
-       .add_request            = gen6_add_request,
-       .get_seqno              = gen6_ring_get_seqno,
-       .irq_get                = gen6_bsd_ring_get_irq,
-       .irq_put                = gen6_bsd_ring_put_irq,
-       .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
-       .sync_to                = gen6_bsd_ring_sync_to,
-       .semaphore_register     = {MI_SEMAPHORE_SYNC_VR,
-                                  MI_SEMAPHORE_SYNC_INVALID,
-                                  MI_SEMAPHORE_SYNC_VB},
-       .signal_mbox            = {GEN6_RVSYNC, GEN6_BVSYNC},
-};
-
 /* Blitter support (SandyBridge+) */
 
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-       return gen6_ring_get_irq(ring,
-                                GT_BLT_USER_INTERRUPT,
-                                GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
-       gen6_ring_put_irq(ring,
-                         GT_BLT_USER_INTERRUPT,
-                         GEN6_BLITTER_USER_INTERRUPT);
-}
-
 static int blt_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate, u32 flush)
 {
@@ -1453,42 +1256,55 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static const struct intel_ring_buffer gen6_blt_ring = {
-       .name                   = "blt ring",
-       .id                     = BCS,
-       .mmio_base              = BLT_RING_BASE,
-       .size                   = 32 * PAGE_SIZE,
-       .init                   = init_ring_common,
-       .write_tail             = ring_write_tail,
-       .flush                  = blt_ring_flush,
-       .add_request            = gen6_add_request,
-       .get_seqno              = gen6_ring_get_seqno,
-       .irq_get                = blt_ring_get_irq,
-       .irq_put                = blt_ring_put_irq,
-       .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
-       .sync_to                = gen6_blt_ring_sync_to,
-       .semaphore_register     = {MI_SEMAPHORE_SYNC_BR,
-                                  MI_SEMAPHORE_SYNC_BV,
-                                  MI_SEMAPHORE_SYNC_INVALID},
-       .signal_mbox            = {GEN6_RBSYNC, GEN6_VBSYNC},
-};
-
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
-       *ring = render_ring;
+       ring->name = "render ring";
+       ring->id = RCS;
+       ring->mmio_base = RENDER_RING_BASE;
+
        if (INTEL_INFO(dev)->gen >= 6) {
                ring->add_request = gen6_add_request;
                ring->flush = gen6_render_ring_flush;
-               ring->irq_get = gen6_render_ring_get_irq;
-               ring->irq_put = gen6_render_ring_put_irq;
+               ring->irq_get = gen6_ring_get_irq;
+               ring->irq_put = gen6_ring_put_irq;
+               ring->irq_enable_mask = GT_USER_INTERRUPT;
                ring->get_seqno = gen6_ring_get_seqno;
+               ring->sync_to = gen6_ring_sync;
+               ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+               ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+               ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+               ring->signal_mbox[0] = GEN6_VRSYNC;
+               ring->signal_mbox[1] = GEN6_BRSYNC;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
+               ring->flush = render_ring_flush;
                ring->get_seqno = pc_render_get_seqno;
+               ring->irq_get = gen5_ring_get_irq;
+               ring->irq_put = gen5_ring_put_irq;
+               ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+       } else {
+               ring->add_request = i9xx_add_request;
+               ring->flush = render_ring_flush;
+               ring->get_seqno = ring_get_seqno;
+               ring->irq_get = i9xx_ring_get_irq;
+               ring->irq_put = i9xx_ring_put_irq;
+               ring->irq_enable_mask = I915_USER_INTERRUPT;
        }
+       ring->write_tail = ring_write_tail;
+       if (INTEL_INFO(dev)->gen >= 6)
+               ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+       else if (INTEL_INFO(dev)->gen >= 4)
+               ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+       else if (IS_I830(dev) || IS_845G(dev))
+               ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+       else
+               ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+       ring->init = init_render_ring;
+       ring->cleanup = render_ring_cleanup;
+
 
        if (!I915_NEED_GFX_HWS(dev)) {
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1503,16 +1319,34 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
-       *ring = render_ring;
+       ring->name = "render ring";
+       ring->id = RCS;
+       ring->mmio_base = RENDER_RING_BASE;
+
        if (INTEL_INFO(dev)->gen >= 6) {
-               ring->add_request = gen6_add_request;
-               ring->irq_get = gen6_render_ring_get_irq;
-               ring->irq_put = gen6_render_ring_put_irq;
-       } else if (IS_GEN5(dev)) {
-               ring->add_request = pc_render_add_request;
-               ring->get_seqno = pc_render_get_seqno;
+               /* non-kms not supported on gen6+ */
+               return -ENODEV;
        }
 
+       /* Note: gem is not supported on gen5/ilk without kms (the corresponding
+        * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+        * the special gen5 functions. */
+       ring->add_request = i9xx_add_request;
+       ring->flush = render_ring_flush;
+       ring->get_seqno = ring_get_seqno;
+       ring->irq_get = i9xx_ring_get_irq;
+       ring->irq_put = i9xx_ring_put_irq;
+       ring->irq_enable_mask = I915_USER_INTERRUPT;
+       ring->write_tail = ring_write_tail;
+       if (INTEL_INFO(dev)->gen >= 4)
+               ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+       else if (IS_I830(dev) || IS_845G(dev))
+               ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+       else
+               ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+       ring->init = init_render_ring;
+       ring->cleanup = render_ring_cleanup;
+
        if (!I915_NEED_GFX_HWS(dev))
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 
@@ -1548,10 +1382,46 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
 
-       if (IS_GEN6(dev) || IS_GEN7(dev))
-               *ring = gen6_bsd_ring;
-       else
-               *ring = bsd_ring;
+       ring->name = "bsd ring";
+       ring->id = VCS;
+
+       ring->write_tail = ring_write_tail;
+       if (IS_GEN6(dev) || IS_GEN7(dev)) {
+               ring->mmio_base = GEN6_BSD_RING_BASE;
+               /* gen6 bsd needs a special wa for tail updates */
+               if (IS_GEN6(dev))
+                       ring->write_tail = gen6_bsd_ring_write_tail;
+               ring->flush = gen6_ring_flush;
+               ring->add_request = gen6_add_request;
+               ring->get_seqno = gen6_ring_get_seqno;
+               ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+               ring->irq_get = gen6_ring_get_irq;
+               ring->irq_put = gen6_ring_put_irq;
+               ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+               ring->sync_to = gen6_ring_sync;
+               ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
+               ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
+               ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
+               ring->signal_mbox[0] = GEN6_RVSYNC;
+               ring->signal_mbox[1] = GEN6_BVSYNC;
+       } else {
+               ring->mmio_base = BSD_RING_BASE;
+               ring->flush = bsd_ring_flush;
+               ring->add_request = i9xx_add_request;
+               ring->get_seqno = ring_get_seqno;
+               if (IS_GEN5(dev)) {
+                       ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+                       ring->irq_get = gen5_ring_get_irq;
+                       ring->irq_put = gen5_ring_put_irq;
+               } else {
+                       ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+                       ring->irq_get = i9xx_ring_get_irq;
+                       ring->irq_put = i9xx_ring_put_irq;
+               }
+               ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+       }
+       ring->init = init_ring_common;
+
 
        return intel_init_ring_buffer(dev, ring);
 }
@@ -1561,7 +1431,25 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
 
-       *ring = gen6_blt_ring;
+       ring->name = "blitter ring";
+       ring->id = BCS;
+
+       ring->mmio_base = BLT_RING_BASE;
+       ring->write_tail = ring_write_tail;
+       ring->flush = blt_ring_flush;
+       ring->add_request = gen6_add_request;
+       ring->get_seqno = gen6_ring_get_seqno;
+       ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+       ring->irq_get = gen6_ring_get_irq;
+       ring->irq_put = gen6_ring_put_irq;
+       ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+       ring->sync_to = gen6_ring_sync;
+       ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
+       ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
+       ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
+       ring->signal_mbox[0] = GEN6_RBSYNC;
+       ring->signal_mbox[1] = GEN6_VBSYNC;
+       ring->init = init_ring_common;
 
        return intel_init_ring_buffer(dev, ring);
 }
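
Note: the initialisation hunks above are the heart of the change. The static const template structs (render_ring, bsd_ring, gen6_blt_ring) that used to be copied wholesale with "*ring = render_ring;" are gone, and each intel_init_*_ring_buffer() now fills in the ring's function pointers explicitly, picking the right variant per hardware generation. The sketch below models that design in miniature; every name in it is made up for illustration and is not part of the driver.

#include <stdio.h>

struct toy_ring {
        const char *name;
        int (*dispatch)(struct toy_ring *ring, unsigned offset, unsigned len);
};

static int dispatch_i830(struct toy_ring *r, unsigned offset, unsigned len)
{
        /* Older parts take an explicit start/end batch buffer command. */
        printf("%s: MI_BATCH_BUFFER %u..%u\n", r->name, offset, offset + len);
        return 0;
}

static int dispatch_i965(struct toy_ring *r, unsigned offset, unsigned len)
{
        /* Newer parts only need the start address. */
        (void)len;
        printf("%s: MI_BATCH_BUFFER_START %u\n", r->name, offset);
        return 0;
}

/* Fill in the vtable at init time based on the detected generation,
 * instead of copying a per-generation const template struct. */
static void toy_init_render_ring(struct toy_ring *ring, int gen)
{
        ring->name = "render ring";
        ring->dispatch = (gen >= 4) ? dispatch_i965 : dispatch_i830;
}

int main(void)
{
        struct toy_ring ring;

        toy_init_render_ring(&ring, 6);
        return ring.dispatch(&ring, 4096, 256);
}
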