1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drv.h"
33 #include "i915_drm.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
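/*
 * Space left in the ring between the software tail and the hardware head,
 * less an 8 byte reserve so the tail never runs right up against the head.
 */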
37 static inline int ring_space(struct intel_ring_buffer *ring)
38 {
39         int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
40         if (space < 0)
41                 space += ring->size;
42         return space;
43 }
44
45 static u32 i915_gem_get_seqno(struct drm_device *dev)
46 {
47         drm_i915_private_t *dev_priv = dev->dev_private;
48         u32 seqno;
49
50         seqno = dev_priv->next_seqno;
51
52         /* reserve 0 for non-seqno */
53         if (++dev_priv->next_seqno == 0)
54                 dev_priv->next_seqno = 1;
55
56         return seqno;
57 }
58
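/*
 * Translate GEM invalidate/flush domain bits into the appropriate MI_FLUSH
 * variant for the render ring; the long comment below spells out the
 * per-domain rules.
 */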
59 static int
60 render_ring_flush(struct intel_ring_buffer *ring,
61                   u32   invalidate_domains,
62                   u32   flush_domains)
63 {
64         struct drm_device *dev = ring->dev;
65         u32 cmd;
66         int ret;
67
68         if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
69                 /*
70                  * read/write caches:
71                  *
72                  * I915_GEM_DOMAIN_RENDER is always invalidated, but is
73                  * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
74                  * also flushed at 2d versus 3d pipeline switches.
75                  *
76                  * read-only caches:
77                  *
78                  * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
79                  * MI_READ_FLUSH is set, and is always flushed on 965.
80                  *
81                  * I915_GEM_DOMAIN_COMMAND may not exist?
82                  *
83                  * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
84                  * invalidated when MI_EXE_FLUSH is set.
85                  *
86                  * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
87                  * invalidated with every MI_FLUSH.
88                  *
89                  * TLBs:
90                  *
91                  * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
92                  * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
93                  * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
94                  * are flushed at any MI_FLUSH.
95                  */
96
97                 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
98                 if ((invalidate_domains|flush_domains) &
99                     I915_GEM_DOMAIN_RENDER)
100                         cmd &= ~MI_NO_WRITE_FLUSH;
101                 if (INTEL_INFO(dev)->gen < 4) {
102                         /*
103                          * On the 965, the sampler cache always gets flushed
104                          * and this bit is reserved.
105                          */
106                         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
107                                 cmd |= MI_READ_FLUSH;
108                 }
109                 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
110                         cmd |= MI_EXE_FLUSH;
111
112                 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
113                     (IS_G4X(dev) || IS_GEN5(dev)))
114                         cmd |= MI_INVALIDATE_ISP;
115
116                 ret = intel_ring_begin(ring, 2);
117                 if (ret)
118                         return ret;
119
120                 intel_ring_emit(ring, cmd);
121                 intel_ring_emit(ring, MI_NOOP);
122                 intel_ring_advance(ring);
123         }
124
125         return 0;
126 }
127
128 static void ring_write_tail(struct intel_ring_buffer *ring,
129                             u32 value)
130 {
131         drm_i915_private_t *dev_priv = ring->dev->dev_private;
132         I915_WRITE_TAIL(ring, value);
133 }
134
135 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
136 {
137         drm_i915_private_t *dev_priv = ring->dev->dev_private;
138         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
139                         RING_ACTHD(ring->mmio_base) : ACTHD;
140
141         return I915_READ(acthd_reg);
142 }
143
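/*
 * Bring a ring up from scratch: stop it, program the start address of its
 * backing object, check that HEAD really reset to zero (G45 is known to
 * get this wrong, see below), then size and enable it via the CTL register.
 */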
144 static int init_ring_common(struct intel_ring_buffer *ring)
145 {
146         drm_i915_private_t *dev_priv = ring->dev->dev_private;
147         struct drm_i915_gem_object *obj = ring->obj;
148         u32 head;
149
150         /* Stop the ring if it's running. */
151         I915_WRITE_CTL(ring, 0);
152         I915_WRITE_HEAD(ring, 0);
153         ring->write_tail(ring, 0);
154
155         /* Initialize the ring. */
156         I915_WRITE_START(ring, obj->gtt_offset);
157         head = I915_READ_HEAD(ring) & HEAD_ADDR;
158
159         /* G45 ring initialization fails to reset head to zero */
160         if (head != 0) {
161                 DRM_DEBUG_KMS("%s head not reset to zero "
162                               "ctl %08x head %08x tail %08x start %08x\n",
163                               ring->name,
164                               I915_READ_CTL(ring),
165                               I915_READ_HEAD(ring),
166                               I915_READ_TAIL(ring),
167                               I915_READ_START(ring));
168
169                 I915_WRITE_HEAD(ring, 0);
170
171                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
172                         DRM_ERROR("failed to set %s head to zero "
173                                   "ctl %08x head %08x tail %08x start %08x\n",
174                                   ring->name,
175                                   I915_READ_CTL(ring),
176                                   I915_READ_HEAD(ring),
177                                   I915_READ_TAIL(ring),
178                                   I915_READ_START(ring));
179                 }
180         }
181
182         I915_WRITE_CTL(ring,
183                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
184                         | RING_REPORT_64K | RING_VALID);
185
186         /* If the head is still not zero, the ring is dead */
187         if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
188             I915_READ_START(ring) != obj->gtt_offset ||
189             (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
190                 DRM_ERROR("%s initialization failed "
191                                 "ctl %08x head %08x tail %08x start %08x\n",
192                                 ring->name,
193                                 I915_READ_CTL(ring),
194                                 I915_READ_HEAD(ring),
195                                 I915_READ_TAIL(ring),
196                                 I915_READ_START(ring));
197                 return -EIO;
198         }
199
200         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
201                 i915_kernel_lost_context(ring->dev);
202         else {
203                 ring->head = I915_READ_HEAD(ring);
204                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
205                 ring->space = ring_space(ring);
206         }
207
208         return 0;
209 }
210
211 /*
212  * 965+ support PIPE_CONTROL commands, which provide finer grained control
213  * over cache flushing.
214  */
215 struct pipe_control {
216         struct drm_i915_gem_object *obj;
217         volatile u32 *cpu_page;
218         u32 gtt_offset;
219 };
220
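/*
 * Allocate, pin and kmap a 4k cached page used as the target of
 * PIPE_CONTROL writes: the seqno lands at offset 0 and the rest serves as
 * scratch space for the flush workaround in pc_render_add_request().
 */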
221 static int
222 init_pipe_control(struct intel_ring_buffer *ring)
223 {
224         struct pipe_control *pc;
225         struct drm_i915_gem_object *obj;
226         int ret;
227
228         if (ring->private)
229                 return 0;
230
231         pc = kmalloc(sizeof(*pc), GFP_KERNEL);
232         if (!pc)
233                 return -ENOMEM;
234
235         obj = i915_gem_alloc_object(ring->dev, 4096);
236         if (obj == NULL) {
237                 DRM_ERROR("Failed to allocate seqno page\n");
238                 ret = -ENOMEM;
239                 goto err;
240         }
241         obj->agp_type = AGP_USER_CACHED_MEMORY;
242
243         ret = i915_gem_object_pin(obj, 4096, true);
244         if (ret)
245                 goto err_unref;
246
247         pc->gtt_offset = obj->gtt_offset;
248         pc->cpu_page = kmap(obj->pages[0]);
249         if (pc->cpu_page == NULL)
250                 goto err_unpin;
251
252         pc->obj = obj;
253         ring->private = pc;
254         return 0;
255
256 err_unpin:
257         i915_gem_object_unpin(obj);
258 err_unref:
259         drm_gem_object_unreference(&obj->base);
260 err:
261         kfree(pc);
262         return ret;
263 }
264
265 static void
266 cleanup_pipe_control(struct intel_ring_buffer *ring)
267 {
268         struct pipe_control *pc = ring->private;
269         struct drm_i915_gem_object *obj;
270
271         if (!ring->private)
272                 return;
273
274         obj = pc->obj;
275         kunmap(obj->pages[0]);
276         i915_gem_object_unpin(obj);
277         drm_gem_object_unreference(&obj->base);
278
279         kfree(pc);
280         ring->private = NULL;
281 }
282
283 static int init_render_ring(struct intel_ring_buffer *ring)
284 {
285         struct drm_device *dev = ring->dev;
286         struct drm_i915_private *dev_priv = dev->dev_private;
287         int ret = init_ring_common(ring);
288
289         if (INTEL_INFO(dev)->gen > 3) {
290                 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
291                 if (IS_GEN6(dev))
292                         mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
293                 I915_WRITE(MI_MODE, mode);
294         }
295
296         if (INTEL_INFO(dev)->gen >= 6) {
297         } else if (IS_GEN5(dev)) {
298                 ret = init_pipe_control(ring);
299                 if (ret)
300                         return ret;
301         }
302
303         return ret;
304 }
305
306 static void render_ring_cleanup(struct intel_ring_buffer *ring)
307 {
308         if (!ring->private)
309                 return;
310
311         cleanup_pipe_control(ring);
312 }
313
314 static void
315 update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
316 {
317         struct drm_device *dev = ring->dev;
318         struct drm_i915_private *dev_priv = dev->dev_private;
319         int id;
320
321         /*
322          * cs -> 1 = vcs, 0 = bcs,
323          * vcs -> 1 = bcs, 0 = cs,
324          * bcs -> 1 = cs, 0 = vcs.
325          */
326         id = ring - dev_priv->ring;
327         id += 2 - i;
328         id %= 3;
329
330         intel_ring_emit(ring,
331                         MI_SEMAPHORE_MBOX |
332                         MI_SEMAPHORE_REGISTER |
333                         MI_SEMAPHORE_UPDATE);
334         intel_ring_emit(ring, seqno);
335         intel_ring_emit(ring,
336                         RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
337 }
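
/*
 * Worked example of the index arithmetic above, assuming the usual layout
 * of dev_priv->ring[] with cs at 0, vcs at 1 and bcs at 2: for the render
 * ring (id 0), i = 0 yields ring 2 (bcs) and i = 1 yields ring 1 (vcs),
 * matching the mapping described in the comment.
 */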
338
339 static int
340 gen6_add_request(struct intel_ring_buffer *ring,
341                  u32 *result)
342 {
343         u32 seqno;
344         int ret;
345
346         ret = intel_ring_begin(ring, 10);
347         if (ret)
348                 return ret;
349
350         seqno = i915_gem_get_seqno(ring->dev);
351         update_semaphore(ring, 0, seqno);
352         update_semaphore(ring, 1, seqno);
353
354         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
355         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
356         intel_ring_emit(ring, seqno);
357         intel_ring_emit(ring, MI_USER_INTERRUPT);
358         intel_ring_advance(ring);
359
360         *result = seqno;
361         return 0;
362 }
363
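/*
 * Emit a semaphore wait on @ring: MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE
 * stalls @ring until the mailbox it shares with @to reaches @seqno.
 */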
364 int
365 intel_ring_sync(struct intel_ring_buffer *ring,
366                 struct intel_ring_buffer *to,
367                 u32 seqno)
368 {
369         int ret;
370
371         ret = intel_ring_begin(ring, 4);
372         if (ret)
373                 return ret;
374
375         intel_ring_emit(ring,
376                         MI_SEMAPHORE_MBOX |
377                         MI_SEMAPHORE_REGISTER |
378                         intel_ring_sync_index(ring, to) << 17 |
379                         MI_SEMAPHORE_COMPARE);
380         intel_ring_emit(ring, seqno);
381         intel_ring_emit(ring, 0);
382         intel_ring_emit(ring, MI_NOOP);
383         intel_ring_advance(ring);
384
385         return 0;
386 }
387
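/*
 * Emit one depth-stalling PIPE_CONTROL qword write to addr__; used below to
 * flush into successive cachelines of the scratch page before the final
 * notifying write (see the comment in pc_render_add_request()).
 */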
388 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
389 do {                                                                    \
390         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |           \
391                  PIPE_CONTROL_DEPTH_STALL | 2);                         \
392         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
393         intel_ring_emit(ring__, 0);                                                     \
394         intel_ring_emit(ring__, 0);                                                     \
395 } while (0)
396
397 static int
398 pc_render_add_request(struct intel_ring_buffer *ring,
399                       u32 *result)
400 {
401         struct drm_device *dev = ring->dev;
402         u32 seqno = i915_gem_get_seqno(dev);
403         struct pipe_control *pc = ring->private;
404         u32 scratch_addr = pc->gtt_offset + 128;
405         int ret;
406
407         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
408          * incoherent with writes to memory, i.e. completely fubar,
409          * so we need to use PIPE_NOTIFY instead.
410          *
411          * However, we also need to workaround the qword write
412          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
413          * memory before requesting an interrupt.
414          */
415         ret = intel_ring_begin(ring, 32);
416         if (ret)
417                 return ret;
418
419         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
420                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
421         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
422         intel_ring_emit(ring, seqno);
423         intel_ring_emit(ring, 0);
424         PIPE_CONTROL_FLUSH(ring, scratch_addr);
425         scratch_addr += 128; /* write to separate cachelines */
426         PIPE_CONTROL_FLUSH(ring, scratch_addr);
427         scratch_addr += 128;
428         PIPE_CONTROL_FLUSH(ring, scratch_addr);
429         scratch_addr += 128;
430         PIPE_CONTROL_FLUSH(ring, scratch_addr);
431         scratch_addr += 128;
432         PIPE_CONTROL_FLUSH(ring, scratch_addr);
433         scratch_addr += 128;
434         PIPE_CONTROL_FLUSH(ring, scratch_addr);
435         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
436                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
437                         PIPE_CONTROL_NOTIFY);
438         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
439         intel_ring_emit(ring, seqno);
440         intel_ring_emit(ring, 0);
441         intel_ring_advance(ring);
442
443         *result = seqno;
444         return 0;
445 }
446
447 static int
448 render_ring_add_request(struct intel_ring_buffer *ring,
449                         u32 *result)
450 {
451         struct drm_device *dev = ring->dev;
452         u32 seqno = i915_gem_get_seqno(dev);
453         int ret;
454
455         ret = intel_ring_begin(ring, 4);
456         if (ret)
457                 return ret;
458
459         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
460         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
461         intel_ring_emit(ring, seqno);
462         intel_ring_emit(ring, MI_USER_INTERRUPT);
463         intel_ring_advance(ring);
464
465         *result = seqno;
466         return 0;
467 }
468
469 static u32
470 ring_get_seqno(struct intel_ring_buffer *ring)
471 {
472         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
473 }
474
475 static u32
476 pc_render_get_seqno(struct intel_ring_buffer *ring)
477 {
478         struct pipe_control *pc = ring->private;
479         return pc->cpu_page[0];
480 }
481
482 static void
483 ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
484 {
485         dev_priv->gt_irq_mask &= ~mask;
486         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
487         POSTING_READ(GTIMR);
488 }
489
490 static void
491 ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
492 {
493         dev_priv->gt_irq_mask |= mask;
494         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
495         POSTING_READ(GTIMR);
496 }
497
498 static void
499 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
500 {
501         dev_priv->irq_mask &= ~mask;
502         I915_WRITE(IMR, dev_priv->irq_mask);
503         POSTING_READ(IMR);
504 }
505
506 static void
507 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
508 {
509         dev_priv->irq_mask |= mask;
510         I915_WRITE(IMR, dev_priv->irq_mask);
511         POSTING_READ(IMR);
512 }
513
514 static bool
515 render_ring_get_irq(struct intel_ring_buffer *ring)
516 {
517         struct drm_device *dev = ring->dev;
518         drm_i915_private_t *dev_priv = dev->dev_private;
519
520         if (!dev->irq_enabled)
521                 return false;
522
523         spin_lock(&ring->irq_lock);
524         if (ring->irq_refcount++ == 0) {
525                 if (HAS_PCH_SPLIT(dev))
526                         ironlake_enable_irq(dev_priv,
527                                             GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
528                 else
529                         i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
530         }
531         spin_unlock(&ring->irq_lock);
532
533         return true;
534 }
535
536 static void
537 render_ring_put_irq(struct intel_ring_buffer *ring)
538 {
539         struct drm_device *dev = ring->dev;
540         drm_i915_private_t *dev_priv = dev->dev_private;
541
542         spin_lock(&ring->irq_lock);
543         if (--ring->irq_refcount == 0) {
544                 if (HAS_PCH_SPLIT(dev))
545                         ironlake_disable_irq(dev_priv,
546                                              GT_USER_INTERRUPT |
547                                              GT_PIPE_NOTIFY);
548                 else
549                         i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
550         }
551         spin_unlock(&ring->irq_lock);
552 }
553
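/*
 * Point the hardware at the ring's status page by writing its graphics
 * address into the ring's HWS_PGA register (Gen6 uses a different register
 * offset for this).
 */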
554 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
555 {
556         drm_i915_private_t *dev_priv = ring->dev->dev_private;
557         u32 mmio = IS_GEN6(ring->dev) ?
558                 RING_HWS_PGA_GEN6(ring->mmio_base) :
559                 RING_HWS_PGA(ring->mmio_base);
560         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
561         POSTING_READ(mmio);
562 }
563
564 static int
565 bsd_ring_flush(struct intel_ring_buffer *ring,
566                u32     invalidate_domains,
567                u32     flush_domains)
568 {
569         int ret;
570
571         if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
572                 return 0;
573
574         ret = intel_ring_begin(ring, 2);
575         if (ret)
576                 return ret;
577
578         intel_ring_emit(ring, MI_FLUSH);
579         intel_ring_emit(ring, MI_NOOP);
580         intel_ring_advance(ring);
581         return 0;
582 }
583
584 static int
585 ring_add_request(struct intel_ring_buffer *ring,
586                  u32 *result)
587 {
588         u32 seqno;
589         int ret;
590
591         ret = intel_ring_begin(ring, 4);
592         if (ret)
593                 return ret;
594
595         seqno = i915_gem_get_seqno(ring->dev);
596
597         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
598         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
599         intel_ring_emit(ring, seqno);
600         intel_ring_emit(ring, MI_USER_INTERRUPT);
601         intel_ring_advance(ring);
602
603         *result = seqno;
604         return 0;
605 }
606
607 static bool
608 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
609 {
610         struct drm_device *dev = ring->dev;
611         drm_i915_private_t *dev_priv = dev->dev_private;
612
613         if (!dev->irq_enabled)
614                return false;
615
616         spin_lock(&ring->irq_lock);
617         if (ring->irq_refcount++ == 0)
618                 ironlake_enable_irq(dev_priv, flag);
619         spin_unlock(&ring->irq_lock);
620
621         return true;
622 }
623
624 static void
625 ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
626 {
627         struct drm_device *dev = ring->dev;
628         drm_i915_private_t *dev_priv = dev->dev_private;
629
630         spin_lock(&ring->irq_lock);
631         if (--ring->irq_refcount == 0)
632                 ironlake_disable_irq(dev_priv, flag);
633         spin_unlock(&ring->irq_lock);
634 }
635
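/*
 * Gen6 rings have a per-ring IMR in addition to the shared GT IMR: the
 * refcounted get path unmasks the interrupt in both registers, and the put
 * path masks it again when the last reference is dropped.
 */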
636 static bool
637 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
638 {
639         struct drm_device *dev = ring->dev;
640         drm_i915_private_t *dev_priv = dev->dev_private;
641
642         if (!dev->irq_enabled)
643                return false;
644
645         spin_lock(&ring->irq_lock);
646         if (ring->irq_refcount++ == 0) {
647                 ring->irq_mask &= ~rflag;
648                 I915_WRITE_IMR(ring, ring->irq_mask);
649                 ironlake_enable_irq(dev_priv, gflag);
650         }
651         spin_unlock(&ring->irq_lock);
652
653         return true;
654 }
655
656 static void
657 gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
658 {
659         struct drm_device *dev = ring->dev;
660         drm_i915_private_t *dev_priv = dev->dev_private;
661
662         spin_lock(&ring->irq_lock);
663         if (--ring->irq_refcount == 0) {
664                 ring->irq_mask |= rflag;
665                 I915_WRITE_IMR(ring, ring->irq_mask);
666                 ironlake_disable_irq(dev_priv, gflag);
667         }
668         spin_unlock(&ring->irq_lock);
669 }
670
671 static bool
672 bsd_ring_get_irq(struct intel_ring_buffer *ring)
673 {
674         return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
675 }
676 static void
677 bsd_ring_put_irq(struct intel_ring_buffer *ring)
678 {
679         ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
680 }
681
682 static int
683 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
684 {
685         int ret;
686
687         ret = intel_ring_begin(ring, 2);
688         if (ret)
689                 return ret;
690
691         intel_ring_emit(ring,
692                         MI_BATCH_BUFFER_START | (2 << 6) |
693                         MI_BATCH_NON_SECURE_I965);
694         intel_ring_emit(ring, offset);
695         intel_ring_advance(ring);
696
697         return 0;
698 }
699
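/*
 * On i830/845 batches are dispatched with the older MI_BATCH_BUFFER
 * command, which takes explicit start and end addresses; newer parts use
 * MI_BATCH_BUFFER_START, with the 965+ form of the non-secure bit on gen4
 * and later.
 */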
700 static int
701 render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
702                                 u32 offset, u32 len)
703 {
704         struct drm_device *dev = ring->dev;
705         int ret;
706
707         if (IS_I830(dev) || IS_845G(dev)) {
708                 ret = intel_ring_begin(ring, 4);
709                 if (ret)
710                         return ret;
711
712                 intel_ring_emit(ring, MI_BATCH_BUFFER);
713                 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
714                 intel_ring_emit(ring, offset + len - 8);
715                 intel_ring_emit(ring, 0);
716         } else {
717                 ret = intel_ring_begin(ring, 2);
718                 if (ret)
719                         return ret;
720
721                 if (INTEL_INFO(dev)->gen >= 4) {
722                         intel_ring_emit(ring,
723                                         MI_BATCH_BUFFER_START | (2 << 6) |
724                                         MI_BATCH_NON_SECURE_I965);
725                         intel_ring_emit(ring, offset);
726                 } else {
727                         intel_ring_emit(ring,
728                                         MI_BATCH_BUFFER_START | (2 << 6));
729                         intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
730                 }
731         }
732         intel_ring_advance(ring);
733
734         return 0;
735 }
736
737 static void cleanup_status_page(struct intel_ring_buffer *ring)
738 {
739         drm_i915_private_t *dev_priv = ring->dev->dev_private;
740         struct drm_i915_gem_object *obj;
741
742         obj = ring->status_page.obj;
743         if (obj == NULL)
744                 return;
745
746         kunmap(obj->pages[0]);
747         i915_gem_object_unpin(obj);
748         drm_gem_object_unreference(&obj->base);
749         ring->status_page.obj = NULL;
750
751         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
752 }
753
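/*
 * Allocate and pin a 4k cached object for the hardware status page, kmap
 * it for CPU access, clear it and hand its address to the hardware via
 * intel_ring_setup_status_page().
 */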
754 static int init_status_page(struct intel_ring_buffer *ring)
755 {
756         struct drm_device *dev = ring->dev;
757         drm_i915_private_t *dev_priv = dev->dev_private;
758         struct drm_i915_gem_object *obj;
759         int ret;
760
761         obj = i915_gem_alloc_object(dev, 4096);
762         if (obj == NULL) {
763                 DRM_ERROR("Failed to allocate status page\n");
764                 ret = -ENOMEM;
765                 goto err;
766         }
767         obj->agp_type = AGP_USER_CACHED_MEMORY;
768
769         ret = i915_gem_object_pin(obj, 4096, true);
770         if (ret != 0) {
771                 goto err_unref;
772         }
773
774         ring->status_page.gfx_addr = obj->gtt_offset;
775         ring->status_page.page_addr = kmap(obj->pages[0]);
776         if (ring->status_page.page_addr == NULL) {
777                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
778                 goto err_unpin;
779         }
780         ring->status_page.obj = obj;
781         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
782
783         intel_ring_setup_status_page(ring);
784         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
785                         ring->name, ring->status_page.gfx_addr);
786
787         return 0;
788
789 err_unpin:
790         i915_gem_object_unpin(obj);
791 err_unref:
792         drm_gem_object_unreference(&obj->base);
793 err:
794         return ret;
795 }
796
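/*
 * Common ring setup: create the status page (when the chipset keeps it in
 * graphics memory), allocate and pin the ring object, map it write-combined
 * through the GTT aperture and finally run the ring-specific init hook.
 */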
797 int intel_init_ring_buffer(struct drm_device *dev,
798                            struct intel_ring_buffer *ring)
799 {
800         struct drm_i915_gem_object *obj;
801         int ret;
802
803         ring->dev = dev;
804         INIT_LIST_HEAD(&ring->active_list);
805         INIT_LIST_HEAD(&ring->request_list);
806         INIT_LIST_HEAD(&ring->gpu_write_list);
807
808         spin_lock_init(&ring->irq_lock);
809         ring->irq_mask = ~0;
810
811         if (I915_NEED_GFX_HWS(dev)) {
812                 ret = init_status_page(ring);
813                 if (ret)
814                         return ret;
815         }
816
817         obj = i915_gem_alloc_object(dev, ring->size);
818         if (obj == NULL) {
819                 DRM_ERROR("Failed to allocate ringbuffer\n");
820                 ret = -ENOMEM;
821                 goto err_hws;
822         }
823
824         ring->obj = obj;
825
826         ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
827         if (ret)
828                 goto err_unref;
829
830         ring->map.size = ring->size;
831         ring->map.offset = dev->agp->base + obj->gtt_offset;
832         ring->map.type = 0;
833         ring->map.flags = 0;
834         ring->map.mtrr = 0;
835
836         drm_core_ioremap_wc(&ring->map, dev);
837         if (ring->map.handle == NULL) {
838                 DRM_ERROR("Failed to map ringbuffer.\n");
839                 ret = -EINVAL;
840                 goto err_unpin;
841         }
842
843         ring->virtual_start = ring->map.handle;
844         ret = ring->init(ring);
845         if (ret)
846                 goto err_unmap;
847
848         /* Workaround an erratum on the i830 which causes a hang if
849          * the TAIL pointer points to within the last 2 cachelines
850          * of the buffer.
851          */
852         ring->effective_size = ring->size;
853         if (IS_I830(ring->dev))
854                 ring->effective_size -= 128;
855
856         return 0;
857
858 err_unmap:
859         drm_core_ioremapfree(&ring->map, dev);
860 err_unpin:
861         i915_gem_object_unpin(obj);
862 err_unref:
863         drm_gem_object_unreference(&obj->base);
864         ring->obj = NULL;
865 err_hws:
866         cleanup_status_page(ring);
867         return ret;
868 }
869
870 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
871 {
872         struct drm_i915_private *dev_priv;
873         int ret;
874
875         if (ring->obj == NULL)
876                 return;
877
878         /* Disable the ring buffer. The ring must be idle at this point */
879         dev_priv = ring->dev->dev_private;
880         ret = intel_wait_ring_buffer(ring, ring->size - 8);
881         if (ret)
882                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
883                           ring->name, ret);
884
885         I915_WRITE_CTL(ring, 0);
886
887         drm_core_ioremapfree(&ring->map, ring->dev);
888
889         i915_gem_object_unpin(ring->obj);
890         drm_gem_object_unreference(&ring->obj->base);
891         ring->obj = NULL;
892
893         if (ring->cleanup)
894                 ring->cleanup(ring);
895
896         cleanup_status_page(ring);
897 }
898
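/*
 * Fill the remainder of the ring with MI_NOOP pairs and wrap the tail back
 * to the start, waiting for the hardware to free up the unused tail region
 * first if necessary.
 */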
899 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
900 {
901         unsigned int *virt;
902         int rem = ring->size - ring->tail;
903
904         if (ring->space < rem) {
905                 int ret = intel_wait_ring_buffer(ring, rem);
906                 if (ret)
907                         return ret;
908         }
909
910         virt = (unsigned int *)(ring->virtual_start + ring->tail);
911         rem /= 8;
912         while (rem--) {
913                 *virt++ = MI_NOOP;
914                 *virt++ = MI_NOOP;
915         }
916
917         ring->tail = 0;
918         ring->space = ring_space(ring);
919
920         return 0;
921 }
922
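/*
 * Wait for at least @n bytes of free space in the ring: first trust the
 * head value reported in the status page, then fall back to polling the
 * HEAD register for up to three seconds, giving up early if the GPU is
 * wedged.
 */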
923 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
924 {
925         struct drm_device *dev = ring->dev;
926         struct drm_i915_private *dev_priv = dev->dev_private;
927         unsigned long end;
928         u32 head;
929
930         /* If the reported head position has wrapped or hasn't advanced,
931          * fall back to the slow and accurate path.
932          */
933         head = intel_read_status_page(ring, 4);
934         if (head > ring->head) {
935                 ring->head = head;
936                 ring->space = ring_space(ring);
937                 if (ring->space >= n)
938                         return 0;
939         }
940
941         trace_i915_ring_wait_begin(ring);
942         end = jiffies + 3 * HZ;
943         do {
944                 ring->head = I915_READ_HEAD(ring);
945                 ring->space = ring_space(ring);
946                 if (ring->space >= n) {
947                         trace_i915_ring_wait_end(ring);
948                         return 0;
949                 }
950
951                 if (dev->primary->master) {
952                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
953                         if (master_priv->sarea_priv)
954                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
955                 }
956
957                 msleep(1);
958                 if (atomic_read(&dev_priv->mm.wedged))
959                         return -EAGAIN;
960         } while (!time_after(jiffies, end));
961         trace_i915_ring_wait_end(ring);
962         return -EBUSY;
963 }
964
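/*
 * Reserve space for @num_dwords dwords in the ring, wrapping and/or waiting
 * for the hardware as needed.  Callers throughout this file follow the same
 * pattern:
 *
 *         ret = intel_ring_begin(ring, 2);
 *         if (ret)
 *                 return ret;
 *         intel_ring_emit(ring, MI_FLUSH);
 *         intel_ring_emit(ring, MI_NOOP);
 *         intel_ring_advance(ring);
 */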
965 int intel_ring_begin(struct intel_ring_buffer *ring,
966                      int num_dwords)
967 {
968         struct drm_i915_private *dev_priv = ring->dev->dev_private;
969         int n = 4*num_dwords;
970         int ret;
971
972         if (unlikely(atomic_read(&dev_priv->mm.wedged)))
973                 return -EIO;
974
975         if (unlikely(ring->tail + n > ring->effective_size)) {
976                 ret = intel_wrap_ring_buffer(ring);
977                 if (unlikely(ret))
978                         return ret;
979         }
980
981         if (unlikely(ring->space < n)) {
982                 ret = intel_wait_ring_buffer(ring, n);
983                 if (unlikely(ret))
984                         return ret;
985         }
986
987         ring->space -= n;
988         return 0;
989 }
990
991 void intel_ring_advance(struct intel_ring_buffer *ring)
992 {
993         ring->tail &= ring->size - 1;
994         ring->write_tail(ring, ring->tail);
995 }
996
997 static const struct intel_ring_buffer render_ring = {
998         .name                   = "render ring",
999         .id                     = RING_RENDER,
1000         .mmio_base              = RENDER_RING_BASE,
1001         .size                   = 32 * PAGE_SIZE,
1002         .init                   = init_render_ring,
1003         .write_tail             = ring_write_tail,
1004         .flush                  = render_ring_flush,
1005         .add_request            = render_ring_add_request,
1006         .get_seqno              = ring_get_seqno,
1007         .irq_get                = render_ring_get_irq,
1008         .irq_put                = render_ring_put_irq,
1009         .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
1010         .cleanup                = render_ring_cleanup,
1011 };
1012
1013 /* ring buffer for bit-stream decoder */
1014
1015 static const struct intel_ring_buffer bsd_ring = {
1016         .name                   = "bsd ring",
1017         .id                     = RING_BSD,
1018         .mmio_base              = BSD_RING_BASE,
1019         .size                   = 32 * PAGE_SIZE,
1020         .init                   = init_ring_common,
1021         .write_tail             = ring_write_tail,
1022         .flush                  = bsd_ring_flush,
1023         .add_request            = ring_add_request,
1024         .get_seqno              = ring_get_seqno,
1025         .irq_get                = bsd_ring_get_irq,
1026         .irq_put                = bsd_ring_put_irq,
1027         .dispatch_execbuffer    = ring_dispatch_execbuffer,
1028 };
1029
1030
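/*
 * Every tail move on the Gen6 BSD ring follows a fixed sequence (see the
 * comment in the body): disable the RC idle ("ILDL") message, wait for the
 * idle indicator to clear, write the tail, then re-enable the message.
 * Presumably this keeps the ring awake while the tail is updated.
 */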
1031 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1032                                      u32 value)
1033 {
1034        drm_i915_private_t *dev_priv = ring->dev->dev_private;
1035
1036        /* Every tail move must follow the sequence below */
1037        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1038                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1039                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1040        I915_WRITE(GEN6_BSD_RNCID, 0x0);
1041
1042        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1043                                GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
1044                        50))
1045                DRM_ERROR("timed out waiting for IDLE Indicator\n");
1046
1047        I915_WRITE_TAIL(ring, value);
1048        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1049                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1050                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1051 }
1052
1053 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1054                            u32 invalidate, u32 flush)
1055 {
1056         uint32_t cmd;
1057         int ret;
1058
1059         if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
1060                 return 0;
1061
1062         ret = intel_ring_begin(ring, 4);
1063         if (ret)
1064                 return ret;
1065
1066         cmd = MI_FLUSH_DW;
1067         if (invalidate & I915_GEM_GPU_DOMAINS)
1068                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1069         intel_ring_emit(ring, cmd);
1070         intel_ring_emit(ring, 0);
1071         intel_ring_emit(ring, 0);
1072         intel_ring_emit(ring, MI_NOOP);
1073         intel_ring_advance(ring);
1074         return 0;
1075 }
1076
1077 static int
1078 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1079                               u32 offset, u32 len)
1080 {
1081        int ret;
1082
1083        ret = intel_ring_begin(ring, 2);
1084        if (ret)
1085                return ret;
1086
1087        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1088        /* bits 0-7 are the length on GEN6+ */
1089        intel_ring_emit(ring, offset);
1090        intel_ring_advance(ring);
1091
1092        return 0;
1093 }
1094
1095 static bool
1096 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1097 {
1098         return gen6_ring_get_irq(ring,
1099                                  GT_USER_INTERRUPT,
1100                                  GEN6_RENDER_USER_INTERRUPT);
1101 }
1102
1103 static void
1104 gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1105 {
1106         return gen6_ring_put_irq(ring,
1107                                  GT_USER_INTERRUPT,
1108                                  GEN6_RENDER_USER_INTERRUPT);
1109 }
1110
1111 static bool
1112 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1113 {
1114         return gen6_ring_get_irq(ring,
1115                                  GT_GEN6_BSD_USER_INTERRUPT,
1116                                  GEN6_BSD_USER_INTERRUPT);
1117 }
1118
1119 static void
1120 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1121 {
1122         return gen6_ring_put_irq(ring,
1123                                  GT_GEN6_BSD_USER_INTERRUPT,
1124                                  GEN6_BSD_USER_INTERRUPT);
1125 }
1126
1127 /* ring buffer for Video Codec for Gen6+ */
1128 static const struct intel_ring_buffer gen6_bsd_ring = {
1129         .name                   = "gen6 bsd ring",
1130         .id                     = RING_BSD,
1131         .mmio_base              = GEN6_BSD_RING_BASE,
1132         .size                   = 32 * PAGE_SIZE,
1133         .init                   = init_ring_common,
1134         .write_tail             = gen6_bsd_ring_write_tail,
1135         .flush                  = gen6_ring_flush,
1136         .add_request            = gen6_add_request,
1137         .get_seqno              = ring_get_seqno,
1138         .irq_get                = gen6_bsd_ring_get_irq,
1139         .irq_put                = gen6_bsd_ring_put_irq,
1140         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1141 };
1142
1143 /* Blitter support (SandyBridge+) */
1144
1145 static bool
1146 blt_ring_get_irq(struct intel_ring_buffer *ring)
1147 {
1148         return gen6_ring_get_irq(ring,
1149                                  GT_BLT_USER_INTERRUPT,
1150                                  GEN6_BLITTER_USER_INTERRUPT);
1151 }
1152
1153 static void
1154 blt_ring_put_irq(struct intel_ring_buffer *ring)
1155 {
1156         gen6_ring_put_irq(ring,
1157                           GT_BLT_USER_INTERRUPT,
1158                           GEN6_BLITTER_USER_INTERRUPT);
1159 }
1160
1161
1162 /* Workaround for some steppings of SNB:
1163  * each time the BLT engine's ring tail is moved,
1164  * the first command parsed from the ring
1165  * must be MI_BATCH_BUFFER_START.
1166  */
1167 #define NEED_BLT_WORKAROUND(dev) \
1168         (IS_GEN6(dev) && (dev->pdev->revision < 8))
1169
1170 static inline struct drm_i915_gem_object *
1171 to_blt_workaround(struct intel_ring_buffer *ring)
1172 {
1173         return ring->private;
1174 }
1175
1176 static int blt_ring_init(struct intel_ring_buffer *ring)
1177 {
1178         if (NEED_BLT_WORKAROUND(ring->dev)) {
1179                 struct drm_i915_gem_object *obj;
1180                 u32 *ptr;
1181                 int ret;
1182
1183                 obj = i915_gem_alloc_object(ring->dev, 4096);
1184                 if (obj == NULL)
1185                         return -ENOMEM;
1186
1187                 ret = i915_gem_object_pin(obj, 4096, true);
1188                 if (ret) {
1189                         drm_gem_object_unreference(&obj->base);
1190                         return ret;
1191                 }
1192
1193                 ptr = kmap(obj->pages[0]);
1194                 *ptr++ = MI_BATCH_BUFFER_END;
1195                 *ptr++ = MI_NOOP;
1196                 kunmap(obj->pages[0]);
1197
1198                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1199                 if (ret) {
1200                         i915_gem_object_unpin(obj);
1201                         drm_gem_object_unreference(&obj->base);
1202                         return ret;
1203                 }
1204
1205                 ring->private = obj;
1206         }
1207
1208         return init_ring_common(ring);
1209 }
1210
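/*
 * With the workaround object in place, every command sequence emitted on
 * the BLT ring is prefixed with an MI_BATCH_BUFFER_START pointing at the
 * tiny batch built in blt_ring_init() (MI_BATCH_BUFFER_END, MI_NOOP), so
 * the first command parsed after a tail move is always a batch start.
 */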
1211 static int blt_ring_begin(struct intel_ring_buffer *ring,
1212                           int num_dwords)
1213 {
1214         if (ring->private) {
1215                 int ret = intel_ring_begin(ring, num_dwords+2);
1216                 if (ret)
1217                         return ret;
1218
1219                 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1220                 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1221
1222                 return 0;
1223         } else
1224                 return intel_ring_begin(ring, 4);
1225 }
1226
1227 static int blt_ring_flush(struct intel_ring_buffer *ring,
1228                           u32 invalidate, u32 flush)
1229 {
1230         uint32_t cmd;
1231         int ret;
1232
1233         if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
1234                 return 0;
1235
1236         ret = blt_ring_begin(ring, 4);
1237         if (ret)
1238                 return ret;
1239
1240         cmd = MI_FLUSH_DW;
1241         if (invalidate & I915_GEM_DOMAIN_RENDER)
1242                 cmd |= MI_INVALIDATE_TLB;
1243         intel_ring_emit(ring, cmd);
1244         intel_ring_emit(ring, 0);
1245         intel_ring_emit(ring, 0);
1246         intel_ring_emit(ring, MI_NOOP);
1247         intel_ring_advance(ring);
1248         return 0;
1249 }
1250
1251 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1252 {
1253         if (!ring->private)
1254                 return;
1255
1256         i915_gem_object_unpin(ring->private);
1257         drm_gem_object_unreference(ring->private);
1258         ring->private = NULL;
1259 }
1260
1261 static const struct intel_ring_buffer gen6_blt_ring = {
1262        .name                    = "blt ring",
1263        .id                      = RING_BLT,
1264        .mmio_base               = BLT_RING_BASE,
1265        .size                    = 32 * PAGE_SIZE,
1266        .init                    = blt_ring_init,
1267        .write_tail              = ring_write_tail,
1268        .flush                   = blt_ring_flush,
1269        .add_request             = gen6_add_request,
1270        .get_seqno               = ring_get_seqno,
1271        .irq_get                 = blt_ring_get_irq,
1272        .irq_put                 = blt_ring_put_irq,
1273        .dispatch_execbuffer     = gen6_ring_dispatch_execbuffer,
1274        .cleanup                 = blt_ring_cleanup,
1275 };
1276
1277 int intel_init_render_ring_buffer(struct drm_device *dev)
1278 {
1279         drm_i915_private_t *dev_priv = dev->dev_private;
1280         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1281
1282         *ring = render_ring;
1283         if (INTEL_INFO(dev)->gen >= 6) {
1284                 ring->add_request = gen6_add_request;
1285                 ring->irq_get = gen6_render_ring_get_irq;
1286                 ring->irq_put = gen6_render_ring_put_irq;
1287         } else if (IS_GEN5(dev)) {
1288                 ring->add_request = pc_render_add_request;
1289                 ring->get_seqno = pc_render_get_seqno;
1290         }
1291
1292         if (!I915_NEED_GFX_HWS(dev)) {
1293                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1294                 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1295         }
1296
1297         return intel_init_ring_buffer(dev, ring);
1298 }
1299
1300 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1301 {
1302         drm_i915_private_t *dev_priv = dev->dev_private;
1303         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1304
1305         *ring = render_ring;
1306         if (INTEL_INFO(dev)->gen >= 6) {
1307                 ring->add_request = gen6_add_request;
1308                 ring->irq_get = gen6_render_ring_get_irq;
1309                 ring->irq_put = gen6_render_ring_put_irq;
1310         } else if (IS_GEN5(dev)) {
1311                 ring->add_request = pc_render_add_request;
1312                 ring->get_seqno = pc_render_get_seqno;
1313         }
1314
1315         ring->dev = dev;
1316         INIT_LIST_HEAD(&ring->active_list);
1317         INIT_LIST_HEAD(&ring->request_list);
1318         INIT_LIST_HEAD(&ring->gpu_write_list);
1319
1320         ring->size = size;
1321         ring->effective_size = ring->size;
1322         if (IS_I830(ring->dev))
1323                 ring->effective_size -= 128;
1324
1325         ring->map.offset = start;
1326         ring->map.size = size;
1327         ring->map.type = 0;
1328         ring->map.flags = 0;
1329         ring->map.mtrr = 0;
1330
1331         drm_core_ioremap_wc(&ring->map, dev);
1332         if (ring->map.handle == NULL) {
1333                 DRM_ERROR("cannot ioremap virtual address for"
1334                           " ring buffer\n");
1335                 return -ENOMEM;
1336         }
1337
1338         ring->virtual_start = (void __force __iomem *)ring->map.handle;
1339         return 0;
1340 }
1341
1342 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1343 {
1344         drm_i915_private_t *dev_priv = dev->dev_private;
1345         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1346
1347         if (IS_GEN6(dev))
1348                 *ring = gen6_bsd_ring;
1349         else
1350                 *ring = bsd_ring;
1351
1352         return intel_init_ring_buffer(dev, ring);
1353 }
1354
1355 int intel_init_blt_ring_buffer(struct drm_device *dev)
1356 {
1357         drm_i915_private_t *dev_priv = dev->dev_private;
1358         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1359
1360         *ring = gen6_blt_ring;
1361
1362         return intel_init_ring_buffer(dev, ring);
1363 }