pandora-kernel.git: drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drv.h"
33 #include "i915_drm.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
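/*
 * Free space in the ring, in bytes.  An 8-byte gap is kept between tail
 * and head so that a completely full ring is never mistaken for an
 * empty one (head == tail means empty).
 */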
37 static inline int ring_space(struct intel_ring_buffer *ring)
38 {
39         int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
40         if (space < 0)
41                 space += ring->size;
42         return space;
43 }
44
45 static u32 i915_gem_get_seqno(struct drm_device *dev)
46 {
47         drm_i915_private_t *dev_priv = dev->dev_private;
48         u32 seqno;
49
50         seqno = dev_priv->next_seqno;
51
52         /* reserve 0 for non-seqno */
53         if (++dev_priv->next_seqno == 0)
54                 dev_priv->next_seqno = 1;
55
56         return seqno;
57 }
58
59 static int
60 render_ring_flush(struct intel_ring_buffer *ring,
61                   u32   invalidate_domains,
62                   u32   flush_domains)
63 {
64         struct drm_device *dev = ring->dev;
65         u32 cmd;
66         int ret;
67
68         /*
69          * read/write caches:
70          *
71          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
72          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
73          * also flushed at 2d versus 3d pipeline switches.
74          *
75          * read-only caches:
76          *
77          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
78          * MI_READ_FLUSH is set, and is always flushed on 965.
79          *
80          * I915_GEM_DOMAIN_COMMAND may not exist?
81          *
82          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
83          * invalidated when MI_EXE_FLUSH is set.
84          *
85          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
86          * invalidated with every MI_FLUSH.
87          *
88          * TLBs:
89          *
90          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
91          * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
92          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
93          * are flushed at any MI_FLUSH.
94          */
95
96         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
97         if ((invalidate_domains|flush_domains) &
98             I915_GEM_DOMAIN_RENDER)
99                 cmd &= ~MI_NO_WRITE_FLUSH;
100         if (INTEL_INFO(dev)->gen < 4) {
101                 /*
102                  * On the 965, the sampler cache always gets flushed
103                  * and this bit is reserved.
104                  */
105                 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
106                         cmd |= MI_READ_FLUSH;
107         }
108         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
109                 cmd |= MI_EXE_FLUSH;
110
111         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
112             (IS_G4X(dev) || IS_GEN5(dev)))
113                 cmd |= MI_INVALIDATE_ISP;
114
115         ret = intel_ring_begin(ring, 2);
116         if (ret)
117                 return ret;
118
119         intel_ring_emit(ring, cmd);
120         intel_ring_emit(ring, MI_NOOP);
121         intel_ring_advance(ring);
122
123         return 0;
124 }
125
126 static void ring_write_tail(struct intel_ring_buffer *ring,
127                             u32 value)
128 {
129         drm_i915_private_t *dev_priv = ring->dev->dev_private;
130         I915_WRITE_TAIL(ring, value);
131 }
132
133 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
134 {
135         drm_i915_private_t *dev_priv = ring->dev->dev_private;
136         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
137                         RING_ACTHD(ring->mmio_base) : ACTHD;
138
139         return I915_READ(acthd_reg);
140 }
141
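/*
 * Common ring initialization: stop the ring, force head and tail back to
 * zero, program the start address of the pinned ring object and re-enable
 * the ring, verifying that the hardware accepted the new state.
 */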
142 static int init_ring_common(struct intel_ring_buffer *ring)
143 {
144         drm_i915_private_t *dev_priv = ring->dev->dev_private;
145         struct drm_i915_gem_object *obj = ring->obj;
146         u32 head;
147
148         /* Stop the ring if it's running. */
149         I915_WRITE_CTL(ring, 0);
150         I915_WRITE_HEAD(ring, 0);
151         ring->write_tail(ring, 0);
152
153         /* Initialize the ring. */
154         I915_WRITE_START(ring, obj->gtt_offset);
155         head = I915_READ_HEAD(ring) & HEAD_ADDR;
156
157         /* G45 ring initialization fails to reset head to zero */
158         if (head != 0) {
159                 DRM_DEBUG_KMS("%s head not reset to zero "
160                               "ctl %08x head %08x tail %08x start %08x\n",
161                               ring->name,
162                               I915_READ_CTL(ring),
163                               I915_READ_HEAD(ring),
164                               I915_READ_TAIL(ring),
165                               I915_READ_START(ring));
166
167                 I915_WRITE_HEAD(ring, 0);
168
169                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
170                         DRM_ERROR("failed to set %s head to zero "
171                                   "ctl %08x head %08x tail %08x start %08x\n",
172                                   ring->name,
173                                   I915_READ_CTL(ring),
174                                   I915_READ_HEAD(ring),
175                                   I915_READ_TAIL(ring),
176                                   I915_READ_START(ring));
177                 }
178         }
179
180         I915_WRITE_CTL(ring,
181                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
182                         | RING_REPORT_64K | RING_VALID);
183
184         /* If the head is still not zero, the ring is dead */
185         if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
186             I915_READ_START(ring) != obj->gtt_offset ||
187             (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
188                 DRM_ERROR("%s initialization failed "
189                                 "ctl %08x head %08x tail %08x start %08x\n",
190                                 ring->name,
191                                 I915_READ_CTL(ring),
192                                 I915_READ_HEAD(ring),
193                                 I915_READ_TAIL(ring),
194                                 I915_READ_START(ring));
195                 return -EIO;
196         }
197
198         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
199                 i915_kernel_lost_context(ring->dev);
200         else {
201                 ring->head = I915_READ_HEAD(ring);
202                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
203                 ring->space = ring_space(ring);
204         }
205
206         return 0;
207 }
208
209 /*
210  * 965+ support PIPE_CONTROL commands, which provide finer grained control
211  * over cache flushing.
212  */
213 struct pipe_control {
214         struct drm_i915_gem_object *obj;
215         volatile u32 *cpu_page;
216         u32 gtt_offset;
217 };
218
219 static int
220 init_pipe_control(struct intel_ring_buffer *ring)
221 {
222         struct pipe_control *pc;
223         struct drm_i915_gem_object *obj;
224         int ret;
225
226         if (ring->private)
227                 return 0;
228
229         pc = kmalloc(sizeof(*pc), GFP_KERNEL);
230         if (!pc)
231                 return -ENOMEM;
232
233         obj = i915_gem_alloc_object(ring->dev, 4096);
234         if (obj == NULL) {
235                 DRM_ERROR("Failed to allocate seqno page\n");
236                 ret = -ENOMEM;
237                 goto err;
238         }
239
240         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
241
242         ret = i915_gem_object_pin(obj, 4096, true);
243         if (ret)
244                 goto err_unref;
245
246         pc->gtt_offset = obj->gtt_offset;
247         pc->cpu_page =  kmap(obj->pages[0]);
248         if (pc->cpu_page == NULL)
249                 goto err_unpin;
250
251         pc->obj = obj;
252         ring->private = pc;
253         return 0;
254
255 err_unpin:
256         i915_gem_object_unpin(obj);
257 err_unref:
258         drm_gem_object_unreference(&obj->base);
259 err:
260         kfree(pc);
261         return ret;
262 }
263
264 static void
265 cleanup_pipe_control(struct intel_ring_buffer *ring)
266 {
267         struct pipe_control *pc = ring->private;
268         struct drm_i915_gem_object *obj;
269
270         if (!ring->private)
271                 return;
272
273         obj = pc->obj;
274         kunmap(obj->pages[0]);
275         i915_gem_object_unpin(obj);
276         drm_gem_object_unreference(&obj->base);
277
278         kfree(pc);
279         ring->private = NULL;
280 }
281
282 static int init_render_ring(struct intel_ring_buffer *ring)
283 {
284         struct drm_device *dev = ring->dev;
285         struct drm_i915_private *dev_priv = dev->dev_private;
286         int ret = init_ring_common(ring);
287
288         if (INTEL_INFO(dev)->gen > 3) {
289                 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
290                 if (IS_GEN6(dev) || IS_GEN7(dev))
291                         mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
292                 I915_WRITE(MI_MODE, mode);
293                 if (IS_GEN7(dev))
294                         I915_WRITE(GFX_MODE_GEN7,
295                                    GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
296                                    GFX_MODE_ENABLE(GFX_REPLAY_MODE));
297         }
298
299         if (INTEL_INFO(dev)->gen >= 6) {
300         } else if (IS_GEN5(dev)) {
301                 ret = init_pipe_control(ring);
302                 if (ret)
303                         return ret;
304         }
305
306         return ret;
307 }
308
309 static void render_ring_cleanup(struct intel_ring_buffer *ring)
310 {
311         if (!ring->private)
312                 return;
313
314         cleanup_pipe_control(ring);
315 }
316
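/*
 * Write the new seqno into one of the other rings' semaphore/sync
 * registers, so that the other ring can later wait for this ring to pass
 * the seqno (see intel_ring_sync).
 */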
317 static void
318 update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
319 {
320         struct drm_device *dev = ring->dev;
321         struct drm_i915_private *dev_priv = dev->dev_private;
322         int id;
323
324         /*
325          * cs -> 1 = vcs, 0 = bcs
326          * vcs -> 1 = bcs, 0 = cs,
327          * bcs -> 1 = cs, 0 = vcs.
328          */
329         id = ring - dev_priv->ring;
330         id += 2 - i;
331         id %= 3;
332
333         intel_ring_emit(ring,
334                         MI_SEMAPHORE_MBOX |
335                         MI_SEMAPHORE_REGISTER |
336                         MI_SEMAPHORE_UPDATE);
337         intel_ring_emit(ring, seqno);
338         intel_ring_emit(ring,
339                         RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
340 }
341
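/*
 * Emit a request on a gen6 ring: signal the new seqno to the two other
 * rings via their semaphore mailboxes, then write it into the hardware
 * status page and raise a user interrupt.
 */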
342 static int
343 gen6_add_request(struct intel_ring_buffer *ring,
344                  u32 *result)
345 {
346         u32 seqno;
347         int ret;
348
349         ret = intel_ring_begin(ring, 10);
350         if (ret)
351                 return ret;
352
353         seqno = i915_gem_get_seqno(ring->dev);
354         update_semaphore(ring, 0, seqno);
355         update_semaphore(ring, 1, seqno);
356
357         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
358         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
359         intel_ring_emit(ring, seqno);
360         intel_ring_emit(ring, MI_USER_INTERRUPT);
361         intel_ring_advance(ring);
362
363         *result = seqno;
364         return 0;
365 }
366
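/*
 * Emit a semaphore wait on @ring so that it stalls until @to has passed
 * @seqno, signalled through the sync register that @to writes in
 * update_semaphore().
 */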
367 int
368 intel_ring_sync(struct intel_ring_buffer *ring,
369                 struct intel_ring_buffer *to,
370                 u32 seqno)
371 {
372         int ret;
373
374         ret = intel_ring_begin(ring, 4);
375         if (ret)
376                 return ret;
377
378         intel_ring_emit(ring,
379                         MI_SEMAPHORE_MBOX |
380                         MI_SEMAPHORE_REGISTER |
381                         intel_ring_sync_index(ring, to) << 17 |
382                         MI_SEMAPHORE_COMPARE);
383         intel_ring_emit(ring, seqno);
384         intel_ring_emit(ring, 0);
385         intel_ring_emit(ring, MI_NOOP);
386         intel_ring_advance(ring);
387
388         return 0;
389 }
390
391 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
392 do {                                                                    \
393         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |           \
394                  PIPE_CONTROL_DEPTH_STALL | 2);                         \
395         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
396         intel_ring_emit(ring__, 0);                                                     \
397         intel_ring_emit(ring__, 0);                                                     \
398 } while (0)
399
400 static int
401 pc_render_add_request(struct intel_ring_buffer *ring,
402                       u32 *result)
403 {
404         struct drm_device *dev = ring->dev;
405         u32 seqno = i915_gem_get_seqno(dev);
406         struct pipe_control *pc = ring->private;
407         u32 scratch_addr = pc->gtt_offset + 128;
408         int ret;
409
410         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
411          * incoherent with writes to memory, i.e. completely fubar,
412          * so we need to use PIPE_NOTIFY instead.
413          *
414          * However, we also need to workaround the qword write
415          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
416          * memory before requesting an interrupt.
417          */
418         ret = intel_ring_begin(ring, 32);
419         if (ret)
420                 return ret;
421
422         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
423                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
424         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
425         intel_ring_emit(ring, seqno);
426         intel_ring_emit(ring, 0);
427         PIPE_CONTROL_FLUSH(ring, scratch_addr);
428         scratch_addr += 128; /* write to separate cachelines */
429         PIPE_CONTROL_FLUSH(ring, scratch_addr);
430         scratch_addr += 128;
431         PIPE_CONTROL_FLUSH(ring, scratch_addr);
432         scratch_addr += 128;
433         PIPE_CONTROL_FLUSH(ring, scratch_addr);
434         scratch_addr += 128;
435         PIPE_CONTROL_FLUSH(ring, scratch_addr);
436         scratch_addr += 128;
437         PIPE_CONTROL_FLUSH(ring, scratch_addr);
438         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
439                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
440                         PIPE_CONTROL_NOTIFY);
441         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
442         intel_ring_emit(ring, seqno);
443         intel_ring_emit(ring, 0);
444         intel_ring_advance(ring);
445
446         *result = seqno;
447         return 0;
448 }
449
450 static int
451 render_ring_add_request(struct intel_ring_buffer *ring,
452                         u32 *result)
453 {
454         struct drm_device *dev = ring->dev;
455         u32 seqno = i915_gem_get_seqno(dev);
456         int ret;
457
458         ret = intel_ring_begin(ring, 4);
459         if (ret)
460                 return ret;
461
462         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
463         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
464         intel_ring_emit(ring, seqno);
465         intel_ring_emit(ring, MI_USER_INTERRUPT);
466         intel_ring_advance(ring);
467
468         *result = seqno;
469         return 0;
470 }
471
472 static u32
473 ring_get_seqno(struct intel_ring_buffer *ring)
474 {
475         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
476 }
477
478 static u32
479 pc_render_get_seqno(struct intel_ring_buffer *ring)
480 {
481         struct pipe_control *pc = ring->private;
482         return pc->cpu_page[0];
483 }
484
485 static void
486 ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
487 {
488         dev_priv->gt_irq_mask &= ~mask;
489         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
490         POSTING_READ(GTIMR);
491 }
492
493 static void
494 ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
495 {
496         dev_priv->gt_irq_mask |= mask;
497         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
498         POSTING_READ(GTIMR);
499 }
500
501 static void
502 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
503 {
504         dev_priv->irq_mask &= ~mask;
505         I915_WRITE(IMR, dev_priv->irq_mask);
506         POSTING_READ(IMR);
507 }
508
509 static void
510 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
511 {
512         dev_priv->irq_mask |= mask;
513         I915_WRITE(IMR, dev_priv->irq_mask);
514         POSTING_READ(IMR);
515 }
516
517 static bool
518 render_ring_get_irq(struct intel_ring_buffer *ring)
519 {
520         struct drm_device *dev = ring->dev;
521         drm_i915_private_t *dev_priv = dev->dev_private;
522
523         if (!dev->irq_enabled)
524                 return false;
525
526         spin_lock(&ring->irq_lock);
527         if (ring->irq_refcount++ == 0) {
528                 if (HAS_PCH_SPLIT(dev))
529                         ironlake_enable_irq(dev_priv,
530                                             GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
531                 else
532                         i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
533         }
534         spin_unlock(&ring->irq_lock);
535
536         return true;
537 }
538
539 static void
540 render_ring_put_irq(struct intel_ring_buffer *ring)
541 {
542         struct drm_device *dev = ring->dev;
543         drm_i915_private_t *dev_priv = dev->dev_private;
544
545         spin_lock(&ring->irq_lock);
546         if (--ring->irq_refcount == 0) {
547                 if (HAS_PCH_SPLIT(dev))
548                         ironlake_disable_irq(dev_priv,
549                                              GT_USER_INTERRUPT |
550                                              GT_PIPE_NOTIFY);
551                 else
552                         i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
553         }
554         spin_unlock(&ring->irq_lock);
555 }
556
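/*
 * Tell the hardware where the ring's status page lives by writing its
 * graphics address into the ring's HWS_PGA register, whose location
 * varies between generations.
 */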
557 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
558 {
559         struct drm_device *dev = ring->dev;
560         drm_i915_private_t *dev_priv = ring->dev->dev_private;
561         u32 mmio = 0;
562
563         /* The ring status page addresses are no longer next to the rest of
564          * the ring registers as of gen7.
565          */
566         if (IS_GEN7(dev)) {
567                 switch (ring->id) {
568                 case RING_RENDER:
569                         mmio = RENDER_HWS_PGA_GEN7;
570                         break;
571                 case RING_BLT:
572                         mmio = BLT_HWS_PGA_GEN7;
573                         break;
574                 case RING_BSD:
575                         mmio = BSD_HWS_PGA_GEN7;
576                         break;
577                 }
578         } else if (IS_GEN6(ring->dev)) {
579                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
580         } else {
581                 mmio = RING_HWS_PGA(ring->mmio_base);
582         }
583
584         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
585         POSTING_READ(mmio);
586 }
587
588 static int
589 bsd_ring_flush(struct intel_ring_buffer *ring,
590                u32     invalidate_domains,
591                u32     flush_domains)
592 {
593         int ret;
594
595         ret = intel_ring_begin(ring, 2);
596         if (ret)
597                 return ret;
598
599         intel_ring_emit(ring, MI_FLUSH);
600         intel_ring_emit(ring, MI_NOOP);
601         intel_ring_advance(ring);
602         return 0;
603 }
604
605 static int
606 ring_add_request(struct intel_ring_buffer *ring,
607                  u32 *result)
608 {
609         u32 seqno;
610         int ret;
611
612         ret = intel_ring_begin(ring, 4);
613         if (ret)
614                 return ret;
615
616         seqno = i915_gem_get_seqno(ring->dev);
617
618         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
619         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
620         intel_ring_emit(ring, seqno);
621         intel_ring_emit(ring, MI_USER_INTERRUPT);
622         intel_ring_advance(ring);
623
624         *result = seqno;
625         return 0;
626 }
627
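/*
 * Gen6 rings mask user interrupts at two levels: the per-ring IMR
 * register and the shared GT interrupt mask.  Both are unmasked here
 * under a reference count and masked again in gen6_ring_put_irq.
 */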
628 static bool
629 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
630 {
631         struct drm_device *dev = ring->dev;
632         drm_i915_private_t *dev_priv = dev->dev_private;
633
634         if (!dev->irq_enabled)
635                 return false;
636
637         spin_lock(&ring->irq_lock);
638         if (ring->irq_refcount++ == 0) {
639                 ring->irq_mask &= ~rflag;
640                 I915_WRITE_IMR(ring, ring->irq_mask);
641                 ironlake_enable_irq(dev_priv, gflag);
642         }
643         spin_unlock(&ring->irq_lock);
644
645         return true;
646 }
647
648 static void
649 gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
650 {
651         struct drm_device *dev = ring->dev;
652         drm_i915_private_t *dev_priv = dev->dev_private;
653
654         spin_lock(&ring->irq_lock);
655         if (--ring->irq_refcount == 0) {
656                 ring->irq_mask |= rflag;
657                 I915_WRITE_IMR(ring, ring->irq_mask);
658                 ironlake_disable_irq(dev_priv, gflag);
659         }
660         spin_unlock(&ring->irq_lock);
661 }
662
663 static bool
664 bsd_ring_get_irq(struct intel_ring_buffer *ring)
665 {
666         struct drm_device *dev = ring->dev;
667         drm_i915_private_t *dev_priv = dev->dev_private;
668
669         if (!dev->irq_enabled)
670                 return false;
671
672         spin_lock(&ring->irq_lock);
673         if (ring->irq_refcount++ == 0) {
674                 if (IS_G4X(dev))
675                         i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
676                 else
677                         ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
678         }
679         spin_unlock(&ring->irq_lock);
680
681         return true;
682 }
683 static void
684 bsd_ring_put_irq(struct intel_ring_buffer *ring)
685 {
686         struct drm_device *dev = ring->dev;
687         drm_i915_private_t *dev_priv = dev->dev_private;
688
689         spin_lock(&ring->irq_lock);
690         if (--ring->irq_refcount == 0) {
691                 if (IS_G4X(dev))
692                         i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
693                 else
694                         ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
695         }
696         spin_unlock(&ring->irq_lock);
697 }
698
699 static int
700 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
701 {
702         int ret;
703
704         ret = intel_ring_begin(ring, 2);
705         if (ret)
706                 return ret;
707
708         intel_ring_emit(ring,
709                         MI_BATCH_BUFFER_START | (2 << 6) |
710                         MI_BATCH_NON_SECURE_I965);
711         intel_ring_emit(ring, offset);
712         intel_ring_advance(ring);
713
714         return 0;
715 }
716
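/*
 * Kick off a batch buffer on the render ring.  i830/845 only have the
 * older MI_BATCH_BUFFER command, which takes explicit start and end
 * addresses; everything newer uses MI_BATCH_BUFFER_START.
 */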
717 static int
718 render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
719                                 u32 offset, u32 len)
720 {
721         struct drm_device *dev = ring->dev;
722         int ret;
723
724         if (IS_I830(dev) || IS_845G(dev)) {
725                 ret = intel_ring_begin(ring, 4);
726                 if (ret)
727                         return ret;
728
729                 intel_ring_emit(ring, MI_BATCH_BUFFER);
730                 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
731                 intel_ring_emit(ring, offset + len - 8);
732                 intel_ring_emit(ring, 0);
733         } else {
734                 ret = intel_ring_begin(ring, 2);
735                 if (ret)
736                         return ret;
737
738                 if (INTEL_INFO(dev)->gen >= 4) {
739                         intel_ring_emit(ring,
740                                         MI_BATCH_BUFFER_START | (2 << 6) |
741                                         MI_BATCH_NON_SECURE_I965);
742                         intel_ring_emit(ring, offset);
743                 } else {
744                         intel_ring_emit(ring,
745                                         MI_BATCH_BUFFER_START | (2 << 6));
746                         intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
747                 }
748         }
749         intel_ring_advance(ring);
750
751         return 0;
752 }
753
754 static void cleanup_status_page(struct intel_ring_buffer *ring)
755 {
756         drm_i915_private_t *dev_priv = ring->dev->dev_private;
757         struct drm_i915_gem_object *obj;
758
759         obj = ring->status_page.obj;
760         if (obj == NULL)
761                 return;
762
763         kunmap(obj->pages[0]);
764         i915_gem_object_unpin(obj);
765         drm_gem_object_unreference(&obj->base);
766         ring->status_page.obj = NULL;
767
768         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
769 }
770
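/*
 * Allocate, pin and map a single page to act as the hardware status page;
 * the GPU writes seqnos and other status reports into it.
 */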
771 static int init_status_page(struct intel_ring_buffer *ring)
772 {
773         struct drm_device *dev = ring->dev;
774         drm_i915_private_t *dev_priv = dev->dev_private;
775         struct drm_i915_gem_object *obj;
776         int ret;
777
778         obj = i915_gem_alloc_object(dev, 4096);
779         if (obj == NULL) {
780                 DRM_ERROR("Failed to allocate status page\n");
781                 ret = -ENOMEM;
782                 goto err;
783         }
784
785         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
786
787         ret = i915_gem_object_pin(obj, 4096, true);
788         if (ret != 0) {
789                 goto err_unref;
790         }
791
792         ring->status_page.gfx_addr = obj->gtt_offset;
793         ring->status_page.page_addr = kmap(obj->pages[0]);
794         if (ring->status_page.page_addr == NULL) {
795                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
796                 goto err_unpin;
797         }
798         ring->status_page.obj = obj;
799         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
800
801         intel_ring_setup_status_page(ring);
802         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
803                         ring->name, ring->status_page.gfx_addr);
804
805         return 0;
806
807 err_unpin:
808         i915_gem_object_unpin(obj);
809 err_unref:
810         drm_gem_object_unreference(&obj->base);
811 err:
812         return ret;
813 }
814
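/*
 * Common ring constructor: set up the software lists and IRQ state,
 * allocate and pin the ring object, map it write-combined through the
 * GTT aperture and finally run the ring-specific init hook.
 */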
815 int intel_init_ring_buffer(struct drm_device *dev,
816                            struct intel_ring_buffer *ring)
817 {
818         struct drm_i915_gem_object *obj;
819         int ret;
820
821         ring->dev = dev;
822         INIT_LIST_HEAD(&ring->active_list);
823         INIT_LIST_HEAD(&ring->request_list);
824         INIT_LIST_HEAD(&ring->gpu_write_list);
825
826         init_waitqueue_head(&ring->irq_queue);
827         spin_lock_init(&ring->irq_lock);
828         ring->irq_mask = ~0;
829
830         if (I915_NEED_GFX_HWS(dev)) {
831                 ret = init_status_page(ring);
832                 if (ret)
833                         return ret;
834         }
835
836         obj = i915_gem_alloc_object(dev, ring->size);
837         if (obj == NULL) {
838                 DRM_ERROR("Failed to allocate ringbuffer\n");
839                 ret = -ENOMEM;
840                 goto err_hws;
841         }
842
843         ring->obj = obj;
844
845         ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
846         if (ret)
847                 goto err_unref;
848
849         ring->map.size = ring->size;
850         ring->map.offset = dev->agp->base + obj->gtt_offset;
851         ring->map.type = 0;
852         ring->map.flags = 0;
853         ring->map.mtrr = 0;
854
855         drm_core_ioremap_wc(&ring->map, dev);
856         if (ring->map.handle == NULL) {
857                 DRM_ERROR("Failed to map ringbuffer.\n");
858                 ret = -EINVAL;
859                 goto err_unpin;
860         }
861
862         ring->virtual_start = ring->map.handle;
863         ret = ring->init(ring);
864         if (ret)
865                 goto err_unmap;
866
867         /* Work around an erratum on the i830 which causes a hang if
868          * the TAIL pointer points to within the last 2 cachelines
869          * of the buffer.
870          */
871         ring->effective_size = ring->size;
872         if (IS_I830(ring->dev))
873                 ring->effective_size -= 128;
874
875         return 0;
876
877 err_unmap:
878         drm_core_ioremapfree(&ring->map, dev);
879 err_unpin:
880         i915_gem_object_unpin(obj);
881 err_unref:
882         drm_gem_object_unreference(&obj->base);
883         ring->obj = NULL;
884 err_hws:
885         cleanup_status_page(ring);
886         return ret;
887 }
888
889 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
890 {
891         struct drm_i915_private *dev_priv;
892         int ret;
893
894         if (ring->obj == NULL)
895                 return;
896
897         /* Disable the ring buffer. The ring must be idle at this point */
898         dev_priv = ring->dev->dev_private;
899         ret = intel_wait_ring_idle(ring);
900         if (ret)
901                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
902                           ring->name, ret);
903
904         I915_WRITE_CTL(ring, 0);
905
906         drm_core_ioremapfree(&ring->map, ring->dev);
907
908         i915_gem_object_unpin(ring->obj);
909         drm_gem_object_unreference(&ring->obj->base);
910         ring->obj = NULL;
911
912         if (ring->cleanup)
913                 ring->cleanup(ring);
914
915         cleanup_status_page(ring);
916 }
917
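/*
 * When a request would run past the end of the ring, pad the remaining
 * space with MI_NOOPs and wrap the tail back to the start.
 */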
918 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
919 {
920         unsigned int *virt;
921         int rem = ring->size - ring->tail;
922
923         if (ring->space < rem) {
924                 int ret = intel_wait_ring_buffer(ring, rem);
925                 if (ret)
926                         return ret;
927         }
928
929         virt = (unsigned int *)(ring->virtual_start + ring->tail);
930         rem /= 8;
931         while (rem--) {
932                 *virt++ = MI_NOOP;
933                 *virt++ = MI_NOOP;
934         }
935
936         ring->tail = 0;
937         ring->space = ring_space(ring);
938
939         return 0;
940 }
941
942 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
943 {
944         struct drm_device *dev = ring->dev;
945         struct drm_i915_private *dev_priv = dev->dev_private;
946         unsigned long end;
947         u32 head;
948
949         /* If the reported head position has wrapped or hasn't advanced,
950          * fall back to the slow and accurate path.
951          */
952         head = intel_read_status_page(ring, 4);
953         if (head > ring->head) {
954                 ring->head = head;
955                 ring->space = ring_space(ring);
956                 if (ring->space >= n)
957                         return 0;
958         }
959
960         trace_i915_ring_wait_begin(ring);
961         end = jiffies + 3 * HZ;
962         do {
963                 ring->head = I915_READ_HEAD(ring);
964                 ring->space = ring_space(ring);
965                 if (ring->space >= n) {
966                         trace_i915_ring_wait_end(ring);
967                         return 0;
968                 }
969
970                 if (dev->primary->master) {
971                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
972                         if (master_priv->sarea_priv)
973                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
974                 }
975
976                 msleep(1);
977                 if (atomic_read(&dev_priv->mm.wedged))
978                         return -EAGAIN;
979         } while (!time_after(jiffies, end));
980         trace_i915_ring_wait_end(ring);
981         return -EBUSY;
982 }
983
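/*
 * Reserve space for num_dwords commands, wrapping the buffer and/or
 * waiting for the GPU to consume old commands as needed.  Fails with
 * -EIO if the GPU is wedged.
 */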
984 int intel_ring_begin(struct intel_ring_buffer *ring,
985                      int num_dwords)
986 {
987         struct drm_i915_private *dev_priv = ring->dev->dev_private;
988         int n = 4*num_dwords;
989         int ret;
990
991         if (unlikely(atomic_read(&dev_priv->mm.wedged)))
992                 return -EIO;
993
994         if (unlikely(ring->tail + n > ring->effective_size)) {
995                 ret = intel_wrap_ring_buffer(ring);
996                 if (unlikely(ret))
997                         return ret;
998         }
999
1000         if (unlikely(ring->space < n)) {
1001                 ret = intel_wait_ring_buffer(ring, n);
1002                 if (unlikely(ret))
1003                         return ret;
1004         }
1005
1006         ring->space -= n;
1007         return 0;
1008 }
1009
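/*
 * Mask the tail to the (power-of-two) ring size and post it to the
 * hardware, which starts executing the freshly emitted commands.
 */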
1010 void intel_ring_advance(struct intel_ring_buffer *ring)
1011 {
1012         ring->tail &= ring->size - 1;
1013         ring->write_tail(ring, ring->tail);
1014 }
1015
1016 static const struct intel_ring_buffer render_ring = {
1017         .name                   = "render ring",
1018         .id                     = RING_RENDER,
1019         .mmio_base              = RENDER_RING_BASE,
1020         .size                   = 32 * PAGE_SIZE,
1021         .init                   = init_render_ring,
1022         .write_tail             = ring_write_tail,
1023         .flush                  = render_ring_flush,
1024         .add_request            = render_ring_add_request,
1025         .get_seqno              = ring_get_seqno,
1026         .irq_get                = render_ring_get_irq,
1027         .irq_put                = render_ring_put_irq,
1028         .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
1029         .cleanup                = render_ring_cleanup,
1030 };
1031
1032 /* ring buffer for bit-stream decoder */
1033
1034 static const struct intel_ring_buffer bsd_ring = {
1035         .name                   = "bsd ring",
1036         .id                     = RING_BSD,
1037         .mmio_base              = BSD_RING_BASE,
1038         .size                   = 32 * PAGE_SIZE,
1039         .init                   = init_ring_common,
1040         .write_tail             = ring_write_tail,
1041         .flush                  = bsd_ring_flush,
1042         .add_request            = ring_add_request,
1043         .get_seqno              = ring_get_seqno,
1044         .irq_get                = bsd_ring_get_irq,
1045         .irq_put                = bsd_ring_put_irq,
1046         .dispatch_execbuffer    = ring_dispatch_execbuffer,
1047 };
1048
1049
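/*
 * On gen6 the BSD ring apparently has to be brought out of its
 * power-saving state (wait for the idle indicator) before the tail
 * register may be written; power saving is re-enabled afterwards.
 */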
1050 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1051                                      u32 value)
1052 {
1053        drm_i915_private_t *dev_priv = ring->dev->dev_private;
1054
1055        /* Every tail move must follow the sequence below */
1056        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1057                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1058                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1059        I915_WRITE(GEN6_BSD_RNCID, 0x0);
1060
1061        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1062                                GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
1063                        50))
1064                DRM_ERROR("timed out waiting for IDLE Indicator\n");
1065
1066        I915_WRITE_TAIL(ring, value);
1067        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1068                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1069                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1070 }
1071
1072 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1073                            u32 invalidate, u32 flush)
1074 {
1075         uint32_t cmd;
1076         int ret;
1077
1078         ret = intel_ring_begin(ring, 4);
1079         if (ret)
1080                 return ret;
1081
1082         cmd = MI_FLUSH_DW;
1083         if (invalidate & I915_GEM_GPU_DOMAINS)
1084                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1085         intel_ring_emit(ring, cmd);
1086         intel_ring_emit(ring, 0);
1087         intel_ring_emit(ring, 0);
1088         intel_ring_emit(ring, MI_NOOP);
1089         intel_ring_advance(ring);
1090         return 0;
1091 }
1092
1093 static int
1094 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1095                               u32 offset, u32 len)
1096 {
1097         int ret;
1098
1099         ret = intel_ring_begin(ring, 2);
1100         if (ret)
1101                 return ret;
1102
1103         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1104         /* bits 0-7 of the command are the DWord length on GEN6+ */
1105         intel_ring_emit(ring, offset);
1106         intel_ring_advance(ring);
1107
1108         return 0;
1109 }
1110
1111 static bool
1112 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1113 {
1114         return gen6_ring_get_irq(ring,
1115                                  GT_USER_INTERRUPT,
1116                                  GEN6_RENDER_USER_INTERRUPT);
1117 }
1118
1119 static void
1120 gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1121 {
1122         return gen6_ring_put_irq(ring,
1123                                  GT_USER_INTERRUPT,
1124                                  GEN6_RENDER_USER_INTERRUPT);
1125 }
1126
1127 static bool
1128 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1129 {
1130         return gen6_ring_get_irq(ring,
1131                                  GT_GEN6_BSD_USER_INTERRUPT,
1132                                  GEN6_BSD_USER_INTERRUPT);
1133 }
1134
1135 static void
1136 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1137 {
1138         return gen6_ring_put_irq(ring,
1139                                  GT_GEN6_BSD_USER_INTERRUPT,
1140                                  GEN6_BSD_USER_INTERRUPT);
1141 }
1142
1143 /* ring buffer for the Video Codec (BSD) engine on Gen6+ */
1144 static const struct intel_ring_buffer gen6_bsd_ring = {
1145         .name                   = "gen6 bsd ring",
1146         .id                     = RING_BSD,
1147         .mmio_base              = GEN6_BSD_RING_BASE,
1148         .size                   = 32 * PAGE_SIZE,
1149         .init                   = init_ring_common,
1150         .write_tail             = gen6_bsd_ring_write_tail,
1151         .flush                  = gen6_ring_flush,
1152         .add_request            = gen6_add_request,
1153         .get_seqno              = ring_get_seqno,
1154         .irq_get                = gen6_bsd_ring_get_irq,
1155         .irq_put                = gen6_bsd_ring_put_irq,
1156         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1157 };
1158
1159 /* Blitter support (SandyBridge+) */
1160
1161 static bool
1162 blt_ring_get_irq(struct intel_ring_buffer *ring)
1163 {
1164         return gen6_ring_get_irq(ring,
1165                                  GT_BLT_USER_INTERRUPT,
1166                                  GEN6_BLITTER_USER_INTERRUPT);
1167 }
1168
1169 static void
1170 blt_ring_put_irq(struct intel_ring_buffer *ring)
1171 {
1172         gen6_ring_put_irq(ring,
1173                           GT_BLT_USER_INTERRUPT,
1174                           GEN6_BLITTER_USER_INTERRUPT);
1175 }
1176
1177
1178 /* Workaround for some steppings of SNB:
1179  * each time the BLT engine's ring tail is moved,
1180  * the first command parsed from the ring
1181  * must be MI_BATCH_BUFFER_START.
1182  */
1183 #define NEED_BLT_WORKAROUND(dev) \
1184         (IS_GEN6(dev) && (dev->pdev->revision < 8))
1185
1186 static inline struct drm_i915_gem_object *
1187 to_blt_workaround(struct intel_ring_buffer *ring)
1188 {
1189         return ring->private;
1190 }
1191
1192 static int blt_ring_init(struct intel_ring_buffer *ring)
1193 {
1194         if (NEED_BLT_WORKAROUND(ring->dev)) {
1195                 struct drm_i915_gem_object *obj;
1196                 u32 *ptr;
1197                 int ret;
1198
1199                 obj = i915_gem_alloc_object(ring->dev, 4096);
1200                 if (obj == NULL)
1201                         return -ENOMEM;
1202
1203                 ret = i915_gem_object_pin(obj, 4096, true);
1204                 if (ret) {
1205                         drm_gem_object_unreference(&obj->base);
1206                         return ret;
1207                 }
1208
1209                 ptr = kmap(obj->pages[0]);
1210                 *ptr++ = MI_BATCH_BUFFER_END;
1211                 *ptr++ = MI_NOOP;
1212                 kunmap(obj->pages[0]);
1213
1214                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1215                 if (ret) {
1216                         i915_gem_object_unpin(obj);
1217                         drm_gem_object_unreference(&obj->base);
1218                         return ret;
1219                 }
1220
1221                 ring->private = obj;
1222         }
1223
1224         return init_ring_common(ring);
1225 }
1226
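/*
 * With the SNB workaround active, every command sequence is prefixed
 * with a jump into the pre-allocated batch (which ends immediately), so
 * the first command parsed after a tail move is always
 * MI_BATCH_BUFFER_START.
 */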
1227 static int blt_ring_begin(struct intel_ring_buffer *ring,
1228                           int num_dwords)
1229 {
1230         if (ring->private) {
1231                 int ret = intel_ring_begin(ring, num_dwords+2);
1232                 if (ret)
1233                         return ret;
1234
1235                 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1236                 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1237
1238                 return 0;
1239         } else
1240                 return intel_ring_begin(ring, 4);
1241 }
1242
1243 static int blt_ring_flush(struct intel_ring_buffer *ring,
1244                           u32 invalidate, u32 flush)
1245 {
1246         uint32_t cmd;
1247         int ret;
1248
1249         ret = blt_ring_begin(ring, 4);
1250         if (ret)
1251                 return ret;
1252
1253         cmd = MI_FLUSH_DW;
1254         if (invalidate & I915_GEM_DOMAIN_RENDER)
1255                 cmd |= MI_INVALIDATE_TLB;
1256         intel_ring_emit(ring, cmd);
1257         intel_ring_emit(ring, 0);
1258         intel_ring_emit(ring, 0);
1259         intel_ring_emit(ring, MI_NOOP);
1260         intel_ring_advance(ring);
1261         return 0;
1262 }
1263
1264 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1265 {
1266         if (!ring->private)
1267                 return;
1268
1269         i915_gem_object_unpin(ring->private);
1270         drm_gem_object_unreference(ring->private);
1271         ring->private = NULL;
1272 }
1273
1274 static const struct intel_ring_buffer gen6_blt_ring = {
1275         .name                   = "blt ring",
1276         .id                     = RING_BLT,
1277         .mmio_base              = BLT_RING_BASE,
1278         .size                   = 32 * PAGE_SIZE,
1279         .init                   = blt_ring_init,
1280         .write_tail             = ring_write_tail,
1281         .flush                  = blt_ring_flush,
1282         .add_request            = gen6_add_request,
1283         .get_seqno              = ring_get_seqno,
1284         .irq_get                = blt_ring_get_irq,
1285         .irq_put                = blt_ring_put_irq,
1286         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1287         .cleanup                = blt_ring_cleanup,
1288 };
1289
1290 int intel_init_render_ring_buffer(struct drm_device *dev)
1291 {
1292         drm_i915_private_t *dev_priv = dev->dev_private;
1293         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1294
1295         *ring = render_ring;
1296         if (INTEL_INFO(dev)->gen >= 6) {
1297                 ring->add_request = gen6_add_request;
1298                 ring->irq_get = gen6_render_ring_get_irq;
1299                 ring->irq_put = gen6_render_ring_put_irq;
1300         } else if (IS_GEN5(dev)) {
1301                 ring->add_request = pc_render_add_request;
1302                 ring->get_seqno = pc_render_get_seqno;
1303         }
1304
1305         if (!I915_NEED_GFX_HWS(dev)) {
1306                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1307                 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1308         }
1309
1310         return intel_init_ring_buffer(dev, ring);
1311 }
1312
1313 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1314 {
1315         drm_i915_private_t *dev_priv = dev->dev_private;
1316         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1317
1318         *ring = render_ring;
1319         if (INTEL_INFO(dev)->gen >= 6) {
1320                 ring->add_request = gen6_add_request;
1321                 ring->irq_get = gen6_render_ring_get_irq;
1322                 ring->irq_put = gen6_render_ring_put_irq;
1323         } else if (IS_GEN5(dev)) {
1324                 ring->add_request = pc_render_add_request;
1325                 ring->get_seqno = pc_render_get_seqno;
1326         }
1327
1328         if (!I915_NEED_GFX_HWS(dev))
1329                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1330
1331         ring->dev = dev;
1332         INIT_LIST_HEAD(&ring->active_list);
1333         INIT_LIST_HEAD(&ring->request_list);
1334         INIT_LIST_HEAD(&ring->gpu_write_list);
1335
1336         ring->size = size;
1337         ring->effective_size = ring->size;
1338         if (IS_I830(ring->dev))
1339                 ring->effective_size -= 128;
1340
1341         ring->map.offset = start;
1342         ring->map.size = size;
1343         ring->map.type = 0;
1344         ring->map.flags = 0;
1345         ring->map.mtrr = 0;
1346
1347         drm_core_ioremap_wc(&ring->map, dev);
1348         if (ring->map.handle == NULL) {
1349                 DRM_ERROR("cannot ioremap virtual address for"
1350                           " ring buffer\n");
1351                 return -ENOMEM;
1352         }
1353
1354         ring->virtual_start = (void __force __iomem *)ring->map.handle;
1355         return 0;
1356 }
1357
1358 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1359 {
1360         drm_i915_private_t *dev_priv = dev->dev_private;
1361         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1362
1363         if (IS_GEN6(dev) || IS_GEN7(dev))
1364                 *ring = gen6_bsd_ring;
1365         else
1366                 *ring = bsd_ring;
1367
1368         return intel_init_ring_buffer(dev, ring);
1369 }
1370
1371 int intel_init_blt_ring_buffer(struct drm_device *dev)
1372 {
1373         drm_i915_private_t *dev_priv = dev->dev_private;
1374         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1375
1376         *ring = gen6_blt_ring;
1377
1378         return intel_init_ring_buffer(dev, ring);
1379 }