Merge branch 'stable-3.2' into pandora-3.2
[pandora-kernel.git] / drivers / gpu / drm / i915 / intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drv.h"
33 #include "i915_drm.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 /*
38  * 965+ support PIPE_CONTROL commands, which provide finer grained control
39  * over cache flushing.
40  */
41 struct pipe_control {
42         struct drm_i915_gem_object *obj;
43         volatile u32 *cpu_page;
44         u32 gtt_offset;
45 };
46
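/*
 * ring_space() reports how many bytes may be written before the tail
 * would catch up with the head.  The "- 8" presumably keeps a couple of
 * dwords of slack so head and tail never become equal on a full ring,
 * since head == tail reads back as an empty ring.  Worked example
 * (illustrative only): with a 4096 byte ring, head at offset 64 and
 * tail at 128, space = 64 - (128 + 8) = -72, which wraps to 4024.
 */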
47 static inline int ring_space(struct intel_ring_buffer *ring)
48 {
49         int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
50         if (space < 0)
51                 space += ring->size;
52         return space;
53 }
54
55 static int
56 render_ring_flush(struct intel_ring_buffer *ring,
57                   u32   invalidate_domains,
58                   u32   flush_domains)
59 {
60         struct drm_device *dev = ring->dev;
61         u32 cmd;
62         int ret;
63
64         /*
65          * read/write caches:
66          *
67          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
68          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
69          * also flushed at 2d versus 3d pipeline switches.
70          *
71          * read-only caches:
72          *
73          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
74          * MI_READ_FLUSH is set, and is always flushed on 965.
75          *
76          * I915_GEM_DOMAIN_COMMAND may not exist?
77          *
78          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
79          * invalidated when MI_EXE_FLUSH is set.
80          *
81          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
82          * invalidated with every MI_FLUSH.
83          *
84          * TLBs:
85          *
86          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
87          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
88          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
89          * are flushed at any MI_FLUSH.
90          */
91
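        /*
         * For example, an invalidate of RENDER | SAMPLER on a pre-965
         * part becomes MI_FLUSH with MI_NO_WRITE_FLUSH cleared and
         * MI_READ_FLUSH set; on 965+ MI_READ_FLUSH is left alone because
         * the sampler cache is flushed there unconditionally.
         */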
92         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
93         if ((invalidate_domains|flush_domains) &
94             I915_GEM_DOMAIN_RENDER)
95                 cmd &= ~MI_NO_WRITE_FLUSH;
96         if (INTEL_INFO(dev)->gen < 4) {
97                 /*
98                  * On the 965, the sampler cache always gets flushed
99                  * and this bit is reserved.
100                  */
101                 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
102                         cmd |= MI_READ_FLUSH;
103         }
104         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
105                 cmd |= MI_EXE_FLUSH;
106
107         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
108             (IS_G4X(dev) || IS_GEN5(dev)))
109                 cmd |= MI_INVALIDATE_ISP;
110
111         ret = intel_ring_begin(ring, 2);
112         if (ret)
113                 return ret;
114
115         intel_ring_emit(ring, cmd);
116         intel_ring_emit(ring, MI_NOOP);
117         intel_ring_advance(ring);
118
119         return 0;
120 }
121
122 /**
123  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
124  * implementing two workarounds on gen6.  From section 1.4.7.1
125  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
126  *
127  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
128  * produced by non-pipelined state commands), software needs to first
129  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
130  * 0.
131  *
132  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
133  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
134  *
135  * And the workaround for these two requires this workaround first:
136  *
137  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
138  * BEFORE the pipe-control with a post-sync op and no write-cache
139  * flushes.
140  *
141  * And this last workaround is tricky because of the requirements on
142  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
143  * volume 2 part 1:
144  *
145  *     "1 of the following must also be set:
146  *      - Render Target Cache Flush Enable ([12] of DW1)
147  *      - Depth Cache Flush Enable ([0] of DW1)
148  *      - Stall at Pixel Scoreboard ([1] of DW1)
149  *      - Depth Stall ([13] of DW1)
150  *      - Post-Sync Operation ([13] of DW1)
151  *      - Notify Enable ([8] of DW1)"
152  *
153  * The cache flushes require the workaround flush that triggered this
154  * one, so we can't use it.  Depth stall would trigger the same.
155  * Post-sync nonzero is what triggered this second workaround, so we
156  * can't use that one either.  Notify enable is IRQs, which aren't
157  * really our business.  That leaves only stall at scoreboard.
158  */
159 static int
160 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
161 {
162         struct pipe_control *pc = ring->private;
163         u32 scratch_addr = pc->gtt_offset + 128;
164         int ret;
165
166
167         ret = intel_ring_begin(ring, 6);
168         if (ret)
169                 return ret;
170
171         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
172         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
173                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
174         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
175         intel_ring_emit(ring, 0); /* low dword */
176         intel_ring_emit(ring, 0); /* high dword */
177         intel_ring_emit(ring, MI_NOOP);
178         intel_ring_advance(ring);
179
180         ret = intel_ring_begin(ring, 6);
181         if (ret)
182                 return ret;
183
184         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
185         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
186         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
187         intel_ring_emit(ring, 0);
188         intel_ring_emit(ring, 0);
189         intel_ring_emit(ring, MI_NOOP);
190         intel_ring_advance(ring);
191
192         return 0;
193 }
194
195 static int
196 gen6_render_ring_flush(struct intel_ring_buffer *ring,
197                          u32 invalidate_domains, u32 flush_domains)
198 {
199         u32 flags = 0;
200         struct pipe_control *pc = ring->private;
201         u32 scratch_addr = pc->gtt_offset + 128;
202         int ret;
203
204         /* Force SNB workarounds for PIPE_CONTROL flushes */
205         ret = intel_emit_post_sync_nonzero_flush(ring);
            if (ret)
                    return ret;
206
207         /* Just flush everything.  Experiments have shown that reducing the
208          * number of bits based on the write domains has little performance
209          * impact.
210          */
211         flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
212         flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
213         flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
214         flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
215         flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
216         flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
217         flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
218
219         ret = intel_ring_begin(ring, 6);
220         if (ret)
221                 return ret;
222
223         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
224         intel_ring_emit(ring, flags);
225         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
226         intel_ring_emit(ring, 0); /* lower dword */
227         intel_ring_emit(ring, 0); /* upper dword */
228         intel_ring_emit(ring, MI_NOOP);
229         intel_ring_advance(ring);
230
231         return 0;
232 }
233
234 static void ring_write_tail(struct intel_ring_buffer *ring,
235                             u32 value)
236 {
237         drm_i915_private_t *dev_priv = ring->dev->dev_private;
238         I915_WRITE_TAIL(ring, value);
239 }
240
241 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
242 {
243         drm_i915_private_t *dev_priv = ring->dev->dev_private;
244         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
245                         RING_ACTHD(ring->mmio_base) : ACTHD;
246
247         return I915_READ(acthd_reg);
248 }
249
250 static int init_ring_common(struct intel_ring_buffer *ring)
251 {
252         struct drm_device *dev = ring->dev;
253         drm_i915_private_t *dev_priv = dev->dev_private;
254         struct drm_i915_gem_object *obj = ring->obj;
255         int ret = 0;
256         u32 head;
257
258         if (HAS_FORCE_WAKE(dev))
259                 gen6_gt_force_wake_get(dev_priv);
260
261         /* Stop the ring if it's running. */
262         I915_WRITE_CTL(ring, 0);
263         I915_WRITE_HEAD(ring, 0);
264         ring->write_tail(ring, 0);
265
266         head = I915_READ_HEAD(ring) & HEAD_ADDR;
267
268         /* G45 ring initialization fails to reset head to zero */
269         if (head != 0) {
270                 DRM_DEBUG_KMS("%s head not reset to zero "
271                               "ctl %08x head %08x tail %08x start %08x\n",
272                               ring->name,
273                               I915_READ_CTL(ring),
274                               I915_READ_HEAD(ring),
275                               I915_READ_TAIL(ring),
276                               I915_READ_START(ring));
277
278                 I915_WRITE_HEAD(ring, 0);
279
280                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
281                         DRM_ERROR("failed to set %s head to zero "
282                                   "ctl %08x head %08x tail %08x start %08x\n",
283                                   ring->name,
284                                   I915_READ_CTL(ring),
285                                   I915_READ_HEAD(ring),
286                                   I915_READ_TAIL(ring),
287                                   I915_READ_START(ring));
288                 }
289         }
290
291         /* Initialize the ring. This must happen _after_ we've cleared the ring
292          * registers with the above sequence (the readback of the HEAD registers
293          * also enforces ordering), otherwise the hw might lose the new ring
294          * register values. */
295         I915_WRITE_START(ring, obj->gtt_offset);
296         I915_WRITE_CTL(ring,
297                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
298                         | RING_VALID);
299
300         /* If the head is still not zero, the ring is dead */
301         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
302                      I915_READ_START(ring) == obj->gtt_offset &&
303                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
304                 DRM_ERROR("%s initialization failed "
305                                 "ctl %08x head %08x tail %08x start %08x\n",
306                                 ring->name,
307                                 I915_READ_CTL(ring),
308                                 I915_READ_HEAD(ring),
309                                 I915_READ_TAIL(ring),
310                                 I915_READ_START(ring));
311                 ret = -EIO;
312                 goto out;
313         }
314
315         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
316                 i915_kernel_lost_context(ring->dev);
317         else {
318                 ring->head = I915_READ_HEAD(ring);
319                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
320                 ring->space = ring_space(ring);
321         }
322
323 out:
324         if (HAS_FORCE_WAKE(dev))
325                 gen6_gt_force_wake_put(dev_priv);
326
327         return ret;
328 }
329
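/*
 * The pipe_control scratch page set up below is a single LLC-cached GEM
 * page pinned into the GTT.  pc->gtt_offset is handed to PIPE_CONTROL
 * commands as the qword-write/scratch address (see pc_render_add_request()
 * and the gen6 flush paths), while pc->cpu_page[0] is where
 * pc_render_get_seqno() reads the last written seqno back.
 */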
330 static int
331 init_pipe_control(struct intel_ring_buffer *ring)
332 {
333         struct pipe_control *pc;
334         struct drm_i915_gem_object *obj;
335         int ret;
336
337         if (ring->private)
338                 return 0;
339
340         pc = kmalloc(sizeof(*pc), GFP_KERNEL);
341         if (!pc)
342                 return -ENOMEM;
343
344         obj = i915_gem_alloc_object(ring->dev, 4096);
345         if (obj == NULL) {
346                 DRM_ERROR("Failed to allocate seqno page\n");
347                 ret = -ENOMEM;
348                 goto err;
349         }
350
351         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
352
353         ret = i915_gem_object_pin(obj, 4096, true);
354         if (ret)
355                 goto err_unref;
356
357         pc->gtt_offset = obj->gtt_offset;
358         pc->cpu_page =  kmap(obj->pages[0]);
359         if (pc->cpu_page == NULL)
360                 goto err_unpin;
361
362         pc->obj = obj;
363         ring->private = pc;
364         return 0;
365
366 err_unpin:
367         i915_gem_object_unpin(obj);
368 err_unref:
369         drm_gem_object_unreference(&obj->base);
370 err:
371         kfree(pc);
372         return ret;
373 }
374
375 static void
376 cleanup_pipe_control(struct intel_ring_buffer *ring)
377 {
378         struct pipe_control *pc = ring->private;
379         struct drm_i915_gem_object *obj;
380
381         if (!ring->private)
382                 return;
383
384         obj = pc->obj;
385         kunmap(obj->pages[0]);
386         i915_gem_object_unpin(obj);
387         drm_gem_object_unreference(&obj->base);
388
389         kfree(pc);
390         ring->private = NULL;
391 }
392
393 static int init_render_ring(struct intel_ring_buffer *ring)
394 {
395         struct drm_device *dev = ring->dev;
396         struct drm_i915_private *dev_priv = dev->dev_private;
397         int ret = init_ring_common(ring);
398
399         if (INTEL_INFO(dev)->gen > 3) {
400                 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
401                 if (IS_GEN6(dev) || IS_GEN7(dev))
402                         mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
403                 I915_WRITE(MI_MODE, mode);
404                 if (IS_GEN7(dev))
405                         I915_WRITE(GFX_MODE_GEN7,
406                                    GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
407                                    GFX_MODE_ENABLE(GFX_REPLAY_MODE));
408         }
409
410         if (INTEL_INFO(dev)->gen >= 5) {
411                 ret = init_pipe_control(ring);
412                 if (ret)
413                         return ret;
414         }
415
416
417         if (IS_GEN6(dev)) {
418                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
419                  * "If this bit is set, STCunit will have LRA as replacement
420                  *  policy. [...] This bit must be reset.  LRA replacement
421                  *  policy is not supported."
422                  */
423                 I915_WRITE(CACHE_MODE_0,
424                            CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
425         }
426
427         if (INTEL_INFO(dev)->gen >= 6) {
428                 I915_WRITE(INSTPM,
429                            INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
430         }
431
432         return ret;
433 }
434
435 static void render_ring_cleanup(struct intel_ring_buffer *ring)
436 {
437         if (!ring->private)
438                 return;
439
440         cleanup_pipe_control(ring);
441 }
442
443 static void
444 update_mboxes(struct intel_ring_buffer *ring,
445             u32 seqno,
446             u32 mmio_offset)
447 {
448         intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
449                               MI_SEMAPHORE_GLOBAL_GTT |
450                               MI_SEMAPHORE_REGISTER |
451                               MI_SEMAPHORE_UPDATE);
452         intel_ring_emit(ring, seqno);
453         intel_ring_emit(ring, mmio_offset);
454 }
455
456 /**
457  * gen6_add_request - Update the semaphore mailbox registers
458  *
459  * @ring: ring that is adding a request
460  * @seqno: pointer that receives the seqno written into the ring
461  *
462  * Update the mailbox registers in the *other* rings with the current seqno.
463  * This acts like a signal in the canonical semaphore.
464  */
465 static int
466 gen6_add_request(struct intel_ring_buffer *ring,
467                  u32 *seqno)
468 {
469         u32 mbox1_reg;
470         u32 mbox2_reg;
471         int ret;
472
473         ret = intel_ring_begin(ring, 10);
474         if (ret)
475                 return ret;
476
477         mbox1_reg = ring->signal_mbox[0];
478         mbox2_reg = ring->signal_mbox[1];
479
480         *seqno = i915_gem_next_request_seqno(ring);
481
482         update_mboxes(ring, *seqno, mbox1_reg);
483         update_mboxes(ring, *seqno, mbox2_reg);
484         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
485         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
486         intel_ring_emit(ring, *seqno);
487         intel_ring_emit(ring, MI_USER_INTERRUPT);
488         intel_ring_advance(ring);
489
490         return 0;
491 }
492
493 /**
494  * intel_ring_sync - sync the waiter to the signaller on seqno
495  *
496  * @waiter: ring that is waiting
497  * @signaller: ring which has signalled, or will signal
 * @ring: id of the waiting ring, used to select the signaller's semaphore register
498  * @seqno: seqno which the waiter will block on
499  */
500 static int
501 intel_ring_sync(struct intel_ring_buffer *waiter,
502                 struct intel_ring_buffer *signaller,
503                 int ring,
504                 u32 seqno)
505 {
506         int ret;
507         u32 dw1 = MI_SEMAPHORE_MBOX |
508                   MI_SEMAPHORE_COMPARE |
509                   MI_SEMAPHORE_REGISTER;
510
511         ret = intel_ring_begin(waiter, 4);
512         if (ret)
513                 return ret;
514
515         intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
516         intel_ring_emit(waiter, seqno);
517         intel_ring_emit(waiter, 0);
518         intel_ring_emit(waiter, MI_NOOP);
519         intel_ring_advance(waiter);
520
521         return 0;
522 }
523
524 /* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
525 int
526 render_ring_sync_to(struct intel_ring_buffer *waiter,
527                     struct intel_ring_buffer *signaller,
528                     u32 seqno)
529 {
530         WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
531         return intel_ring_sync(waiter,
532                                signaller,
533                                RCS,
534                                seqno);
535 }
536
537 /* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
538 int
539 gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
540                       struct intel_ring_buffer *signaller,
541                       u32 seqno)
542 {
543         WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
544         return intel_ring_sync(waiter,
545                                signaller,
546                                VCS,
547                                seqno);
548 }
549
550 /* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
551 int
552 gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
553                       struct intel_ring_buffer *signaller,
554                       u32 seqno)
555 {
556         WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
557         return intel_ring_sync(waiter,
558                                signaller,
559                                BCS,
560                                seqno);
561 }
562
563
564
565 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
566 do {                                                                    \
567         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
568                  PIPE_CONTROL_DEPTH_STALL);                             \
569         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
570         intel_ring_emit(ring__, 0);                                                     \
571         intel_ring_emit(ring__, 0);                                                     \
572 } while (0)
573
574 static int
575 pc_render_add_request(struct intel_ring_buffer *ring,
576                       u32 *result)
577 {
578         u32 seqno = i915_gem_next_request_seqno(ring);
579         struct pipe_control *pc = ring->private;
580         u32 scratch_addr = pc->gtt_offset + 128;
581         int ret;
582
583         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
584          * incoherent with writes to memory, i.e. completely fubar,
585          * so we need to use PIPE_NOTIFY instead.
586          *
587          * However, we also need to work around the qword write
588          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
589          * memory before requesting an interrupt.
590          */
591         ret = intel_ring_begin(ring, 32);
592         if (ret)
593                 return ret;
594
595         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
596                         PIPE_CONTROL_WRITE_FLUSH |
597                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
598         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
599         intel_ring_emit(ring, seqno);
600         intel_ring_emit(ring, 0);
601         PIPE_CONTROL_FLUSH(ring, scratch_addr);
602         scratch_addr += 128; /* write to separate cachelines */
603         PIPE_CONTROL_FLUSH(ring, scratch_addr);
604         scratch_addr += 128;
605         PIPE_CONTROL_FLUSH(ring, scratch_addr);
606         scratch_addr += 128;
607         PIPE_CONTROL_FLUSH(ring, scratch_addr);
608         scratch_addr += 128;
609         PIPE_CONTROL_FLUSH(ring, scratch_addr);
610         scratch_addr += 128;
611         PIPE_CONTROL_FLUSH(ring, scratch_addr);
612         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
613                         PIPE_CONTROL_WRITE_FLUSH |
614                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
615                         PIPE_CONTROL_NOTIFY);
616         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
617         intel_ring_emit(ring, seqno);
618         intel_ring_emit(ring, 0);
619         intel_ring_advance(ring);
620
621         *result = seqno;
622         return 0;
623 }
624
625 static int
626 render_ring_add_request(struct intel_ring_buffer *ring,
627                         u32 *result)
628 {
629         u32 seqno = i915_gem_next_request_seqno(ring);
630         int ret;
631
632         ret = intel_ring_begin(ring, 4);
633         if (ret)
634                 return ret;
635
636         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
637         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
638         intel_ring_emit(ring, seqno);
639         intel_ring_emit(ring, MI_USER_INTERRUPT);
640         intel_ring_advance(ring);
641
642         *result = seqno;
643         return 0;
644 }
645
646 static u32
647 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
648 {
649         struct drm_device *dev = ring->dev;
650
651         /* Workaround to force correct ordering between irq and seqno writes on
652          * ivb (and maybe also on snb) by reading from a CS register (like
653          * ACTHD) before reading the status page. */
654         if (IS_GEN7(dev))
655                 intel_ring_get_active_head(ring);
656         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
657 }
658
659 static u32
660 ring_get_seqno(struct intel_ring_buffer *ring)
661 {
662         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
663 }
664
665 static u32
666 pc_render_get_seqno(struct intel_ring_buffer *ring)
667 {
668         struct pipe_control *pc = ring->private;
669         return pc->cpu_page[0];
670 }
671
672 static void
673 ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
674 {
675         dev_priv->gt_irq_mask &= ~mask;
676         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
677         POSTING_READ(GTIMR);
678 }
679
680 static void
681 ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
682 {
683         dev_priv->gt_irq_mask |= mask;
684         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
685         POSTING_READ(GTIMR);
686 }
687
688 static void
689 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
690 {
691         dev_priv->irq_mask &= ~mask;
692         I915_WRITE(IMR, dev_priv->irq_mask);
693         POSTING_READ(IMR);
694 }
695
696 static void
697 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
698 {
699         dev_priv->irq_mask |= mask;
700         I915_WRITE(IMR, dev_priv->irq_mask);
701         POSTING_READ(IMR);
702 }
703
704 static bool
705 render_ring_get_irq(struct intel_ring_buffer *ring)
706 {
707         struct drm_device *dev = ring->dev;
708         drm_i915_private_t *dev_priv = dev->dev_private;
709
710         if (!dev->irq_enabled)
711                 return false;
712
713         spin_lock(&ring->irq_lock);
714         if (ring->irq_refcount++ == 0) {
715                 if (HAS_PCH_SPLIT(dev))
716                         ironlake_enable_irq(dev_priv,
717                                             GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
718                 else
719                         i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
720         }
721         spin_unlock(&ring->irq_lock);
722
723         return true;
724 }
725
726 static void
727 render_ring_put_irq(struct intel_ring_buffer *ring)
728 {
729         struct drm_device *dev = ring->dev;
730         drm_i915_private_t *dev_priv = dev->dev_private;
731
732         spin_lock(&ring->irq_lock);
733         if (--ring->irq_refcount == 0) {
734                 if (HAS_PCH_SPLIT(dev))
735                         ironlake_disable_irq(dev_priv,
736                                              GT_USER_INTERRUPT |
737                                              GT_PIPE_NOTIFY);
738                 else
739                         i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
740         }
741         spin_unlock(&ring->irq_lock);
742 }
743
744 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
745 {
746         struct drm_device *dev = ring->dev;
747         drm_i915_private_t *dev_priv = ring->dev->dev_private;
748         u32 mmio = 0;
749
750         /* The ring status page addresses are no longer next to the rest of
751          * the ring registers as of gen7.
752          */
753         if (IS_GEN7(dev)) {
754                 switch (ring->id) {
755                 case RING_RENDER:
756                         mmio = RENDER_HWS_PGA_GEN7;
757                         break;
758                 case RING_BLT:
759                         mmio = BLT_HWS_PGA_GEN7;
760                         break;
761                 case RING_BSD:
762                         mmio = BSD_HWS_PGA_GEN7;
763                         break;
764                 }
765         } else if (IS_GEN6(ring->dev)) {
766                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
767         } else {
768                 mmio = RING_HWS_PGA(ring->mmio_base);
769         }
770
771         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
772         POSTING_READ(mmio);
773 }
774
775 static int
776 bsd_ring_flush(struct intel_ring_buffer *ring,
777                u32     invalidate_domains,
778                u32     flush_domains)
779 {
780         int ret;
781
782         ret = intel_ring_begin(ring, 2);
783         if (ret)
784                 return ret;
785
786         intel_ring_emit(ring, MI_FLUSH);
787         intel_ring_emit(ring, MI_NOOP);
788         intel_ring_advance(ring);
789         return 0;
790 }
791
792 static int
793 ring_add_request(struct intel_ring_buffer *ring,
794                  u32 *result)
795 {
796         u32 seqno;
797         int ret;
798
799         ret = intel_ring_begin(ring, 4);
800         if (ret)
801                 return ret;
802
803         seqno = i915_gem_next_request_seqno(ring);
804
805         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
806         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
807         intel_ring_emit(ring, seqno);
808         intel_ring_emit(ring, MI_USER_INTERRUPT);
809         intel_ring_advance(ring);
810
811         *result = seqno;
812         return 0;
813 }
814
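/*
 * The irq get/put helpers below are refcounted per ring: only the 0 -> 1
 * transition unmasks the interrupt source and only the final put masks it
 * again, so nested users stay balanced.  gen6_ring_get_irq() also unmasks
 * the ring's own IMR, and on gen7 it holds a forcewake reference for the
 * whole get..put interval (see the comment about lost blt/bsd irqs below).
 */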
815 static bool
816 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
817 {
818         struct drm_device *dev = ring->dev;
819         drm_i915_private_t *dev_priv = dev->dev_private;
820
821         if (!dev->irq_enabled)
822                return false;
823
824         /* It looks like we need to prevent the gt from suspending while waiting
825          * for a notify irq, otherwise irqs seem to get lost on at least the
826          * blt/bsd rings on ivb. */
827         if (IS_GEN7(dev))
828                 gen6_gt_force_wake_get(dev_priv);
829
830         spin_lock(&ring->irq_lock);
831         if (ring->irq_refcount++ == 0) {
832                 ring->irq_mask &= ~rflag;
833                 I915_WRITE_IMR(ring, ring->irq_mask);
834                 ironlake_enable_irq(dev_priv, gflag);
835         }
836         spin_unlock(&ring->irq_lock);
837
838         return true;
839 }
840
841 static void
842 gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
843 {
844         struct drm_device *dev = ring->dev;
845         drm_i915_private_t *dev_priv = dev->dev_private;
846
847         spin_lock(&ring->irq_lock);
848         if (--ring->irq_refcount == 0) {
849                 ring->irq_mask |= rflag;
850                 I915_WRITE_IMR(ring, ring->irq_mask);
851                 ironlake_disable_irq(dev_priv, gflag);
852         }
853         spin_unlock(&ring->irq_lock);
854
855         if (IS_GEN7(dev))
856                 gen6_gt_force_wake_put(dev_priv);
857 }
858
859 static bool
860 bsd_ring_get_irq(struct intel_ring_buffer *ring)
861 {
862         struct drm_device *dev = ring->dev;
863         drm_i915_private_t *dev_priv = dev->dev_private;
864
865         if (!dev->irq_enabled)
866                 return false;
867
868         spin_lock(&ring->irq_lock);
869         if (ring->irq_refcount++ == 0) {
870                 if (IS_G4X(dev))
871                         i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
872                 else
873                         ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
874         }
875         spin_unlock(&ring->irq_lock);
876
877         return true;
878 }
879 static void
880 bsd_ring_put_irq(struct intel_ring_buffer *ring)
881 {
882         struct drm_device *dev = ring->dev;
883         drm_i915_private_t *dev_priv = dev->dev_private;
884
885         spin_lock(&ring->irq_lock);
886         if (--ring->irq_refcount == 0) {
887                 if (IS_G4X(dev))
888                         i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
889                 else
890                         ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
891         }
892         spin_unlock(&ring->irq_lock);
893 }
894
895 static int
896 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
897 {
898         int ret;
899
900         ret = intel_ring_begin(ring, 2);
901         if (ret)
902                 return ret;
903
904         intel_ring_emit(ring,
905                         MI_BATCH_BUFFER_START | (2 << 6) |
906                         MI_BATCH_NON_SECURE_I965);
907         intel_ring_emit(ring, offset);
908         intel_ring_advance(ring);
909
910         return 0;
911 }
912
913 static int
914 render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
915                                 u32 offset, u32 len)
916 {
917         struct drm_device *dev = ring->dev;
918         int ret;
919
920         if (IS_I830(dev) || IS_845G(dev)) {
921                 ret = intel_ring_begin(ring, 4);
922                 if (ret)
923                         return ret;
924
925                 intel_ring_emit(ring, MI_BATCH_BUFFER);
926                 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
927                 intel_ring_emit(ring, offset + len - 8);
928                 intel_ring_emit(ring, 0);
929         } else {
930                 ret = intel_ring_begin(ring, 2);
931                 if (ret)
932                         return ret;
933
934                 if (INTEL_INFO(dev)->gen >= 4) {
935                         intel_ring_emit(ring,
936                                         MI_BATCH_BUFFER_START | (2 << 6) |
937                                         MI_BATCH_NON_SECURE_I965);
938                         intel_ring_emit(ring, offset);
939                 } else {
940                         intel_ring_emit(ring,
941                                         MI_BATCH_BUFFER_START | (2 << 6));
942                         intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
943                 }
944         }
945         intel_ring_advance(ring);
946
947         return 0;
948 }
949
950 static void cleanup_status_page(struct intel_ring_buffer *ring)
951 {
952         drm_i915_private_t *dev_priv = ring->dev->dev_private;
953         struct drm_i915_gem_object *obj;
954
955         obj = ring->status_page.obj;
956         if (obj == NULL)
957                 return;
958
959         kunmap(obj->pages[0]);
960         i915_gem_object_unpin(obj);
961         drm_gem_object_unreference(&obj->base);
962         ring->status_page.obj = NULL;
963
964         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
965 }
966
967 static int init_status_page(struct intel_ring_buffer *ring)
968 {
969         struct drm_device *dev = ring->dev;
970         drm_i915_private_t *dev_priv = dev->dev_private;
971         struct drm_i915_gem_object *obj;
972         int ret;
973
974         obj = i915_gem_alloc_object(dev, 4096);
975         if (obj == NULL) {
976                 DRM_ERROR("Failed to allocate status page\n");
977                 ret = -ENOMEM;
978                 goto err;
979         }
980
981         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
982
983         ret = i915_gem_object_pin(obj, 4096, true);
984         if (ret != 0) {
985                 goto err_unref;
986         }
987
988         ring->status_page.gfx_addr = obj->gtt_offset;
989         ring->status_page.page_addr = kmap(obj->pages[0]);
990         if (ring->status_page.page_addr == NULL) {
991                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
992                 goto err_unpin;
993         }
994         ring->status_page.obj = obj;
995         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
996
997         intel_ring_setup_status_page(ring);
998         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
999                         ring->name, ring->status_page.gfx_addr);
1000
1001         return 0;
1002
1003 err_unpin:
1004         i915_gem_object_unpin(obj);
1005 err_unref:
1006         drm_gem_object_unreference(&obj->base);
1007 err:
1008         return ret;
1009 }
1010
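/*
 * Common ring setup: allocate the hardware status page (when the chipset
 * needs one), allocate and pin the ring object itself, map it write-combined
 * through the GTT aperture, run the ring's ->init() hook, and finally shrink
 * effective_size on i830/845G so the tail never lands in the last two
 * cachelines (see the erratum note below).
 */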
1011 int intel_init_ring_buffer(struct drm_device *dev,
1012                            struct intel_ring_buffer *ring)
1013 {
1014         struct drm_i915_gem_object *obj;
1015         int ret;
1016
1017         ring->dev = dev;
1018         INIT_LIST_HEAD(&ring->active_list);
1019         INIT_LIST_HEAD(&ring->request_list);
1020         INIT_LIST_HEAD(&ring->gpu_write_list);
1021
1022         init_waitqueue_head(&ring->irq_queue);
1023         spin_lock_init(&ring->irq_lock);
1024         ring->irq_mask = ~0;
1025
1026         if (I915_NEED_GFX_HWS(dev)) {
1027                 ret = init_status_page(ring);
1028                 if (ret)
1029                         return ret;
1030         }
1031
1032         obj = i915_gem_alloc_object(dev, ring->size);
1033         if (obj == NULL) {
1034                 DRM_ERROR("Failed to allocate ringbuffer\n");
1035                 ret = -ENOMEM;
1036                 goto err_hws;
1037         }
1038
1039         ring->obj = obj;
1040
1041         ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
1042         if (ret)
1043                 goto err_unref;
1044
1045         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1046         if (ret)
1047                 goto err_unpin;
1048
1049         ring->map.size = ring->size;
1050         ring->map.offset = dev->agp->base + obj->gtt_offset;
1051         ring->map.type = 0;
1052         ring->map.flags = 0;
1053         ring->map.mtrr = 0;
1054
1055         drm_core_ioremap_wc(&ring->map, dev);
1056         if (ring->map.handle == NULL) {
1057                 DRM_ERROR("Failed to map ringbuffer.\n");
1058                 ret = -EINVAL;
1059                 goto err_unpin;
1060         }
1061
1062         ring->virtual_start = ring->map.handle;
1063         ret = ring->init(ring);
1064         if (ret)
1065                 goto err_unmap;
1066
1067         /* Workaround an erratum on the i830 which causes a hang if
1068          * the TAIL pointer points to within the last 2 cachelines
1069          * of the buffer.
1070          */
1071         ring->effective_size = ring->size;
1072         if (IS_I830(ring->dev) || IS_845G(ring->dev))
1073                 ring->effective_size -= 128;
1074
1075         return 0;
1076
1077 err_unmap:
1078         drm_core_ioremapfree(&ring->map, dev);
1079 err_unpin:
1080         i915_gem_object_unpin(obj);
1081 err_unref:
1082         drm_gem_object_unreference(&obj->base);
1083         ring->obj = NULL;
1084 err_hws:
1085         cleanup_status_page(ring);
1086         return ret;
1087 }
1088
1089 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1090 {
1091         struct drm_i915_private *dev_priv;
1092         int ret;
1093
1094         if (ring->obj == NULL)
1095                 return;
1096
1097         /* Disable the ring buffer. The ring must be idle at this point */
1098         dev_priv = ring->dev->dev_private;
1099         ret = intel_wait_ring_idle(ring);
1100         if (ret)
1101                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1102                           ring->name, ret);
1103
1104         I915_WRITE_CTL(ring, 0);
1105
1106         drm_core_ioremapfree(&ring->map, ring->dev);
1107
1108         i915_gem_object_unpin(ring->obj);
1109         drm_gem_object_unreference(&ring->obj->base);
1110         ring->obj = NULL;
1111
1112         if (ring->cleanup)
1113                 ring->cleanup(ring);
1114
1115         cleanup_status_page(ring);
1116 }
1117
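/*
 * When a request would run past effective_size, intel_ring_begin() uses
 * this helper to pad the rest of the ring with MI_NOOPs (the loop writes
 * two dwords per iteration, hence rem /= 8 on the byte count) and then
 * restarts emission at tail = 0.
 */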
1118 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1119 {
1120         unsigned int *virt;
1121         int rem = ring->size - ring->tail;
1122
1123         if (ring->space < rem) {
1124                 int ret = intel_wait_ring_buffer(ring, rem);
1125                 if (ret)
1126                         return ret;
1127         }
1128
1129         virt = (unsigned int *)(ring->virtual_start + ring->tail);
1130         rem /= 8;
1131         while (rem--) {
1132                 *virt++ = MI_NOOP;
1133                 *virt++ = MI_NOOP;
1134         }
1135
1136         ring->tail = 0;
1137         ring->space = ring_space(ring);
1138
1139         return 0;
1140 }
1141
1142 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1143 {
1144         struct drm_device *dev = ring->dev;
1145         struct drm_i915_private *dev_priv = dev->dev_private;
1146         unsigned long end;
1147
1148         trace_i915_ring_wait_begin(ring);
1149         end = jiffies + 3 * HZ;
1150         do {
1151                 ring->head = I915_READ_HEAD(ring);
1152                 ring->space = ring_space(ring);
1153                 if (ring->space >= n) {
1154                         trace_i915_ring_wait_end(ring);
1155                         return 0;
1156                 }
1157
1158                 if (dev->primary->master) {
1159                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1160                         if (master_priv->sarea_priv)
1161                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1162                 }
1163
1164                 msleep(1);
1165                 if (atomic_read(&dev_priv->mm.wedged))
1166                         return -EAGAIN;
1167         } while (!time_after(jiffies, end));
1168         trace_i915_ring_wait_end(ring);
1169         return -EBUSY;
1170 }
1171
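/*
 * intel_ring_begin()/intel_ring_emit()/intel_ring_advance() form the
 * emission pattern used throughout this file, e.g.:
 *
 *      ret = intel_ring_begin(ring, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(ring, MI_FLUSH);
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_advance(ring);
 *
 * begin() reserves space for num_dwords, wrapping the ring or waiting for
 * the GPU to consume older commands if necessary; emit() writes one dword
 * at the software tail; advance() publishes the new tail to the hardware.
 */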
1172 int intel_ring_begin(struct intel_ring_buffer *ring,
1173                      int num_dwords)
1174 {
1175         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1176         int n = 4*num_dwords;
1177         int ret;
1178
1179         if (unlikely(atomic_read(&dev_priv->mm.wedged)))
1180                 return -EIO;
1181
1182         if (unlikely(ring->tail + n > ring->effective_size)) {
1183                 ret = intel_wrap_ring_buffer(ring);
1184                 if (unlikely(ret))
1185                         return ret;
1186         }
1187
1188         if (unlikely(ring->space < n)) {
1189                 ret = intel_wait_ring_buffer(ring, n);
1190                 if (unlikely(ret))
1191                         return ret;
1192         }
1193
1194         ring->space -= n;
1195         return 0;
1196 }
1197
1198 void intel_ring_advance(struct intel_ring_buffer *ring)
1199 {
1200         ring->tail &= ring->size - 1;
1201         ring->write_tail(ring, ring->tail);
1202 }
1203
1204 static const struct intel_ring_buffer render_ring = {
1205         .name                   = "render ring",
1206         .id                     = RING_RENDER,
1207         .mmio_base              = RENDER_RING_BASE,
1208         .size                   = 32 * PAGE_SIZE,
1209         .init                   = init_render_ring,
1210         .write_tail             = ring_write_tail,
1211         .flush                  = render_ring_flush,
1212         .add_request            = render_ring_add_request,
1213         .get_seqno              = ring_get_seqno,
1214         .irq_get                = render_ring_get_irq,
1215         .irq_put                = render_ring_put_irq,
1216         .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
1217         .cleanup                = render_ring_cleanup,
1218         .sync_to                = render_ring_sync_to,
1219         .semaphore_register     = {MI_SEMAPHORE_SYNC_INVALID,
1220                                    MI_SEMAPHORE_SYNC_RV,
1221                                    MI_SEMAPHORE_SYNC_RB},
1222         .signal_mbox            = {GEN6_VRSYNC, GEN6_BRSYNC},
1223 };
1224
1225 /* ring buffer for bit-stream decoder */
1226
1227 static const struct intel_ring_buffer bsd_ring = {
1228         .name                   = "bsd ring",
1229         .id                     = RING_BSD,
1230         .mmio_base              = BSD_RING_BASE,
1231         .size                   = 32 * PAGE_SIZE,
1232         .init                   = init_ring_common,
1233         .write_tail             = ring_write_tail,
1234         .flush                  = bsd_ring_flush,
1235         .add_request            = ring_add_request,
1236         .get_seqno              = ring_get_seqno,
1237         .irq_get                = bsd_ring_get_irq,
1238         .irq_put                = bsd_ring_put_irq,
1239         .dispatch_execbuffer    = ring_dispatch_execbuffer,
1240 };
1241
1242
1243 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1244                                      u32 value)
1245 {
1246         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1247
1248        /* Every tail move must follow the sequence below */
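        /*
         * That sequence, as implemented below: switch the ring's
         * power-save messaging to "disabled", wait for the idle
         * indicator, write the new tail, then re-enable the messaging
         * (presumably so the tail write never races the ring's sleep
         * state).
         */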
1249         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1250                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1251                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1252         I915_WRITE(GEN6_BSD_RNCID, 0x0);
1253
1254         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1255                       GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
1256                      50))
1257                 DRM_ERROR("timed out waiting for IDLE Indicator\n");
1258
1259         I915_WRITE_TAIL(ring, value);
1260         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1261                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1262                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1263 }
1264
1265 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1266                            u32 invalidate, u32 flush)
1267 {
1268         uint32_t cmd;
1269         int ret;
1270
1271         ret = intel_ring_begin(ring, 4);
1272         if (ret)
1273                 return ret;
1274
1275         cmd = MI_FLUSH_DW;
1276         if (invalidate & I915_GEM_GPU_DOMAINS)
1277                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1278         intel_ring_emit(ring, cmd);
1279         intel_ring_emit(ring, 0);
1280         intel_ring_emit(ring, 0);
1281         intel_ring_emit(ring, MI_NOOP);
1282         intel_ring_advance(ring);
1283         return 0;
1284 }
1285
1286 static int
1287 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1288                               u32 offset, u32 len)
1289 {
1290         int ret;
1291
1292         ret = intel_ring_begin(ring, 2);
1293         if (ret)
1294                 return ret;
1295
1296         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1297         /* bits 0-7 of the MI_BATCH_BUFFER_START dword above are the length on GEN6+ */
1298         intel_ring_emit(ring, offset);
1299         intel_ring_advance(ring);
1300
1301         return 0;
1302 }
1303
1304 static bool
1305 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1306 {
1307         return gen6_ring_get_irq(ring,
1308                                  GT_USER_INTERRUPT,
1309                                  GEN6_RENDER_USER_INTERRUPT);
1310 }
1311
1312 static void
1313 gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1314 {
1315         return gen6_ring_put_irq(ring,
1316                                  GT_USER_INTERRUPT,
1317                                  GEN6_RENDER_USER_INTERRUPT);
1318 }
1319
1320 static bool
1321 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1322 {
1323         return gen6_ring_get_irq(ring,
1324                                  GT_GEN6_BSD_USER_INTERRUPT,
1325                                  GEN6_BSD_USER_INTERRUPT);
1326 }
1327
1328 static void
1329 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1330 {
1331         return gen6_ring_put_irq(ring,
1332                                  GT_GEN6_BSD_USER_INTERRUPT,
1333                                  GEN6_BSD_USER_INTERRUPT);
1334 }
1335
1336 /* ring buffer for Video Codec for Gen6+ */
1337 static const struct intel_ring_buffer gen6_bsd_ring = {
1338         .name                   = "gen6 bsd ring",
1339         .id                     = RING_BSD,
1340         .mmio_base              = GEN6_BSD_RING_BASE,
1341         .size                   = 32 * PAGE_SIZE,
1342         .init                   = init_ring_common,
1343         .write_tail             = gen6_bsd_ring_write_tail,
1344         .flush                  = gen6_ring_flush,
1345         .add_request            = gen6_add_request,
1346         .get_seqno              = gen6_ring_get_seqno,
1347         .irq_get                = gen6_bsd_ring_get_irq,
1348         .irq_put                = gen6_bsd_ring_put_irq,
1349         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1350         .sync_to                = gen6_bsd_ring_sync_to,
1351         .semaphore_register     = {MI_SEMAPHORE_SYNC_VR,
1352                                    MI_SEMAPHORE_SYNC_INVALID,
1353                                    MI_SEMAPHORE_SYNC_VB},
1354         .signal_mbox            = {GEN6_RVSYNC, GEN6_BVSYNC},
1355 };
1356
1357 /* Blitter support (SandyBridge+) */
1358
1359 static bool
1360 blt_ring_get_irq(struct intel_ring_buffer *ring)
1361 {
1362         return gen6_ring_get_irq(ring,
1363                                  GT_BLT_USER_INTERRUPT,
1364                                  GEN6_BLITTER_USER_INTERRUPT);
1365 }
1366
1367 static void
1368 blt_ring_put_irq(struct intel_ring_buffer *ring)
1369 {
1370         gen6_ring_put_irq(ring,
1371                           GT_BLT_USER_INTERRUPT,
1372                           GEN6_BLITTER_USER_INTERRUPT);
1373 }
1374
1375
1376 /* Workaround for some steppings of SNB:
1377  * each time the BLT engine ring tail is moved,
1378  * the first command in the ring to be parsed
1379  * should be MI_BATCH_BUFFER_START.
1380  */
1381 #define NEED_BLT_WORKAROUND(dev) \
1382         (IS_GEN6(dev) && (dev->pdev->revision < 8))
1383
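/*
 * The workaround object set up in blt_ring_init() is a single page whose
 * first two dwords are MI_BATCH_BUFFER_END / MI_NOOP.  blt_ring_begin()
 * then prefixes every command sequence with an MI_BATCH_BUFFER_START
 * pointing at that page, so the first command parsed after any tail move
 * is always a batch-buffer start, satisfying the workaround above.
 */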
1384 static inline struct drm_i915_gem_object *
1385 to_blt_workaround(struct intel_ring_buffer *ring)
1386 {
1387         return ring->private;
1388 }
1389
1390 static int blt_ring_init(struct intel_ring_buffer *ring)
1391 {
1392         if (NEED_BLT_WORKAROUND(ring->dev)) {
1393                 struct drm_i915_gem_object *obj;
1394                 u32 *ptr;
1395                 int ret;
1396
1397                 obj = i915_gem_alloc_object(ring->dev, 4096);
1398                 if (obj == NULL)
1399                         return -ENOMEM;
1400
1401                 ret = i915_gem_object_pin(obj, 4096, true);
1402                 if (ret) {
1403                         drm_gem_object_unreference(&obj->base);
1404                         return ret;
1405                 }
1406
1407                 ptr = kmap(obj->pages[0]);
1408                 *ptr++ = MI_BATCH_BUFFER_END;
1409                 *ptr++ = MI_NOOP;
1410                 kunmap(obj->pages[0]);
1411
1412                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1413                 if (ret) {
1414                         i915_gem_object_unpin(obj);
1415                         drm_gem_object_unreference(&obj->base);
1416                         return ret;
1417                 }
1418
1419                 ring->private = obj;
1420         }
1421
1422         return init_ring_common(ring);
1423 }
1424
1425 static int blt_ring_begin(struct intel_ring_buffer *ring,
1426                           int num_dwords)
1427 {
1428         if (ring->private) {
1429                 int ret = intel_ring_begin(ring, num_dwords+2);
1430                 if (ret)
1431                         return ret;
1432
1433                 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1434                 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1435
1436                 return 0;
1437         } else
1438                 return intel_ring_begin(ring, 4);
1439 }
1440
1441 static int blt_ring_flush(struct intel_ring_buffer *ring,
1442                           u32 invalidate, u32 flush)
1443 {
1444         uint32_t cmd;
1445         int ret;
1446
1447         ret = blt_ring_begin(ring, 4);
1448         if (ret)
1449                 return ret;
1450
1451         cmd = MI_FLUSH_DW;
1452         if (invalidate & I915_GEM_DOMAIN_RENDER)
1453                 cmd |= MI_INVALIDATE_TLB;
1454         intel_ring_emit(ring, cmd);
1455         intel_ring_emit(ring, 0);
1456         intel_ring_emit(ring, 0);
1457         intel_ring_emit(ring, MI_NOOP);
1458         intel_ring_advance(ring);
1459         return 0;
1460 }
1461
1462 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1463 {
1464         if (!ring->private)
1465                 return;
1466
1467         i915_gem_object_unpin(ring->private);
1468         drm_gem_object_unreference(ring->private);
1469         ring->private = NULL;
1470 }
1471
1472 static const struct intel_ring_buffer gen6_blt_ring = {
1473         .name                   = "blt ring",
1474         .id                     = RING_BLT,
1475         .mmio_base              = BLT_RING_BASE,
1476         .size                   = 32 * PAGE_SIZE,
1477         .init                   = blt_ring_init,
1478         .write_tail             = ring_write_tail,
1479         .flush                  = blt_ring_flush,
1480         .add_request            = gen6_add_request,
1481         .get_seqno              = gen6_ring_get_seqno,
1482         .irq_get                = blt_ring_get_irq,
1483         .irq_put                = blt_ring_put_irq,
1484         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1485         .cleanup                = blt_ring_cleanup,
1486         .sync_to                = gen6_blt_ring_sync_to,
1487         .semaphore_register     = {MI_SEMAPHORE_SYNC_BR,
1488                                    MI_SEMAPHORE_SYNC_BV,
1489                                    MI_SEMAPHORE_SYNC_INVALID},
1490         .signal_mbox            = {GEN6_RBSYNC, GEN6_VBSYNC},
1491 };
1492
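/*
 * The init entry points below start from the static ring templates above
 * and override the gen-specific hooks (request emission, flush, irq and
 * seqno handling) where newer generations need them.
 */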
1493 int intel_init_render_ring_buffer(struct drm_device *dev)
1494 {
1495         drm_i915_private_t *dev_priv = dev->dev_private;
1496         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1497
1498         *ring = render_ring;
1499         if (INTEL_INFO(dev)->gen >= 6) {
1500                 ring->add_request = gen6_add_request;
1501                 ring->flush = gen6_render_ring_flush;
1502                 ring->irq_get = gen6_render_ring_get_irq;
1503                 ring->irq_put = gen6_render_ring_put_irq;
1504                 ring->get_seqno = gen6_ring_get_seqno;
1505         } else if (IS_GEN5(dev)) {
1506                 ring->add_request = pc_render_add_request;
1507                 ring->get_seqno = pc_render_get_seqno;
1508         }
1509
1510         if (!I915_NEED_GFX_HWS(dev)) {
1511                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1512                 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1513         }
1514
1515         return intel_init_ring_buffer(dev, ring);
1516 }
1517
1518 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1519 {
1520         drm_i915_private_t *dev_priv = dev->dev_private;
1521         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1522
1523         *ring = render_ring;
1524         if (INTEL_INFO(dev)->gen >= 6) {
1525                 ring->add_request = gen6_add_request;
1526                 ring->irq_get = gen6_render_ring_get_irq;
1527                 ring->irq_put = gen6_render_ring_put_irq;
1528         } else if (IS_GEN5(dev)) {
1529                 ring->add_request = pc_render_add_request;
1530                 ring->get_seqno = pc_render_get_seqno;
1531         }
1532
1533         if (!I915_NEED_GFX_HWS(dev))
1534                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1535
1536         ring->dev = dev;
1537         INIT_LIST_HEAD(&ring->active_list);
1538         INIT_LIST_HEAD(&ring->request_list);
1539         INIT_LIST_HEAD(&ring->gpu_write_list);
1540
1541         ring->size = size;
1542         ring->effective_size = ring->size;
1543         if (IS_I830(ring->dev))
1544                 ring->effective_size -= 128;
1545
1546         ring->map.offset = start;
1547         ring->map.size = size;
1548         ring->map.type = 0;
1549         ring->map.flags = 0;
1550         ring->map.mtrr = 0;
1551
1552         drm_core_ioremap_wc(&ring->map, dev);
1553         if (ring->map.handle == NULL) {
1554                 DRM_ERROR("can not ioremap virtual address for"
1555                           " ring buffer\n");
1556                 return -ENOMEM;
1557         }
1558
1559         ring->virtual_start = (void __force __iomem *)ring->map.handle;
1560         return 0;
1561 }
1562
1563 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1564 {
1565         drm_i915_private_t *dev_priv = dev->dev_private;
1566         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1567
1568         if (IS_GEN6(dev) || IS_GEN7(dev))
1569                 *ring = gen6_bsd_ring;
1570         else
1571                 *ring = bsd_ring;
1572
1573         return intel_init_ring_buffer(dev, ring);
1574 }
1575
1576 int intel_init_blt_ring_buffer(struct drm_device *dev)
1577 {
1578         drm_i915_private_t *dev_priv = dev->dev_private;
1579         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1580
1581         *ring = gen6_blt_ring;
1582
1583         return intel_init_ring_buffer(dev, ring);
1584 }