/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

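/*
 * Hand out the next breadcrumb sequence number.  Zero is skipped so that
 * a seqno of 0 can safely mean "no request" elsewhere in the driver (see
 * the "reserve 0 for non-seqno" note below).
 */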
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 seqno;

        seqno = dev_priv->next_seqno;

        /* reserve 0 for non-seqno */
        if (++dev_priv->next_seqno == 0)
                dev_priv->next_seqno = 1;

        return seqno;
}

static void
render_ring_flush(struct drm_device *dev,
                  struct intel_ring_buffer *ring,
                  u32   invalidate_domains,
                  u32   flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 cmd;

#if WATCH_EXEC
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
#endif

        trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
                                     invalidate_domains, flush_domains);

        if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
                /*
                 * read/write caches:
                 *
                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
                 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
                 * also flushed at 2d versus 3d pipeline switches.
                 *
                 * read-only caches:
                 *
                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
                 * MI_READ_FLUSH is set, and is always flushed on 965.
                 *
                 * I915_GEM_DOMAIN_COMMAND may not exist?
                 *
                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
                 * invalidated when MI_EXE_FLUSH is set.
                 *
                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
                 * invalidated with every MI_FLUSH.
                 *
                 * TLBs:
                 *
                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
                 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
                 * are flushed at any MI_FLUSH.
                 */

                cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
                if ((invalidate_domains|flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
                if (INTEL_INFO(dev)->gen < 4) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
                         */
                        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                                cmd |= MI_READ_FLUSH;
                }
                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                        cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
                intel_ring_begin(dev, ring, 2);
                intel_ring_emit(dev, ring, cmd);
                intel_ring_emit(dev, ring, MI_NOOP);
                intel_ring_advance(dev, ring);
        }
}

static void ring_write_tail(struct drm_device *dev,
                            struct intel_ring_buffer *ring,
                            u32 value)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE_TAIL(ring, value);
}

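/*
 * Read back the ring's active head pointer (ACTHD), i.e. how far the
 * hardware has progressed through the commands.  Gen4+ parts expose a
 * per-ring ACTHD register at an offset from the ring's mmio base; older
 * parts only have the single global ACTHD register.
 */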
u32 intel_ring_get_active_head(struct drm_device *dev,
                               struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
                        RING_ACTHD(ring->mmio_base) : ACTHD;

        return I915_READ(acthd_reg);
}

static int init_ring_common(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
        u32 head;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;

        obj_priv = to_intel_bo(ring->gem_object);

        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(dev, ring, 0);

        /* Initialize the ring. */
        I915_WRITE_START(ring, obj_priv->gtt_offset);
        head = I915_READ_HEAD(ring) & HEAD_ADDR;

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_ERROR("%s head not reset to zero "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ_CTL(ring),
                                I915_READ_HEAD(ring),
                                I915_READ_TAIL(ring),
                                I915_READ_START(ring));

                I915_WRITE_HEAD(ring, 0);

                DRM_ERROR("%s head forced to zero "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ_CTL(ring),
                                I915_READ_HEAD(ring),
                                I915_READ_TAIL(ring),
                                I915_READ_START(ring));
        }

        I915_WRITE_CTL(ring,
                        ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_NO_REPORT | RING_VALID);

        head = I915_READ_HEAD(ring) & HEAD_ADDR;
        /* If the head is still not zero, the ring is dead */
        if (head != 0) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ_CTL(ring),
                                I915_READ_HEAD(ring),
                                I915_READ_TAIL(ring),
                                I915_READ_START(ring));
                return -EIO;
        }

        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
                i915_kernel_lost_context(dev);
        } else {
                ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
        }
        return 0;
}

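/*
 * The render ring needs a little more than the common setup: MI_MODE is
 * programmed for VS timer dispatch on gen4+, and on gen6 the MI_FLUSH
 * command must additionally be enabled here before the flush hook may
 * emit it.
 */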
static int init_render_ring(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = init_ring_common(dev, ring);
        int mode;

        if (INTEL_INFO(dev)->gen > 3) {
                mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
                if (IS_GEN6(dev))
                        mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
                I915_WRITE(MI_MODE, mode);
        }
        return ret;
}

#define PIPE_CONTROL_FLUSH(addr)                                        \
do {                                                                    \
        OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |          \
                 PIPE_CONTROL_DEPTH_STALL | 2);                         \
        OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
        OUT_RING(0);                                                    \
        OUT_RING(0);                                                    \
} while (0)
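
/*
 * Each PIPE_CONTROL_FLUSH() expands to a 4-dword PIPE_CONTROL packet
 * that stalls at the depth stage and posts a qword write to @addr.
 * render_ring_add_request() below steps @addr by 128 bytes between
 * invocations so that every write lands in its own cacheline.
 */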

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
                        struct intel_ring_buffer *ring,
                        u32 flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 seqno;

        seqno = i915_gem_get_seqno(dev);

        if (IS_GEN6(dev)) {
                BEGIN_LP_RING(6);
                OUT_RING(GFX_OP_PIPE_CONTROL | 3);
                OUT_RING(PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
                         PIPE_CONTROL_NOTIFY);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                OUT_RING(0);
                ADVANCE_LP_RING();
        } else if (HAS_PIPE_CONTROL(dev)) {
                u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

                /*
                 * Workaround qword write incoherence by flushing the
                 * PIPE_NOTIFY buffers out to memory before requesting
                 * an interrupt.
                 */
                BEGIN_LP_RING(32);
                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128; /* write to separate cachelines */
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
                         PIPE_CONTROL_NOTIFY);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                ADVANCE_LP_RING();
        } else {
                BEGIN_LP_RING(4);
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(seqno);

                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }
        return seqno;
}

static u32
render_ring_get_seqno(struct drm_device *dev,
                      struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (HAS_PIPE_CONTROL(dev))
                return ((volatile u32 *)(dev_priv->seqno_page))[0];
        else
                return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
render_ring_get_user_irq(struct drm_device *dev,
                         struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
                         struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

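/*
 * Point the hardware at the status page's address in the GTT.  Gen6
 * moved HWS_PGA to a per-ring offset; in both cases the read back is
 * only a posting read to flush the register write.
 */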
void intel_ring_setup_status_page(struct drm_device *dev,
                                  struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (IS_GEN6(dev)) {
                I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
                           ring->status_page.gfx_addr);
                I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
        } else {
                I915_WRITE(RING_HWS_PGA(ring->mmio_base),
                           ring->status_page.gfx_addr);
                I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
        }
}

static void
bsd_ring_flush(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                u32     invalidate_domains,
                u32     flush_domains)
{
        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring, MI_FLUSH);
        intel_ring_emit(dev, ring, MI_NOOP);
        intel_ring_advance(dev, ring);
}

static int init_bsd_ring(struct drm_device *dev,
                         struct intel_ring_buffer *ring)
{
        return init_ring_common(dev, ring);
}

static u32
ring_add_request(struct drm_device *dev,
                 struct intel_ring_buffer *ring,
                 u32 flush_domains)
{
        u32 seqno;

        seqno = i915_gem_get_seqno(dev);

        intel_ring_begin(dev, ring, 4);
        intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(dev, ring,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(dev, ring, seqno);
        intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
        intel_ring_advance(dev, ring);

        DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

        return seqno;
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
                      struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
                      struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static u32
ring_status_page_get_seqno(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static int
ring_dispatch_gem_execbuffer(struct drm_device *dev,
                             struct intel_ring_buffer *ring,
                             struct drm_i915_gem_execbuffer2 *exec,
                             struct drm_clip_rect *cliprects,
                             uint64_t exec_offset)
{
        uint32_t exec_start;

        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
                        (2 << 6) | MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(dev, ring, exec_start);
        intel_ring_advance(dev, ring);

        return 0;
}

static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                                    struct intel_ring_buffer *ring,
                                    struct drm_i915_gem_execbuffer2 *exec,
                                    struct drm_clip_rect *cliprects,
                                    uint64_t exec_offset)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = exec->num_cliprects;
        int i = 0, count;
        uint32_t exec_start, exec_len;

        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;

        trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, cliprects, i,
                                                exec->DR1, exec->DR4);
                        if (ret)
                                return ret;
                }

                if (IS_I830(dev) || IS_845G(dev)) {
                        intel_ring_begin(dev, ring, 4);
                        intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
                        intel_ring_emit(dev, ring,
                                        exec_start | MI_BATCH_NON_SECURE);
                        intel_ring_emit(dev, ring, exec_start + exec_len - 4);
                        intel_ring_emit(dev, ring, 0);
                } else {
                        intel_ring_begin(dev, ring, 2);
                        if (INTEL_INFO(dev)->gen >= 4) {
                                intel_ring_emit(dev, ring,
                                                MI_BATCH_BUFFER_START | (2 << 6)
                                                | MI_BATCH_NON_SECURE_I965);
                                intel_ring_emit(dev, ring, exec_start);
                        } else {
                                intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
                                                | (2 << 6));
                                intel_ring_emit(dev, ring, exec_start |
                                                MI_BATCH_NON_SECURE);
                        }
                }
                intel_ring_advance(dev, ring);
        }

        if (IS_G4X(dev) || IS_GEN5(dev)) {
                intel_ring_begin(dev, ring, 2);
                intel_ring_emit(dev, ring, MI_FLUSH |
                                MI_NO_WRITE_FLUSH |
                                MI_INVALIDATE_ISP);
                intel_ring_emit(dev, ring, MI_NOOP);
                intel_ring_advance(dev, ring);
        }
        /* XXX breadcrumb */

        return 0;
}

static void cleanup_status_page(struct drm_device *dev,
                                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        obj = ring->status_page.obj;
        if (obj == NULL)
                return;
        obj_priv = to_intel_bo(obj);

        kunmap(obj_priv->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(obj);
        ring->status_page.obj = NULL;

        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
                goto err;
        }
        obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0)
                goto err_unref;

        ring->status_page.gfx_addr = obj_priv->gtt_offset;
        ring->status_page.page_addr = kmap(obj_priv->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                ret = -ENOMEM; /* don't report success when kmap fails */
                goto err_unpin;
        }
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        intel_ring_setup_status_page(dev, ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                        ring->name, ring->status_page.gfx_addr);

        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(obj);
err:
        return ret;
}

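/*
 * Common ring constructor: set up the status page if this platform keeps
 * it in graphics memory, then allocate, pin and map the ring object
 * itself before calling the ring's own ->init() hook.  The error labels
 * below unwind exactly the steps that had succeeded.
 */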
int intel_init_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        struct drm_gem_object *obj;
        int ret;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(dev, ring);
                if (ret)
                        return ret;
        }

        obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
                goto err_hws;
        }

        ring->gem_object = obj;

        ret = i915_gem_object_pin(obj, PAGE_SIZE);
        if (ret)
                goto err_unref;

        obj_priv = to_intel_bo(obj);
        ring->map.size = ring->size;
        ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }

        ring->virtual_start = ring->map.handle;
        ret = ring->init(dev, ring);
        if (ret)
                goto err_unmap;

        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
                i915_kernel_lost_context(dev);
        } else {
                ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
        }
        return ret;

err_unmap:
        drm_core_ioremapfree(&ring->map, dev);
err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(obj);
        ring->gem_object = NULL;
err_hws:
        cleanup_status_page(dev, ring);
        return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
                               struct intel_ring_buffer *ring)
{
        if (ring->gem_object == NULL)
                return;

        drm_core_ioremapfree(&ring->map, dev);

        i915_gem_object_unpin(ring->gem_object);
        drm_gem_object_unreference(ring->gem_object);
        ring->gem_object = NULL;
        cleanup_status_page(dev, ring);
}

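/*
 * Commands may not wrap across the end of the ring, so when the space
 * remaining before the end is too small for the next packet we pad it
 * out with MI_NOOPs (two dwords at a time) and restart at offset 0.
 */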
static int intel_wrap_ring_buffer(struct drm_device *dev,
                                  struct intel_ring_buffer *ring)
{
        unsigned int *virt;
        int rem;

        rem = ring->size - ring->tail;

        if (ring->space < rem) {
                int ret = intel_wait_ring_buffer(dev, ring, rem);
                if (ret)
                        return ret;
        }

        virt = (unsigned int *)(ring->virtual_start + ring->tail);
        rem /= 8;
        while (rem--) {
                *virt++ = MI_NOOP;
                *virt++ = MI_NOOP;
        }

        ring->tail = 0;
        ring->space = ring->head - 8;

        return 0;
}

int intel_wait_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring, int n)
{
        unsigned long end;
        drm_i915_private_t *dev_priv = dev->dev_private;

        trace_i915_ring_wait_begin(dev);
        end = jiffies + 3 * HZ;
        do {
                ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(dev);
                        return 0;
                }

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }

                msleep(1);
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end(dev);
        return -EBUSY;
}

void intel_ring_begin(struct drm_device *dev,
                      struct intel_ring_buffer *ring,
                      int num_dwords)
{
        int n = 4 * num_dwords;

        if (unlikely(ring->tail + n > ring->size))
                intel_wrap_ring_buffer(dev, ring);
        if (unlikely(ring->space < n))
                intel_wait_ring_buffer(dev, ring, n);

        ring->space -= n;
}

void intel_ring_advance(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        ring->tail &= ring->size - 1;
        ring->write_tail(dev, ring, ring->tail);
}
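
/*
 * Typical emit sequence, as used by the flush and add_request hooks in
 * this file: reserve ring space, write the command dwords, then advance
 * the hardware tail past them.  For example, a minimal flush:
 *
 *        intel_ring_begin(dev, ring, 2);
 *        intel_ring_emit(dev, ring, MI_FLUSH);
 *        intel_ring_emit(dev, ring, MI_NOOP);
 *        intel_ring_advance(dev, ring);
 */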

static const struct intel_ring_buffer render_ring = {
        .name                   = "render ring",
        .id                     = RING_RENDER,
        .mmio_base              = RENDER_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = init_render_ring,
        .write_tail             = ring_write_tail,
        .flush                  = render_ring_flush,
        .add_request            = render_ring_add_request,
        .get_seqno              = render_ring_get_seqno,
        .user_irq_get           = render_ring_get_user_irq,
        .user_irq_put           = render_ring_put_user_irq,
        .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
        .name                   = "bsd ring",
        .id                     = RING_BSD,
        .mmio_base              = BSD_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = init_bsd_ring,
        .write_tail             = ring_write_tail,
        .flush                  = bsd_ring_flush,
        .add_request            = ring_add_request,
        .get_seqno              = ring_status_page_get_seqno,
        .user_irq_get           = bsd_ring_get_user_irq,
        .user_irq_put           = bsd_ring_put_user_irq,
        .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct drm_device *dev,
                                     struct intel_ring_buffer *ring,
                                     u32 value)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Every tail move must follow the sequence below */
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
        I915_WRITE(GEN6_BSD_RNCID, 0x0);

        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                                GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
                        50))
                DRM_ERROR("timed out waiting for IDLE Indicator\n");

        I915_WRITE_TAIL(ring, value);
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static void gen6_ring_flush(struct drm_device *dev,
                            struct intel_ring_buffer *ring,
                            u32 invalidate_domains,
                            u32 flush_domains)
{
        intel_ring_begin(dev, ring, 4);
        intel_ring_emit(dev, ring, MI_FLUSH_DW);
        intel_ring_emit(dev, ring, 0);
        intel_ring_emit(dev, ring, 0);
        intel_ring_emit(dev, ring, 0);
        intel_ring_advance(dev, ring);
}

static int
gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                                  struct intel_ring_buffer *ring,
                                  struct drm_i915_gem_execbuffer2 *exec,
                                  struct drm_clip_rect *cliprects,
                                  uint64_t exec_offset)
{
        uint32_t exec_start;

        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring,
                        MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(dev, ring, exec_start);
        intel_ring_advance(dev, ring);

        return 0;
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
        .name                    = "gen6 bsd ring",
        .id                      = RING_BSD,
        .mmio_base               = GEN6_BSD_RING_BASE,
        .size                    = 32 * PAGE_SIZE,
        .init                    = init_bsd_ring,
        .write_tail              = gen6_bsd_ring_write_tail,
        .flush                   = gen6_ring_flush,
        .add_request             = ring_add_request,
        .get_seqno               = ring_status_page_get_seqno,
        .user_irq_get            = bsd_ring_get_user_irq,
        .user_irq_put            = bsd_ring_put_user_irq,
        .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
};

/* Blitter support (SandyBridge+) */

static void
blt_ring_get_user_irq(struct drm_device *dev,
                      struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static void
blt_ring_put_user_irq(struct drm_device *dev,
                      struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static const struct intel_ring_buffer gen6_blt_ring = {
        .name                    = "blt ring",
        .id                      = RING_BLT,
        .mmio_base               = BLT_RING_BASE,
        .size                    = 32 * PAGE_SIZE,
        .init                    = init_ring_common,
        .write_tail              = ring_write_tail,
        .flush                   = gen6_ring_flush,
        .add_request             = ring_add_request,
        .get_seqno               = ring_status_page_get_seqno,
        .user_irq_get            = blt_ring_get_user_irq,
        .user_irq_put            = blt_ring_put_user_irq,
        .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        dev_priv->render_ring = render_ring;

        if (!I915_NEED_GFX_HWS(dev)) {
                dev_priv->render_ring.status_page.page_addr
                        = dev_priv->status_page_dmah->vaddr;
                memset(dev_priv->render_ring.status_page.page_addr,
                                0, PAGE_SIZE);
        }

        return intel_init_ring_buffer(dev, &dev_priv->render_ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (IS_GEN6(dev))
                dev_priv->bsd_ring = gen6_bsd_ring;
        else
                dev_priv->bsd_ring = bsd_ring;

        return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        dev_priv->blt_ring = gen6_blt_ring;

        return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
}