drm/i915: Remove unused intel_ringbuffer->ring_flag
drivers/gpu/drm/i915/intel_ringbuffer.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"

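/*
 * Hand out the next breadcrumb sequence number, skipping 0 so that a
 * seqno of 0 can be used to mean "no request outstanding".
 */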
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 seqno;

        seqno = dev_priv->next_seqno;

        /* reserve 0 for non-seqno */
        if (++dev_priv->next_seqno == 0)
                dev_priv->next_seqno = 1;

        return seqno;
}

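/*
 * Emit an MI_FLUSH on the render ring, picking the read/exe flush bits from
 * the GEM domains being invalidated or flushed, then move any affected
 * objects onto the flushing list.
 */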
static void
render_ring_flush(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                u32     invalidate_domains,
                u32     flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 cmd;

#if WATCH_EXEC
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
#endif

        trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
                                     invalidate_domains, flush_domains);

        if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
                /*
                 * read/write caches:
                 *
                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
                 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
                 * also flushed at 2d versus 3d pipeline switches.
                 *
                 * read-only caches:
                 *
                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
                 * MI_READ_FLUSH is set, and is always flushed on 965.
                 *
                 * I915_GEM_DOMAIN_COMMAND may not exist?
                 *
                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
                 * invalidated when MI_EXE_FLUSH is set.
                 *
                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
                 * invalidated with every MI_FLUSH.
                 *
                 * TLBs:
                 *
                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
                 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
                 * are flushed at any MI_FLUSH.
                 */

                cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
                if ((invalidate_domains | flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
                if (!IS_I965G(dev)) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
                         */
                        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                                cmd |= MI_READ_FLUSH;
                }
                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                        cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
                intel_ring_begin(dev, ring, 2);
                intel_ring_emit(dev, ring, cmd);
                intel_ring_emit(dev, ring, MI_NOOP);
                intel_ring_advance(dev, ring);
        }

        i915_gem_process_flushing_list(dev, flush_domains, ring);
}

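/*
 * MMIO accessors for the render ring: the head, tail and active-head
 * pointers live in the PRB0_* registers.
 */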
static unsigned int render_ring_get_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(PRB0_HEAD) & HEAD_ADDR;
}

static unsigned int render_ring_get_tail(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(PRB0_TAIL) & TAIL_ADDR;
}

static unsigned int render_ring_get_active_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;

        return I915_READ(acthd_reg);
}

static void render_ring_advance_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(PRB0_TAIL, ring->tail);
}

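/*
 * Common ring initialization: stop the ring, program its start address,
 * force the head back to zero (working around G45 failing to reset it) and
 * re-enable it, then resynchronize the software head/tail/space copies.
 */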
static int init_ring_common(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        u32 head;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        obj_priv = to_intel_bo(ring->gem_object);

        /* Stop the ring if it's running. */
        I915_WRITE(ring->regs.ctl, 0);
        I915_WRITE(ring->regs.head, 0);
        I915_WRITE(ring->regs.tail, 0);

        /* Initialize the ring. */
        I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
        head = ring->get_head(dev, ring);

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_ERROR("%s head not reset to zero "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ(ring->regs.ctl),
                                I915_READ(ring->regs.head),
                                I915_READ(ring->regs.tail),
                                I915_READ(ring->regs.start));

                I915_WRITE(ring->regs.head, 0);

                DRM_ERROR("%s head forced to zero "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ(ring->regs.ctl),
                                I915_READ(ring->regs.head),
                                I915_READ(ring->regs.tail),
                                I915_READ(ring->regs.start));
        }

        I915_WRITE(ring->regs.ctl,
                        ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_NO_REPORT | RING_VALID);

        head = I915_READ(ring->regs.head) & HEAD_ADDR;
        /* If the head is still not zero, the ring is dead */
        if (head != 0) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ(ring->regs.ctl),
                                I915_READ(ring->regs.head),
                                I915_READ(ring->regs.tail),
                                I915_READ(ring->regs.start));
                return -EIO;
        }

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
                ring->head = ring->get_head(dev, ring);
                ring->tail = ring->get_tail(dev, ring);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
        }
        return 0;
}

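/*
 * Render-ring specific initialization: after the common setup, non-gen3
 * i9xx parts also program MI_MODE for VS timer dispatch (and gen6
 * additionally sets the MI flush enable bits).
 */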
static int init_render_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = init_ring_common(dev, ring);
        int mode;

        if (IS_I9XX(dev) && !IS_GEN3(dev)) {
                mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
                if (IS_GEN6(dev))
                        mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
                I915_WRITE(MI_MODE, mode);
        }
        return ret;
}

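/*
 * Emit a PIPE_CONTROL with a qword write to the given scratch address,
 * stalling at the depth stage; used below to flush PIPE_NOTIFY writes out
 * to memory.
 */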
#define PIPE_CONTROL_FLUSH(addr)                                        \
do {                                                                    \
        OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |          \
                 PIPE_CONTROL_DEPTH_STALL | 2);                         \
        OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
        OUT_RING(0);                                                    \
        OUT_RING(0);                                                    \
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_file *file_priv,
                u32 flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 seqno;

        seqno = i915_gem_get_seqno(dev);

        if (IS_GEN6(dev)) {
                BEGIN_LP_RING(6);
                OUT_RING(GFX_OP_PIPE_CONTROL | 3);
                OUT_RING(PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
                         PIPE_CONTROL_NOTIFY);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                OUT_RING(0);
                ADVANCE_LP_RING();
        } else if (HAS_PIPE_CONTROL(dev)) {
                u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

                /*
                 * Workaround qword write incoherence by flushing the
                 * PIPE_NOTIFY buffers out to memory before requesting
                 * an interrupt.
                 */
                BEGIN_LP_RING(32);
                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128; /* write to separate cachelines */
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
                         PIPE_CONTROL_NOTIFY);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                ADVANCE_LP_RING();
        } else {
                BEGIN_LP_RING(4);
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(seqno);

                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }
        return seqno;
}

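/*
 * Read back the most recently completed seqno, either from the dedicated
 * PIPE_CONTROL page or from the hardware status page.
 */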
static u32
render_ring_get_gem_seqno(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        if (HAS_PIPE_CONTROL(dev))
                return ((volatile u32 *)(dev_priv->seqno_page))[0];
        else
                return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

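/*
 * Reference-counted enabling/disabling of the user interrupt: the interrupt
 * is only unmasked while at least one waiter holds a reference.
 */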
static void
render_ring_get_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void render_setup_status_page(struct drm_device *dev,
        struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        if (IS_GEN6(dev)) {
                I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
                I915_READ(HWS_PGA_GEN6); /* posting read */
        } else {
                I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
                I915_READ(HWS_PGA); /* posting read */
        }
}

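/*
 * The BSD ring flush ignores the domain masks and always emits a plain
 * MI_FLUSH before moving objects onto the flushing list.
 */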
void
bsd_ring_flush(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                u32     invalidate_domains,
                u32     flush_domains)
{
        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring, MI_FLUSH);
        intel_ring_emit(dev, ring, MI_NOOP);
        intel_ring_advance(dev, ring);

        i915_gem_process_flushing_list(dev, flush_domains, ring);
}

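/* MMIO accessors for the BSD (bit-stream decoder) ring registers. */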
static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
}

static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
}

static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(BSD_RING_ACTHD);
}

static inline void bsd_ring_advance_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(BSD_RING_TAIL, ring->tail);
}

static int init_bsd_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        return init_ring_common(dev, ring);
}

static u32
bsd_ring_add_request(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_file *file_priv,
                u32 flush_domains)
{
        u32 seqno;

        seqno = i915_gem_get_seqno(dev);

        intel_ring_begin(dev, ring, 4);
        intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(dev, ring,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(dev, ring, seqno);
        intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
        intel_ring_advance(dev, ring);

        DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

        return seqno;
}

static void bsd_setup_status_page(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
        I915_READ(BSD_HWS_PGA);
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static u32
bsd_ring_get_gem_seqno(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

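/*
 * Start a batch buffer on the BSD ring using the 965-style
 * MI_BATCH_BUFFER_START encoding; cliprects are not used here.
 */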
static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_i915_gem_execbuffer2 *exec,
                struct drm_clip_rect *cliprects,
                uint64_t exec_offset)
{
        uint32_t exec_start;
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
                        (2 << 6) | MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(dev, ring, exec_start);
        intel_ring_advance(dev, ring);
        return 0;
}

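/*
 * Start a batch buffer on the render ring, once per cliprect (or once if
 * there are none), using the batch-start encoding appropriate for the
 * hardware generation, and follow up with an ISP-invalidating flush on
 * G4x/Ironlake.
 */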
static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_i915_gem_execbuffer2 *exec,
                struct drm_clip_rect *cliprects,
                uint64_t exec_offset)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = exec->num_cliprects;
        int i = 0, count;
        uint32_t exec_start, exec_len;
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;

        trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, cliprects, i,
                                                exec->DR1, exec->DR4);
                        if (ret)
                                return ret;
                }

                if (IS_I830(dev) || IS_845G(dev)) {
                        intel_ring_begin(dev, ring, 4);
                        intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
                        intel_ring_emit(dev, ring,
                                        exec_start | MI_BATCH_NON_SECURE);
                        intel_ring_emit(dev, ring, exec_start + exec_len - 4);
                        intel_ring_emit(dev, ring, 0);
                } else {
                        intel_ring_begin(dev, ring, 4);
                        if (IS_I965G(dev)) {
                                intel_ring_emit(dev, ring,
                                                MI_BATCH_BUFFER_START | (2 << 6)
                                                | MI_BATCH_NON_SECURE_I965);
                                intel_ring_emit(dev, ring, exec_start);
                        } else {
                                intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
                                                | (2 << 6));
                                intel_ring_emit(dev, ring, exec_start |
                                                MI_BATCH_NON_SECURE);
                        }
                }
                intel_ring_advance(dev, ring);
        }

        if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
                intel_ring_begin(dev, ring, 2);
                intel_ring_emit(dev, ring, MI_FLUSH |
                                MI_NO_WRITE_FLUSH |
                                MI_INVALIDATE_ISP);
                intel_ring_emit(dev, ring, MI_NOOP);
                intel_ring_advance(dev, ring);
        }
        /* XXX breadcrumb */

        return 0;
}

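/*
 * Helpers for the per-ring hardware status page: a single cached GEM page
 * that the GPU writes seqnos into.
 */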
static void cleanup_status_page(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        obj = ring->status_page.obj;
        if (obj == NULL)
                return;
        obj_priv = to_intel_bo(obj);

        kunmap(obj_priv->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(obj);
        ring->status_page.obj = NULL;

        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
                goto err;
        }
        obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0)
                goto err_unref;

        ring->status_page.gfx_addr = obj_priv->gtt_offset;
        ring->status_page.page_addr = kmap(obj_priv->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                ret = -ENOMEM;
                goto err_unpin;
        }
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        ring->setup_status_page(dev, ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                        ring->name, ring->status_page.gfx_addr);

        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(obj);
err:
        return ret;
}

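/*
 * Create and map a ring buffer: set up the status page if the chipset needs
 * a GTT-based one, allocate and pin the ring object, ioremap it through the
 * GTT, then let the ring perform its own hardware initialization.
 */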
int intel_init_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj_priv;
        struct drm_gem_object *obj;
        int ret;

        ring->dev = dev;

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(dev, ring);
                if (ret)
                        return ret;
        }

        obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
                goto err_hws;
        }

        ring->gem_object = obj;

        ret = i915_gem_object_pin(obj, ring->alignment);
        if (ret)
                goto err_unref;

        obj_priv = to_intel_bo(obj);
        ring->map.size = ring->size;
        ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }

        ring->virtual_start = ring->map.handle;
        ret = ring->init(dev, ring);
        if (ret)
                goto err_unmap;

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
                ring->head = ring->get_head(dev, ring);
                ring->tail = ring->get_tail(dev, ring);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
        }
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        return ret;

err_unmap:
        drm_core_ioremapfree(&ring->map, dev);
err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(obj);
        ring->gem_object = NULL;
err_hws:
        cleanup_status_page(dev, ring);
        return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        if (ring->gem_object == NULL)
                return;

        drm_core_ioremapfree(&ring->map, dev);

        i915_gem_object_unpin(ring->gem_object);
        drm_gem_object_unreference(ring->gem_object);
        ring->gem_object = NULL;
        cleanup_status_page(dev, ring);
}

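/*
 * Pad the remainder of the ring with MI_NOOPs and wrap the tail back to the
 * start, waiting for the hardware to free enough space first if necessary.
 */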
int intel_wrap_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        unsigned int *virt;
        int rem;
        rem = ring->size - ring->tail;

        if (ring->space < rem) {
                int ret = intel_wait_ring_buffer(dev, ring, rem);
                if (ret)
                        return ret;
        }

        virt = (unsigned int *)(ring->virtual_start + ring->tail);
        rem /= 8;
        while (rem--) {
                *virt++ = MI_NOOP;
                *virt++ = MI_NOOP;
        }

        ring->tail = 0;
        ring->space = ring->head - 8;

        return 0;
}

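/*
 * Busy-wait (yielding) for up to three seconds until the ring has at least
 * n bytes of free space, re-reading the hardware head each iteration.
 */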
int intel_wait_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring, int n)
{
        unsigned long end;

        trace_i915_ring_wait_begin(dev);
        end = jiffies + 3 * HZ;
        do {
                ring->head = ring->get_head(dev, ring);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(dev);
                        return 0;
                }

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }

                yield();
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end(dev);
        return -EBUSY;
}

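/*
 * Reserve space for num_dwords dwords of commands, wrapping or waiting as
 * needed; intel_ring_advance() then hands the updated tail to the hardware.
 */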
void intel_ring_begin(struct drm_device *dev,
                struct intel_ring_buffer *ring, int num_dwords)
{
        int n = 4*num_dwords;
        if (unlikely(ring->tail + n > ring->size))
                intel_wrap_ring_buffer(dev, ring);
        if (unlikely(ring->space < n))
                intel_wait_ring_buffer(dev, ring, n);

        ring->space -= n;
}

void intel_ring_advance(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        ring->tail &= ring->size - 1;
        ring->advance_ring(dev, ring);
}

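/*
 * Copy a block of pre-formed, dword-aligned command data straight into the
 * ring and advance the tail past it.
 */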
void intel_fill_struct(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                void *data,
                unsigned int len)
{
        unsigned int *virt = ring->virtual_start + ring->tail;
        BUG_ON((len & ~(4 - 1)) != 0);
        intel_ring_begin(dev, ring, len/4);
        memcpy(virt, data, len);
        ring->tail += len;
        ring->tail &= ring->size - 1;
        ring->space -= len;
        intel_ring_advance(dev, ring);
}

struct intel_ring_buffer render_ring = {
        .name                   = "render ring",
        .regs                   = {
                .ctl = PRB0_CTL,
                .head = PRB0_HEAD,
                .tail = PRB0_TAIL,
                .start = PRB0_START
        },
        .size                   = 32 * PAGE_SIZE,
        .alignment              = PAGE_SIZE,
        .virtual_start          = NULL,
        .dev                    = NULL,
        .gem_object             = NULL,
        .head                   = 0,
        .tail                   = 0,
        .space                  = 0,
        .user_irq_refcount      = 0,
        .irq_gem_seqno          = 0,
        .waiting_gem_seqno      = 0,
        .setup_status_page      = render_setup_status_page,
        .init                   = init_render_ring,
        .get_head               = render_ring_get_head,
        .get_tail               = render_ring_get_tail,
        .get_active_head        = render_ring_get_active_head,
        .advance_ring           = render_ring_advance_ring,
        .flush                  = render_ring_flush,
        .add_request            = render_ring_add_request,
        .get_gem_seqno          = render_ring_get_gem_seqno,
        .user_irq_get           = render_ring_get_user_irq,
        .user_irq_put           = render_ring_put_user_irq,
        .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
        .status_page            = {NULL, 0, NULL},
        .map                    = {0,}
};

/* ring buffer for bit-stream decoder */

struct intel_ring_buffer bsd_ring = {
        .name                   = "bsd ring",
        .regs                   = {
                .ctl = BSD_RING_CTL,
                .head = BSD_RING_HEAD,
                .tail = BSD_RING_TAIL,
                .start = BSD_RING_START
        },
        .size                   = 32 * PAGE_SIZE,
        .alignment              = PAGE_SIZE,
        .virtual_start          = NULL,
        .dev                    = NULL,
        .gem_object             = NULL,
        .head                   = 0,
        .tail                   = 0,
        .space                  = 0,
        .user_irq_refcount      = 0,
        .irq_gem_seqno          = 0,
        .waiting_gem_seqno      = 0,
        .setup_status_page      = bsd_setup_status_page,
        .init                   = init_bsd_ring,
        .get_head               = bsd_ring_get_head,
        .get_tail               = bsd_ring_get_tail,
        .get_active_head        = bsd_ring_get_active_head,
        .advance_ring           = bsd_ring_advance_ring,
        .flush                  = bsd_ring_flush,
        .add_request            = bsd_ring_add_request,
        .get_gem_seqno          = bsd_ring_get_gem_seqno,
        .user_irq_get           = bsd_ring_get_user_irq,
        .user_irq_put           = bsd_ring_put_user_irq,
        .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
        .status_page            = {NULL, 0, NULL},
        .map                    = {0,}
};