#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	u32 __iomem	*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)

#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)

#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)

#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)

#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
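
/*
 * Usage sketch (not part of this file): these macros expect a dev_priv
 * local in scope, as I915_RING_READ above shows. After new commands have
 * been written into the ring, the updated tail is handed to the hardware
 * with something like:
 *
 *	I915_WRITE_TAIL(ring, ring->tail);
 */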

struct drm_i915_gem_execbuffer2;
struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RING_RENDER = 0x1,
		RING_BSD = 0x2,
		RING_BLT = 0x4,
	} id;
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	struct intel_hw_status_page status_page;

	u32		irq_seqno;	/* last seqno seen at irq time */
	u32		waiting_seqno;
	int		user_irq_refcount;
	void		(*user_irq_get)(struct intel_ring_buffer *ring);
	void		(*user_irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	void		(*flush)(struct intel_ring_buffer *ring,
				 u32 invalidate_domains,
				 u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring,
				       u32 *seqno);
	u32		(*get_seqno)(struct intel_ring_buffer *ring);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length);
	void		(*cleanup)(struct intel_ring_buffer *ring);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or flushing_list; last_rendering_seqno can
	 * be used to differentiate between the two.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have any outstanding requests that have not yet
	 * been emitted?
	 */
	u32 outstanding_lazy_request;

	wait_queue_head_t irq_queue;
};
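
/*
 * Sketch of how a waiter is expected to pair the user_irq hooks with
 * irq_queue, modelled on the i915_do_wait_request() pattern;
 * i915_seqno_passed() comes from i915_drv.h:
 *
 *	ring->user_irq_get(ring);
 *	wait_event(ring->irq_queue,
 *		   i915_seqno_passed(ring->get_seqno(ring), seqno));
 *	ring->user_irq_put(ring);
 */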

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	return ioread32(ring->status_page.page_addr + reg);
}
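
/*
 * For example, the seqno most recently completed by the GPU is fetched
 * from the status page (I915_GEM_HWS_INDEX, defined in i915_drv.h, is
 * the dword slot this driver reserves for it):
 *
 *	seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */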

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);
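
/*
 * Typical emission sequence (sketch; callers reserve space in dwords
 * with intel_ring_begin(), write exactly that many dwords, then
 * advance the tail):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */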

u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

#endif /* _INTEL_RINGBUFFER_H_ */