drm/i915/ringbuffer: Handle cliprects in the caller
(pandora-kernel.git — drivers/gpu/drm/i915/intel_ringbuffer.h)
1 #ifndef _INTEL_RINGBUFFER_H_
2 #define _INTEL_RINGBUFFER_H_
3
/*
 * Hardware status page: one page of memory the GPU writes status dwords
 * (e.g. completed sequence numbers) into, which the CPU then reads via
 * intel_read_status_page().
 */
struct  intel_hw_status_page {
	u32	__iomem *page_addr;	/* CPU-side mapping of the page */
	unsigned int	gfx_addr;	/* GPU address of the page — presumably a GTT offset; verify against setup code */
	struct		drm_i915_gem_object *obj;	/* backing GEM object */
};
9
/*
 * Ring register accessors: read/write the TAIL/START/HEAD/CTL registers
 * of an engine, addressed relative to that engine's mmio_base.
 *
 * NOTE: these expand 'dev_priv' from the caller's scope, following the
 * I915_READ/I915_WRITE convention elsewhere in the driver.
 *
 * All macro arguments are parenthesized so that expression arguments
 * (e.g. a conditional ring pointer) expand correctly.
 */
#define I915_RING_READ(reg) i915_safe_read(dev_priv, (reg))

#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), (val))

#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), (val))

#define I915_READ_HEAD(ring)  I915_RING_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), (val))

#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), (val))
23
struct drm_i915_gem_execbuffer2;

/*
 * Per-engine command ring: the ring buffer state itself plus the vtable
 * of engine-specific operations.  Each engine (render, BSD, BLT)
 * provides its own hook implementations.
 */
struct  intel_ring_buffer {
	const char	*name;		/* human-readable engine name, for debug */
	enum intel_ring_id {
		RING_RENDER = 0x1,
		RING_BSD = 0x2,
		RING_BLT = 0x4,
	} id;				/* engine identifier; values are bitmask-style */
	u32		mmio_base;	/* base of this engine's register block */
	void		*virtual_start;	/* CPU mapping of the ring contents */
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;	/* GEM object backing the ring */

	unsigned int	head;		/* byte offset the GPU reads from */
	unsigned int	tail;		/* byte offset the CPU writes at (see intel_ring_emit) */
	int		space;		/* free bytes before tail would overrun head */
	int		size;		/* total ring size in bytes */
	struct intel_hw_status_page status_page;

	u32		irq_seqno;		/* last seqno seen at irq time */
	u32		waiting_seqno;		/* seqno a waiter is currently blocked on */
	int		user_irq_refcount;	/* nesting count for user IRQ enable */
	void		(*user_irq_get)(struct intel_ring_buffer *ring);
	void		(*user_irq_put)(struct intel_ring_buffer *ring);

	/* One-time hardware initialization of this engine. */
	int		(*init)(struct intel_ring_buffer *ring);

	/* Tell the hardware where the new tail is. */
	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	/* Emit cache-flush commands for the given domains. */
	void		(*flush)(struct intel_ring_buffer *ring,
				 u32	invalidate_domains,
				 u32	flush_domains);
	/* Emit a breadcrumb request; returns the seqno via *seqno. */
	int		(*add_request)(struct intel_ring_buffer *ring,
				       u32 *seqno);
	u32		(*get_seqno)(struct intel_ring_buffer *ring);
	/* Emit a batchbuffer-start for the batch at @offset, @length bytes. */
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length);
	void		(*cleanup)(struct intel_ring_buffer *ring);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or flushing_list, last_rendering_seqno can
	 * be used to differentiate between the two elements.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;

	wait_queue_head_t irq_queue;	/* waiters blocked on ring interrupts */
	drm_local_map_t map;

	void *private;			/* engine-specific private state */
};
100
101 static inline u32
102 intel_read_status_page(struct intel_ring_buffer *ring,
103                        int reg)
104 {
105         return ioread32(ring->status_page.page_addr + reg);
106 }
107
/* Tear down a ring and release its backing resources. */
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
/* Wait until at least @n units of ring space are free; can fail.
 * NOTE(review): units (bytes vs dwords) of @n are not visible in this
 * header — confirm against intel_ringbuffer.c before relying on them. */
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
/* Reserve space for @n units before a run of intel_ring_emit() calls. */
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
111
112 static inline void intel_ring_emit(struct intel_ring_buffer *ring,
113                                    u32 data)
114 {
115         iowrite32(data, ring->virtual_start + ring->tail);
116         ring->tail += 4;
117 }
118
/* Publish the ring's new tail to the hardware after emitting commands. */
void intel_ring_advance(struct intel_ring_buffer *ring);

/* Return the seqno to use for the next request on this ring. */
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);

/* Per-engine constructors, called during driver load. */
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

/* Read the engine's active-head register (for hang diagnosis/debug). */
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
/* Program the hardware with the location of the status page. */
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
129
130 #endif /* _INTEL_RINGBUFFER_H_ */