/* drivers/gpu/drm/i915/intel_ringbuffer.h */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct  intel_hw_status_page {
        u32     __iomem *page_addr;     /* CPU mapping of the status page */
        unsigned int    gfx_addr;       /* GTT address of the status page */
        struct          drm_i915_gem_object *obj; /* backing storage */
};

#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)

#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)

#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_RING_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)

#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)

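/*
 * Illustrative sketch, not part of this header: the per-ring accessors
 * above are what the ring init/reset paths use to program an engine,
 * along the lines of (gtt_offset is a hypothetical name here for the
 * GTT address of the ring's backing object):
 *
 *      I915_WRITE_CTL(ring, 0);
 *      I915_WRITE_HEAD(ring, 0);
 *      I915_WRITE_TAIL(ring, 0);
 *      I915_WRITE_START(ring, gtt_offset);
 */
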
struct drm_i915_gem_execbuffer2;
struct  intel_ring_buffer {
        const char      *name;
        enum intel_ring_id {
                RING_RENDER = 0x1,
                RING_BSD = 0x2,
                RING_BLT = 0x4,
        } id;
        u32             mmio_base;
        void            *virtual_start;
        struct          drm_device *dev;
        struct          drm_i915_gem_object *obj;

        unsigned int    head;   /* read offset into the ring, in bytes */
        unsigned int    tail;   /* write offset into the ring, in bytes */
        int             space;  /* bytes free for emission */
        int             size;   /* total size of the ring, in bytes */
        struct intel_hw_status_page status_page;

        u32             irq_seqno;              /* last seqno seen at irq time */
        u32             waiting_seqno;
        int             user_irq_refcount;
        void            (*user_irq_get)(struct intel_ring_buffer *ring);
        void            (*user_irq_put)(struct intel_ring_buffer *ring);

        int             (*init)(struct intel_ring_buffer *ring);

        void            (*write_tail)(struct intel_ring_buffer *ring,
                                      u32 value);
        void            (*flush)(struct intel_ring_buffer *ring,
                                 u32    invalidate_domains,
                                 u32    flush_domains);
        int             (*add_request)(struct intel_ring_buffer *ring,
                                       u32 *seqno);
        u32             (*get_seqno)(struct intel_ring_buffer *ring);
        int             (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
                                               struct drm_i915_gem_execbuffer2 *exec,
                                               struct drm_clip_rect *cliprects,
                                               uint64_t exec_offset);
        void            (*cleanup)(struct intel_ring_buffer *ring);

        /**
         * List of objects currently involved in rendering from the
         * ringbuffer.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives.  last_rendering_seqno
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head request_list;

        /**
         * List of objects currently pending a GPU write flush.
         *
         * All elements on this list will belong to either the
         * active_list or flushing_list, last_rendering_seqno can
         * be used to differentiate between the two lists.
         */
        struct list_head gpu_write_list;

        /**
         * Do we have requests outstanding that have not yet been emitted?
         */
        u32 outstanding_lazy_request;

        wait_queue_head_t irq_queue;
        drm_local_map_t map;

        void *private;
};
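
/*
 * A sketch of how an engine is described with this structure, modeled
 * on the static definitions in intel_ringbuffer.c; treat the field
 * values and function names below as illustrative, not authoritative:
 *
 *      static const struct intel_ring_buffer render_ring = {
 *              .name                   = "render ring",
 *              .id                     = RING_RENDER,
 *              .mmio_base              = RENDER_RING_BASE,
 *              .size                   = 32 * PAGE_SIZE,
 *              .init                   = init_render_ring,
 *              .write_tail             = ring_write_tail,
 *              .flush                  = render_ring_flush,
 *              .add_request            = render_ring_add_request,
 *              .get_seqno              = render_ring_get_seqno,
 *              .user_irq_get           = render_ring_get_user_irq,
 *              .user_irq_put           = render_ring_put_user_irq,
 *              .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
 *      };
 */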

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
                       int reg)
{
        return ioread32(ring->status_page.page_addr + reg);
}
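
/*
 * Example use (a sketch; the real callers are the get_seqno vfuncs):
 * the GPU writes the last completed seqno into a well-known dword of
 * the status page, which the driver samples with something like
 *
 *      seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *
 * where I915_GEM_HWS_INDEX is the slot defined elsewhere in the driver.
 */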

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
/* Wait until at least @n bytes of ring space are free. */
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
/* Reserve space for @n dwords before emitting commands. */
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

/* Write one dword into the ring at the current tail. */
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
                                   u32 data)
{
        iowrite32(data, ring->virtual_start + ring->tail);
        ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);
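
/*
 * Typical emission pattern (an illustrative sketch): reserve space with
 * intel_ring_begin(), write dwords with intel_ring_emit(), then let the
 * GPU see the new commands via intel_ring_advance():
 *
 *      ret = intel_ring_begin(ring, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(ring, MI_FLUSH);
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_advance(ring);
 */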

u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);

/* Per-engine ring initialisation, called during driver load. */
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

#endif /* _INTEL_RINGBUFFER_H_ */