drm/i915: Move the eviction logic to its own file.
[pandora-kernel.git] / drivers / gpu / drm / i915 / i915_gem_evict.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Chris Wilson <chris@chris-wilson.co.uk>
26  *
27  */
28
29 #include "drmP.h"
30 #include "drm.h"
31 #include "i915_drv.h"
32 #include "i915_drm.h"
33
34 static inline int
35 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
36 {
37         return obj_priv->madv == I915_MADV_DONTNEED;
38 }
39
40 static int
41 i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
42                                       unsigned alignment, int *found)
43 {
44         drm_i915_private_t *dev_priv = dev->dev_private;
45         struct drm_gem_object *obj;
46         struct drm_i915_gem_object *obj_priv;
47         struct drm_gem_object *best = NULL;
48         struct drm_gem_object *first = NULL;
49
50         /* Try to find the smallest clean object */
51         list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
52                 struct drm_gem_object *obj = &obj_priv->base;
53                 if (obj->size >= min_size) {
54                         if ((!obj_priv->dirty ||
55                              i915_gem_object_is_purgeable(obj_priv)) &&
56                             (!best || obj->size < best->size)) {
57                                 best = obj;
58                                 if (best->size == min_size)
59                                         break;
60                         }
61                         if (!first)
62                             first = obj;
63                 }
64         }
65
66         obj = best ? best : first;
67
68         if (!obj) {
69                 *found = 0;
70                 return 0;
71         }
72
73         *found = 1;
74
75 #if WATCH_LRU
76         DRM_INFO("%s: evicting %p\n", __func__, obj);
77 #endif
78         obj_priv = to_intel_bo(obj);
79         BUG_ON(obj_priv->pin_count != 0);
80         BUG_ON(obj_priv->active);
81
82         /* Wait on the rendering and unbind the buffer. */
83         return i915_gem_object_unbind(obj);
84 }
85
86 static void
87 i915_gem_flush_ring(struct drm_device *dev,
88                uint32_t invalidate_domains,
89                uint32_t flush_domains,
90                struct intel_ring_buffer *ring)
91 {
92         if (flush_domains & I915_GEM_DOMAIN_CPU)
93                 drm_agp_chipset_flush(dev);
94         ring->flush(dev, ring,
95                         invalidate_domains,
96                         flush_domains);
97 }
98
/*
 * Make room in the GTT for an allocation of @min_size bytes by evicting
 * buffer objects, trying progressively more expensive strategies:
 *
 *   1. retire completed requests, then evict a suitable inactive buffer;
 *   2. wait for the oldest outstanding request on the render (then BSD)
 *      ring, which may move buffers to the inactive list, and retry;
 *   3. flush a large-enough buffer off the flushing list and retry;
 *   4. as a last resort, evict all inactive buffers (or everything).
 *
 * Returns 0 on success or a negative error code.
 */
int
i915_gem_evict_something(struct drm_device *dev,
			 int min_size, unsigned alignment)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, found;

	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
	for (;;) {
		/* Move any buffers whose rendering has completed onto
		 * the inactive list before scanning it. */
		i915_gem_retire_requests(dev);

		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
							    alignment,
							    &found);
		if (found)
			return ret;

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for the next to finish and hopefully leave us
		 * a buffer to evict.
		 */
		if (!list_empty(&render_ring->request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&render_ring->request_list,
						   struct drm_i915_gem_request,
						   list);

			/* Interruptible wait on the oldest request. */
			ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
			if (ret)
				return ret;

			continue;
		}

		/* Same fallback for the BSD ring, where present. */
		if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&bsd_ring->request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
			if (ret)
				return ret;

			continue;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			struct drm_gem_object *obj = NULL;
			struct drm_i915_gem_object *obj_priv;

			/* Find an object that we can immediately reuse */
			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
				obj = &obj_priv->base;
				if (obj->size >= min_size)
					break;

				obj = NULL;
			}

			if (obj != NULL) {
				uint32_t seqno;

				/* Flush the object's pending writes and
				 * queue a request so the next loop
				 * iteration can wait for it to retire. */
				i915_gem_flush_ring(dev,
					       obj->write_domain,
					       obj->write_domain,
					       obj_priv->ring);
				seqno = i915_add_request(dev, NULL,
						obj->write_domain,
						obj_priv->ring);
				if (seqno == 0)
					return -ENOMEM;
				continue;
			}
		}

		/* If we didn't do any of the above, there's no single buffer
		 * large enough to swap out for the new one, so just evict
		 * everything and start again. (This should be rare.)
		 */
		if (!list_empty(&dev_priv->mm.inactive_list))
			return i915_gem_evict_inactive(dev);
		else
			return i915_gem_evict_everything(dev);
	}
}
196
197 int
198 i915_gem_evict_everything(struct drm_device *dev)
199 {
200         drm_i915_private_t *dev_priv = dev->dev_private;
201         int ret;
202         bool lists_empty;
203
204         spin_lock(&dev_priv->mm.active_list_lock);
205         lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
206                        list_empty(&dev_priv->mm.flushing_list) &&
207                        list_empty(&dev_priv->render_ring.active_list) &&
208                        (!HAS_BSD(dev)
209                         || list_empty(&dev_priv->bsd_ring.active_list)));
210         spin_unlock(&dev_priv->mm.active_list_lock);
211
212         if (lists_empty)
213                 return -ENOSPC;
214
215         /* Flush everything (on to the inactive lists) and evict */
216         ret = i915_gpu_idle(dev);
217         if (ret)
218                 return ret;
219
220         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
221
222         ret = i915_gem_evict_inactive(dev);
223         if (ret)
224                 return ret;
225
226         spin_lock(&dev_priv->mm.active_list_lock);
227         lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
228                        list_empty(&dev_priv->mm.flushing_list) &&
229                        list_empty(&dev_priv->render_ring.active_list) &&
230                        (!HAS_BSD(dev)
231                         || list_empty(&dev_priv->bsd_ring.active_list)));
232         spin_unlock(&dev_priv->mm.active_list_lock);
233         BUG_ON(!lists_empty);
234
235         return 0;
236 }
237
238 /** Unbinds all inactive objects. */
239 int
240 i915_gem_evict_inactive(struct drm_device *dev)
241 {
242         drm_i915_private_t *dev_priv = dev->dev_private;
243
244         while (!list_empty(&dev_priv->mm.inactive_list)) {
245                 struct drm_gem_object *obj;
246                 int ret;
247
248                 obj = &list_first_entry(&dev_priv->mm.inactive_list,
249                                         struct drm_i915_gem_object,
250                                         list)->base;
251
252                 ret = i915_gem_object_unbind(obj);
253                 if (ret != 0) {
254                         DRM_ERROR("Error unbinding object: %d\n", ret);
255                         return ret;
256                 }
257         }
258
259         return 0;
260 }