drivers/gpu/drm/i915/i915_gem_evict.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"

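/*
 * Walk the render and BSD active lists in parallel, returning whichever
 * object finished rendering first (i.e. has the older seqno) and
 * advancing that list's iterator. Returns NULL once both lists are
 * exhausted. On hardware without a BSD ring only the render list is
 * consulted.
 */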
static struct drm_i915_gem_object *
i915_gem_next_active_object(struct drm_device *dev,
                            struct list_head **render_iter,
                            struct list_head **bsd_iter)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;

        if (*render_iter != &dev_priv->render_ring.active_list)
                render_obj = list_entry(*render_iter,
                                        struct drm_i915_gem_object,
                                        list);

        if (HAS_BSD(dev)) {
                if (*bsd_iter != &dev_priv->bsd_ring.active_list)
                        bsd_obj = list_entry(*bsd_iter,
                                             struct drm_i915_gem_object,
                                             list);

                if (render_obj == NULL) {
                        *bsd_iter = (*bsd_iter)->next;
                        return bsd_obj;
                }

                if (bsd_obj == NULL) {
                        *render_iter = (*render_iter)->next;
                        return render_obj;
                }

                /* XXX can we handle seqno wrapping? */
                if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
                        *render_iter = (*render_iter)->next;
                        return render_obj;
                } else {
                        *bsd_iter = (*bsd_iter)->next;
                        return bsd_obj;
                }
        } else {
                *render_iter = (*render_iter)->next;
                return render_obj;
        }
}

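/*
 * Add the object to the unwind list, hold a reference on it for the
 * duration of the scan, and report whether drm_mm has now collected
 * enough neighbouring blocks to satisfy the pending allocation.
 */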
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
          struct list_head *unwind)
{
        list_add(&obj_priv->evict_list, unwind);
        drm_gem_object_reference(&obj_priv->base);
        return drm_mm_scan_add_block(obj_priv->gtt_space);
}

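/*
 * Iterate over all objects on the per-ring active lists in order of
 * completion, oldest first. Note that the macro expands to multiple
 * statements, so it must not be used as the single body of an
 * unbraced if/else.
 */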
#define i915_for_each_active_object(OBJ, R, B) \
        *(R) = dev_priv->render_ring.active_list.next; \
        *(B) = dev_priv->bsd_ring.active_list.next; \
        while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)

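/*
 * Evict just enough objects, in LRU order, to open up a contiguous GTT
 * hole of at least @min_size bytes at the requested @alignment.
 * Returns 0 on success, -ENOSPC if no suitable candidates were found,
 * or the error returned while unbinding an object.
 */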
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
        struct drm_i915_gem_object *obj_priv;
        struct list_head *render_iter, *bsd_iter;
        int ret = 0;

        i915_gem_retire_requests(dev);

        /* Re-check for free space after retiring requests */
        if (drm_mm_search_free(&dev_priv->mm.gtt_space,
                               min_size, alignment, 0))
                return 0;

        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
         * retirement order. The next objects to retire are those on the (per
         * ring) active list that do not have an outstanding flush. Once the
         * hardware reports completion (the seqno is updated after the
         * batchbuffer has been finished) the clean buffer objects would
         * be retired to the inactive list. Any dirty objects would be added
         * to the tail of the flushing list. So after processing the clean
         * active objects we need to emit a MI_FLUSH to retire the flushing
         * list, hence the retirement order of the flushing list is in
         * advance of the dirty objects on the active lists.
         *
         * The retirement sequence is thus:
         *   1. Inactive objects (already retired)
         *   2. Clean active objects
         *   3. Flushing list
         *   4. Dirty active objects.
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */

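        /*
         * drm_mm's scan helpers work in two phases: mark candidate blocks
         * with drm_mm_scan_add_block() until one of them completes a
         * large-enough hole, then remove every marked block again with
         * drm_mm_scan_remove_block() before any other drm_mm operation.
         */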
        INIT_LIST_HEAD(&unwind_list);
        drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }

        /* Now merge in the soon-to-be-expired objects... */
        i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
                /* Does the object require an outstanding flush? */
                if (obj_priv->base.write_domain || obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }

        /* Then add anything with a pending flush (in order of retirement) */
        list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
                if (obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }
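        /* ...and last of all the dirty objects still on the active lists */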
        i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
                if (!obj_priv->base.write_domain || obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }

        /* Nothing found, clean up and bail out! */
        list_for_each_entry(obj_priv, &unwind_list, evict_list) {
                ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
                BUG_ON(ret);
                drm_gem_object_unreference(&obj_priv->base);
        }

        /* We expect the caller to unpin, evict all and try again, or give up.
         * So calling i915_gem_evict_everything() is unnecessary.
         */
        return -ENOSPC;

found:
        /* drm_mm doesn't allow any other operations while scanning,
         * so store the objects to be evicted on a temporary list.
         */
        INIT_LIST_HEAD(&eviction_list);
        while (!list_empty(&unwind_list)) {
                obj_priv = list_first_entry(&unwind_list,
                                            struct drm_i915_gem_object,
                                            evict_list);
                if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
                        /* This block overlaps the hole we found: evict it */
                        list_move(&obj_priv->evict_list, &eviction_list);
                        continue;
                }
                list_del(&obj_priv->evict_list);
                drm_gem_object_unreference(&obj_priv->base);
        }

        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
                obj_priv = list_first_entry(&eviction_list,
                                            struct drm_i915_gem_object,
                                            evict_list);
                if (ret == 0)
                        ret = i915_gem_object_unbind(&obj_priv->base);
                list_del(&obj_priv->evict_list);
                drm_gem_object_unreference(&obj_priv->base);
        }

        return ret;
}

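/*
 * Evict the entire GTT: idle the GPU so that every object retires to
 * the inactive list, then unbind each object on it. Returns -ENOSPC if
 * there was nothing to evict in the first place.
 */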
int
i915_gem_evict_everything(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
        bool lists_empty;

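        /* Nothing to evict if every list is already empty. */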
        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
                       (!HAS_BSD(dev)
                        || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);

        if (lists_empty)
                return -ENOSPC;

        /* Flush everything (on to the inactive lists) and evict */
        ret = i915_gpu_idle(dev);
        if (ret)
                return ret;

        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

        ret = i915_gem_evict_inactive(dev);
        if (ret)
                return ret;

        /* Sanity check: idling and evicting must have emptied every list */
        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
                       (!HAS_BSD(dev)
                        || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);
        BUG_ON(!lists_empty);

        return 0;
}

/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        while (!list_empty(&dev_priv->mm.inactive_list)) {
                struct drm_gem_object *obj;
                int ret;

                obj = &list_first_entry(&dev_priv->mm.inactive_list,
                                        struct drm_i915_gem_object,
                                        list)->base;

                ret = i915_gem_object_unbind(obj);
                if (ret != 0) {
                        DRM_ERROR("Error unbinding object: %d\n", ret);
                        return ret;
                }
        }

        return 0;
}