drivers/gpu/drm/i915/i915_gem_evict.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"

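/*
 * Walk the render and BSD active lists in parallel, returning whichever
 * head object has the older last_rendering_seqno, so objects are visited
 * in overall retirement order across both rings (on non-BSD hardware only
 * the render list is walked). The iterators are list cursors previously
 * initialised to each ring's active_list (see i915_for_each_active_object
 * below); the chosen iterator is advanced before returning. Returns NULL
 * once both lists are exhausted.
 */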
static struct drm_i915_gem_object *
i915_gem_next_active_object(struct drm_device *dev,
                            struct list_head **render_iter,
                            struct list_head **bsd_iter)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;

        if (*render_iter != &dev_priv->render_ring.active_list)
                render_obj = list_entry(*render_iter,
                                        struct drm_i915_gem_object,
                                        list);

        if (HAS_BSD(dev)) {
                if (*bsd_iter != &dev_priv->bsd_ring.active_list)
                        bsd_obj = list_entry(*bsd_iter,
                                             struct drm_i915_gem_object,
                                             list);

                if (render_obj == NULL) {
                        *bsd_iter = (*bsd_iter)->next;
                        return bsd_obj;
                }

                if (bsd_obj == NULL) {
                        *render_iter = (*render_iter)->next;
                        return render_obj;
                }

                /* XXX can we handle seqno wrapping? */
                if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
                        *render_iter = (*render_iter)->next;
                        return render_obj;
                } else {
                        *bsd_iter = (*bsd_iter)->next;
                        return bsd_obj;
                }
        } else {
                *render_iter = (*render_iter)->next;
                return render_obj;
        }
}

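/*
 * Feed the object to the eviction scanner and remember it on the unwind
 * list so the scan can be rolled back later. Returns true as soon as
 * drm_mm has found a hole large enough to satisfy the request.
 */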
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
          struct list_head *unwind)
{
        list_add(&obj_priv->evict_list, unwind);
        return drm_mm_scan_add_block(obj_priv->gtt_space);
}

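/*
 * Iterate over the objects of both active lists in retirement order. R and
 * B are struct list_head ** cursors for the render and BSD rings; the macro
 * initialises them itself. Note that it expands to multiple statements, so
 * it must not be used as the single body of an unbraced conditional.
 */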
#define i915_for_each_active_object(OBJ, R, B) \
        *(R) = dev_priv->render_ring.active_list.next; \
        *(B) = dev_priv->bsd_ring.active_list.next; \
        while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)

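/*
 * A minimal caller sketch (hypothetical, mirroring the usual bind path in
 * i915_gem.c): when no free GTT hole of the required size exists, evict
 * and retry the search, giving up only if eviction itself fails:
 *
 *     ret = i915_gem_evict_something(dev, obj->size, alignment);
 *     if (ret)
 *             return ret;
 *     goto search_free;
 */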
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
        struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
        struct list_head *render_iter, *bsd_iter;
        int ret = 0;

        i915_gem_retire_requests(dev);

        /* Re-check for free space after retiring requests */
        if (drm_mm_search_free(&dev_priv->mm.gtt_space,
                               min_size, alignment, 0))
                return 0;

        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
         * retirement order. The next objects to retire are those on the (per
         * ring) active list that do not have an outstanding flush. Once the
         * hardware reports completion (the seqno is updated after the
         * batchbuffer has been finished) the clean buffer objects would
         * be retired to the inactive list. Any dirty objects would be added
         * to the tail of the flushing list. So after processing the clean
         * active objects we need to emit a MI_FLUSH to retire the flushing
         * list, hence the retirement order of the flushing list is in
         * advance of the dirty objects on the active lists.
         *
         * The retirement sequence is thus:
         *   1. Inactive objects (already retired)
         *   2. Clean active objects
         *   3. Flushing list
         *   4. Dirty active objects.
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */

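        /*
         * drm_mm scanning protocol: after drm_mm_init_scan(), candidate
         * blocks are fed in via drm_mm_scan_add_block() until it reports
         * that a large enough hole exists. Every block added must then be
         * taken out again with drm_mm_scan_remove_block(), which returns
         * true for exactly those blocks that make up the hole and must
         * therefore be evicted; no other drm_mm operation may run on this
         * manager while the scan is in progress.
         */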
        INIT_LIST_HEAD(&unwind_list);
        drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }

        /* Now merge in the soon-to-be-expired objects... */
        i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
                /* Does the object require an outstanding flush? */
                if (obj_priv->base.write_domain || obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }

        /* Finally add anything with a pending flush (in order of retirement) */
        list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
                if (obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }
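
        /* ...and then walk the dirty objects still on the active lists. */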
        i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
                if (!obj_priv->base.write_domain || obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }

        /* Nothing found, clean up and bail out! */
        list_for_each_entry(obj_priv, &unwind_list, evict_list) {
                ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
                BUG_ON(ret);
        }

        /* We expect the caller to unpin, evict all and try again, or give up.
         * So calling i915_gem_evict_everything() is unnecessary.
         */
        return -ENOSPC;

found:
        INIT_LIST_HEAD(&eviction_list);
        list_for_each_entry_safe(obj_priv, tmp_obj_priv,
                                 &unwind_list, evict_list) {
                if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
                        /* drm_mm doesn't allow any other operations while
                         * scanning, therefore store to-be-evicted objects on
                         * a temporary list. */
                        list_move(&obj_priv->evict_list, &eviction_list);
                }
        }

        /* Unbinding will emit any required flushes */
        list_for_each_entry_safe(obj_priv, tmp_obj_priv,
                                 &eviction_list, evict_list) {
#if WATCH_LRU
                DRM_INFO("%s: evicting %p\n", __func__, obj_priv);
#endif
                ret = i915_gem_object_unbind(&obj_priv->base);
                if (ret)
                        return ret;
        }

        /* The just created free hole should be on the top of the free stack
         * maintained by drm_mm, so this BUG_ON actually executes in O(1).
         * Furthermore all accessed data has just recently been used, so it
         * should be really fast, too. */
        BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
                                   alignment, 0));

        return 0;
}

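/*
 * Flush and idle the whole GPU so that every object lands on the inactive
 * list, then unbind them all. Returns -ENOSPC when there is nothing at all
 * to evict, telling the caller that even a full eviction cannot help.
 */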
int
i915_gem_evict_everything(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
        bool lists_empty;

        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
                       (!HAS_BSD(dev)
                        || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);

        if (lists_empty)
                return -ENOSPC;

        /* Flush everything (on to the inactive lists) and evict */
        ret = i915_gpu_idle(dev);
        if (ret)
                return ret;

        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

        ret = i915_gem_evict_inactive(dev);
        if (ret)
                return ret;

        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
                       (!HAS_BSD(dev)
                        || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);
        BUG_ON(!lists_empty);

        return 0;
}

/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        while (!list_empty(&dev_priv->mm.inactive_list)) {
                struct drm_gem_object *obj;
                int ret;

                obj = &list_first_entry(&dev_priv->mm.inactive_list,
                                        struct drm_i915_gem_object,
                                        list)->base;

                ret = i915_gem_object_unbind(obj);
                if (ret != 0) {
                        DRM_ERROR("Error unbinding object: %d\n", ret);
                        return ret;
                }
        }

        return 0;
}