/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
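
/* mark_free() stashes the object on the caller's unwind list, takes a
 * reference so the object cannot vanish while the scan is in progress,
 * and returns true once drm_mm reports that the blocks marked so far
 * form a large enough hole.
 */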
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
	list_add(&obj->exec_list, unwind);
	drm_gem_object_reference(&obj->base);
	return drm_mm_scan_add_block(obj->gtt_space);
}
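
/**
 * i915_gem_evict_something - find and evict a set of objects to free space
 * @dev: drm device
 * @min_size: size (in bytes) of the free block required
 * @alignment: alignment required for the free block
 * @mappable: whether the block must lie inside the CPU-mappable aperture
 *
 * Scans the GTT in LRU order for a set of objects whose eviction would
 * produce a contiguous hole of at least @min_size bytes, then unbinds
 * that set. Returns 0 on success, -ENOSPC if no suitable set was found.
 */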
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
			 unsigned alignment, bool mappable)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	i915_gem_retire_requests(dev);

	/* Re-check for free space after retiring requests */
	if (mappable) {
		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
						min_size, alignment, 0,
						dev_priv->mm.gtt_mappable_end,
						0))
			return 0;
	} else {
		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
				       min_size, alignment, 0))
			return 0;
	}

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Clean active objects
	 *   3. Flushing list
	 *   4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
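
	/* The drm_mm scan interface works in two phases: each candidate
	 * block is fed to drm_mm_scan_add_block() until it reports that
	 * the marked blocks would coalesce into a big enough hole, and
	 * afterwards every marked block must be handed back through
	 * drm_mm_scan_remove_block(), which reports whether that block
	 * forms part of the winning hole.
	 */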
	INIT_LIST_HEAD(&unwind_list);
	if (mappable)
		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
					    alignment, 0,
					    dev_priv->mm.gtt_mappable_end);
	else
		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		/* Does the object require an outstanding flush? */
		if (obj->base.write_domain || obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Finally add anything with a pending flush (in order of retirement) */
	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
		if (obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (!obj->base.write_domain || obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);

		ret = drm_mm_scan_remove_block(obj->gtt_space);
		BUG_ON(ret);

		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (drm_mm_scan_remove_block(obj->gtt_space)) {
			list_move(&obj->exec_list, &eviction_list);
			continue;
		}
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj = list_first_entry(&eviction_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (ret == 0)
			ret = i915_gem_object_unbind(obj);

		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	return ret;
}
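
/**
 * i915_gem_evict_everything - evict all evictable objects from the GTT
 * @dev: drm device
 * @purgeable_only: only evict objects not marked I915_MADV_WILLNEED
 *
 * Idling the GPU retires all outstanding requests and moves every object
 * on to the inactive list, after which a single pass of
 * i915_gem_evict_inactive() unbinds them.
 */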
int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	bool lists_empty;

	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	if (lists_empty)
		return -ENOSPC;

	/* Flush everything (on to the inactive lists) and evict */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	return i915_gem_evict_inactive(dev, purgeable_only);
}

/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list, mm_list) {
		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
			int ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	return 0;
}