/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
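/* Add the object to the eviction scan's unwind list, taking a temporary
 * reference, and report whether the scan has now marked enough space to
 * satisfy the pending allocation.
 */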
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
          struct list_head *unwind)
{
        list_add(&obj_priv->evict_list, unwind);
        drm_gem_object_reference(&obj_priv->base);
        return drm_mm_scan_add_block(obj_priv->gtt_space);
}
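/** Scans the inactive, active and flushing lists in LRU order and evicts
 * just enough objects to free a contiguous block of at least min_size
 * bytes at the requested alignment (optionally within the mappable
 * aperture).
 */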
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
                         unsigned alignment, bool mappable)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        i915_gem_retire_requests(dev);
        /* Re-check for free space after retiring requests */
        if (mappable) {
                if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
                                                min_size, alignment, 0,
                                                dev_priv->mm.gtt_mappable_end,
                                                0))
                        return 0;
        } else {
                if (drm_mm_search_free(&dev_priv->mm.gtt_space,
                                       min_size, alignment, 0))
                        return 0;
        }
        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
         * retirement order. The next objects to retire are those on the (per
         * ring) active list that do not have an outstanding flush. Once the
         * hardware reports completion (the seqno is updated after the
         * batchbuffer has been finished) the clean buffer objects would
         * be retired to the inactive list. Any dirty objects would be added
         * to the tail of the flushing list. So after processing the clean
         * active objects we need to emit a MI_FLUSH to retire the flushing
         * list, hence the retirement order of the flushing list is in
         * advance of the dirty objects on the active lists.
         *
         * The retirement sequence is thus:
         *    1. Inactive objects (already retired)
         *    2. Clean active objects
         *    3. Objects on the flushing list
         *    4. Dirty active objects.
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */
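        /* The four passes below add candidates in exactly this order, and we
         * stop as soon as drm_mm reports that the marked objects would free a
         * large enough hole.
         */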
        INIT_LIST_HEAD(&unwind_list);
        if (mappable)
                drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
                                            alignment, 0,
                                            dev_priv->mm.gtt_mappable_end);
        else
                drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }
        /* Now merge in the soon-to-be-expired objects... */
        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                /* Does the object require an outstanding flush? */
                if (obj_priv->base.write_domain || obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }
        /* Next, add anything with a pending flush (in order of retirement)... */
        list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
                if (obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }
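        /* ...and lastly consider the dirty, still-active objects. */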
        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                if (!obj_priv->base.write_domain || obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }
        /* Nothing found, clean up and bail out! */
        list_for_each_entry(obj_priv, &unwind_list, evict_list) {
                ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
                BUG_ON(ret);

                drm_gem_object_unreference(&obj_priv->base);
        }
        /* We expect the caller to unpin, evict all and try again, or give up,
         * so calling i915_gem_evict_everything() here is unnecessary.
         */
        return -ENOSPC;

found:
        /* drm_mm doesn't allow any other operations while scanning;
         * therefore, store the to-be-evicted objects on a temporary list.
         */
        INIT_LIST_HEAD(&eviction_list);
        while (!list_empty(&unwind_list)) {
                obj_priv = list_first_entry(&unwind_list,
                                            struct drm_i915_gem_object,
                                            evict_list);
                if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
                        list_move(&obj_priv->evict_list, &eviction_list);
                        continue;
                }

                list_del(&obj_priv->evict_list);
                drm_gem_object_unreference(&obj_priv->base);
        }
        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
                obj_priv = list_first_entry(&eviction_list,
                                            struct drm_i915_gem_object,
                                            evict_list);
                if (ret == 0)
                        ret = i915_gem_object_unbind(&obj_priv->base);

                list_del(&obj_priv->evict_list);
                drm_gem_object_unreference(&obj_priv->base);
        }

        return ret;
}
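/** Flushes and idles the GPU so that all objects retire to the inactive
 * list, then unbinds them (optionally only those marked purgeable).
 */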
int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool lists_empty;
        int ret;

        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->mm.active_list));
        if (lists_empty)
                return -ENOSPC;
        /* Flush everything (on to the inactive lists) and evict */
        ret = i915_gpu_idle(dev);
        if (ret)
                return ret;

        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

        return i915_gem_evict_inactive(dev, purgeable_only);
}
/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj, *next;

        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.inactive_list, mm_list) {
                if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
                        int ret = i915_gem_object_unbind(&obj->base);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}