2 * Copyright © 2008-2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
35 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
37 return obj_priv->madv == I915_MADV_DONTNEED;
41 i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
42 unsigned alignment, int *found)
44 drm_i915_private_t *dev_priv = dev->dev_private;
45 struct drm_gem_object *obj;
46 struct drm_i915_gem_object *obj_priv;
47 struct drm_gem_object *best = NULL;
48 struct drm_gem_object *first = NULL;
50 /* Try to find the smallest clean object */
51 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
52 struct drm_gem_object *obj = &obj_priv->base;
53 if (obj->size >= min_size) {
54 if ((!obj_priv->dirty ||
55 i915_gem_object_is_purgeable(obj_priv)) &&
56 (!best || obj->size < best->size)) {
58 if (best->size == min_size)
66 obj = best ? best : first;
76 DRM_INFO("%s: evicting %p\n", __func__, obj);
78 obj_priv = to_intel_bo(obj);
79 BUG_ON(obj_priv->pin_count != 0);
80 BUG_ON(obj_priv->active);
82 /* Wait on the rendering and unbind the buffer. */
83 return i915_gem_object_unbind(obj);
87 i915_gem_flush_ring(struct drm_device *dev,
88 uint32_t invalidate_domains,
89 uint32_t flush_domains,
90 struct intel_ring_buffer *ring)
92 if (flush_domains & I915_GEM_DOMAIN_CPU)
93 drm_agp_chipset_flush(dev);
94 ring->flush(dev, ring,
100 i915_gem_evict_something(struct drm_device *dev,
101 int min_size, unsigned alignment)
103 drm_i915_private_t *dev_priv = dev->dev_private;
106 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
107 struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
109 i915_gem_retire_requests(dev);
111 /* If there's an inactive buffer available now, grab it
114 ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
120 /* If we didn't get anything, but the ring is still processing
121 * things, wait for the next to finish and hopefully leave us
124 if (!list_empty(&render_ring->request_list)) {
125 struct drm_i915_gem_request *request;
127 request = list_first_entry(&render_ring->request_list,
128 struct drm_i915_gem_request,
131 ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
138 if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
139 struct drm_i915_gem_request *request;
141 request = list_first_entry(&bsd_ring->request_list,
142 struct drm_i915_gem_request,
145 ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
152 /* If we didn't have anything on the request list but there
153 * are buffers awaiting a flush, emit one and try again.
154 * When we wait on it, those buffers waiting for that flush
155 * will get moved to inactive.
157 if (!list_empty(&dev_priv->mm.flushing_list)) {
158 struct drm_gem_object *obj = NULL;
159 struct drm_i915_gem_object *obj_priv;
161 /* Find an object that we can immediately reuse */
162 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
163 obj = &obj_priv->base;
164 if (obj->size >= min_size)
173 i915_gem_flush_ring(dev,
177 seqno = i915_add_request(dev, NULL,
186 /* If we didn't do any of the above, there's no single buffer
187 * large enough to swap out for the new one, so just evict
188 * everything and start again. (This should be rare.)
190 if (!list_empty(&dev_priv->mm.inactive_list))
191 return i915_gem_evict_inactive(dev);
193 return i915_gem_evict_everything(dev);
198 i915_gem_evict_everything(struct drm_device *dev)
200 drm_i915_private_t *dev_priv = dev->dev_private;
204 spin_lock(&dev_priv->mm.active_list_lock);
205 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
206 list_empty(&dev_priv->mm.flushing_list) &&
207 list_empty(&dev_priv->render_ring.active_list) &&
209 || list_empty(&dev_priv->bsd_ring.active_list)));
210 spin_unlock(&dev_priv->mm.active_list_lock);
215 /* Flush everything (on to the inactive lists) and evict */
216 ret = i915_gpu_idle(dev);
220 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
222 ret = i915_gem_evict_inactive(dev);
226 spin_lock(&dev_priv->mm.active_list_lock);
227 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
228 list_empty(&dev_priv->mm.flushing_list) &&
229 list_empty(&dev_priv->render_ring.active_list) &&
231 || list_empty(&dev_priv->bsd_ring.active_list)));
232 spin_unlock(&dev_priv->mm.active_list_lock);
233 BUG_ON(!lists_empty);
238 /** Unbinds all inactive objects. */
240 i915_gem_evict_inactive(struct drm_device *dev)
242 drm_i915_private_t *dev_priv = dev->dev_private;
244 while (!list_empty(&dev_priv->mm.inactive_list)) {
245 struct drm_gem_object *obj;
248 obj = &list_first_entry(&dev_priv->mm.inactive_list,
249 struct drm_i915_gem_object,
252 ret = i915_gem_object_unbind(obj);
254 DRM_ERROR("Error unbinding object: %d\n", ret);