drm/i915: range-restricted eviction support
author	Daniel Vetter <daniel.vetter@ffwll.ch>	Thu, 16 Sep 2010 13:45:15 +0000 (15:45 +0200)
committer	Chris Wilson <chris@chris-wilson.co.uk>	Wed, 27 Oct 2010 22:31:04 +0000 (23:31 +0100)
Add a mappable parameter to i915_gem_evict_something() to distinguish
the two cases (non-restricted vs. mappable GTT allocations). There is
no functional change, because the mappable limit is currently set to
the end of the GTT.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
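
To make the intent of the new parameter concrete, here is a minimal
stand-alone sketch of the idea (not driver code): a binding that must be
CPU-mappable may only be placed, and hence may only evict, below
gtt_mappable_end, while everything else may use the whole GTT. The struct
and helper names below are invented for illustration; the real driver uses
drm_mm_search_free_in_range() and drm_mm_init_scan_with_range(), as shown
in the diff.

/* Illustration only: a stand-alone model of the mappable/non-mappable
 * split.  The gtt_model struct and search_end() helper are made up for
 * this sketch and are not part of the i915 code.
 */
#include <stdbool.h>
#include <stdio.h>

struct gtt_model {
	unsigned long total;         /* total GTT size */
	unsigned long mappable_end;  /* end of the CPU-mappable aperture */
};

/* End of the address range an allocation (and hence an eviction scan)
 * is allowed to use. */
static unsigned long search_end(const struct gtt_model *gtt, bool mappable)
{
	return mappable ? gtt->mappable_end : gtt->total;
}

int main(void)
{
	struct gtt_model gtt = { .total = 512UL << 20, .mappable_end = 256UL << 20 };

	printf("mappable eviction range: [0, %lu)\n", search_end(&gtt, true));
	printf("unrestricted range:      [0, %lu)\n", search_end(&gtt, false));
	return 0;
}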

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f168e82..dc0a21a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -535,6 +535,8 @@ typedef struct drm_i915_private {
                struct drm_mm vram;
                /** Memory allocator for GTT */
                struct drm_mm gtt_space;
+               /** End of mappable part of GTT */
+               unsigned long gtt_mappable_end;
 
                struct io_mapping *gtt_mapping;
                int gtt_mtrr;
@@ -1067,7 +1069,8 @@ void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
 
 /* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
+int i915_gem_evict_something(struct drm_device *dev, int min_size,
+                            unsigned alignment, bool mappable);
 int i915_gem_evict_everything(struct drm_device *dev);
 int i915_gem_evict_inactive(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d0aaf97..254eb0c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -187,6 +187,7 @@ int i915_gem_do_init(struct drm_device *dev,
                    end - start);
 
        dev_priv->mm.gtt_total = end - start;
+       dev_priv->mm.gtt_mappable_end = end;
 
        return 0;
 }
@@ -413,7 +414,8 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
                struct drm_device *dev = obj->dev;
 
                ret = i915_gem_evict_something(dev, obj->size,
-                                              i915_gem_get_gtt_alignment(obj));
+                                              i915_gem_get_gtt_alignment(obj),
+                                              false);
                if (ret)
                        return ret;
 
@@ -2672,7 +2674,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
-               ret = i915_gem_evict_something(dev, obj->size, alignment);
+               ret = i915_gem_evict_something(dev, obj->size, alignment, true);
                if (ret)
                        return ret;
 
@@ -2687,7 +2689,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                if (ret == -ENOMEM) {
                        /* first try to clear up some space from the GTT */
                        ret = i915_gem_evict_something(dev, obj->size,
-                                                      alignment);
+                                                      alignment, true);
                        if (ret) {
                                /* now try to shrink everyone else */
                                if (gfpmask) {
@@ -2717,7 +2719,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
 
-               ret = i915_gem_evict_something(dev, obj->size, alignment);
+               ret = i915_gem_evict_something(dev, obj->size, alignment, true);
                if (ret)
                        return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013..3a4215f 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -41,7 +41,8 @@ mark_free(struct drm_i915_gem_object *obj_priv,
 }
 
 int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+                        unsigned alignment, bool mappable)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
@@ -51,9 +52,17 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
        i915_gem_retire_requests(dev);
 
        /* Re-check for free space after retiring requests */
-       if (drm_mm_search_free(&dev_priv->mm.gtt_space,
-                              min_size, alignment, 0))
-               return 0;
+       if (mappable) {
+               if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+                                               min_size, alignment, 0,
+                                               dev_priv->mm.gtt_mappable_end,
+                                               0))
+                       return 0;
+       } else {
+               if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+                                      min_size, alignment, 0))
+                       return 0;
+       }
 
        /*
         * The goal is to evict objects and amalgamate space in LRU order.
@@ -79,7 +88,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
         */
 
        INIT_LIST_HEAD(&unwind_list);
-       drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+       if (mappable)
+               drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
+                                           alignment, 0,
+                                           dev_priv->mm.gtt_mappable_end);
+       else
+               drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
 
        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
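
For readers unfamiliar with range-restricted searches, the toy model below
shows what drm_mm_search_free_in_range() in the hunk above is asked to do,
under the simplifying assumption that the GTT is a flat array of pages (the
real allocator works on drm_mm nodes; the find_free() helper here is
invented for the sketch): the same amount of free space can satisfy an
unrestricted request yet fail a request that must land inside the mappable
range, which is exactly when eviction has to be restricted to that range
as well.

/* Toy model only: the slot array and find_free() are made up for this
 * sketch; they are not part of the drm_mm API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define GTT_PAGES 16

static bool gtt_free[GTT_PAGES];	/* true = page is free */

/* First run of 'npages' consecutive free pages ending at or before 'end',
 * or -1 if none exists. */
static int find_free(size_t npages, size_t end)
{
	size_t run = 0;

	for (size_t i = 0; i < end && i < GTT_PAGES; i++) {
		run = gtt_free[i] ? run + 1 : 0;
		if (run == npages)
			return (int)(i + 1 - npages);
	}
	return -1;
}

int main(void)
{
	/* Only the top of the GTT is free. */
	for (size_t i = 0; i < GTT_PAGES; i++)
		gtt_free[i] = (i >= 10);

	/* An unrestricted search succeeds, but a search limited to the first
	 * 8 "mappable" pages fails, so eviction must make room in that range. */
	printf("anywhere      : %d\n", find_free(4, GTT_PAGES));
	printf("mappable only : %d\n", find_free(4, 8));
	return 0;
}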