Merge branch 'topic/pcm-drain-nonblock' into for-linus
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fd2b8bd..80e5ba4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -978,6 +978,7 @@ int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
        uint32_t read_domains = args->read_domains;
@@ -1006,12 +1007,22 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 
        mutex_lock(&dev->struct_mutex);
 #if WATCH_BUF
-       DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+       DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
                 obj, obj->size, read_domains, write_domain);
 #endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
+               struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
+               /* Update the LRU on the fence for the CPU access that's
+                * about to occur.
+                */
+               if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+                       list_move_tail(&obj_priv->fence_list,
+                                      &dev_priv->mm.fence_list);
+               }
+
                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
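
The point of the new list_move_tail() above is LRU maintenance: CPU access
through the GTT is a use of the object's fence, so the fence is bumped to the
tail (hot end) of mm.fence_list and becomes the last candidate for the steal
loop further down, which scans from the head. A minimal sketch of the
discipline, using the kernel's intrusive list; the helper names are
illustrative and not part of this patch:

    /* head = coldest fence, tail = most recently used */
    static void fence_touch(drm_i915_private_t *dev_priv,
                            struct drm_i915_gem_object *obj_priv)
    {
            /* O(1): unlink and relink at the hot end */
            list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
    }

    static struct drm_i915_gem_object *
    fence_coldest(drm_i915_private_t *dev_priv)
    {
            if (list_empty(&dev_priv->mm.fence_list))
                    return NULL;
            /* the head entry has gone longest without a touch */
            return list_first_entry(&dev_priv->mm.fence_list,
                                    struct drm_i915_gem_object, fence_list);
    }
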
@@ -1050,7 +1061,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        }
 
 #if WATCH_BUF
-       DRM_INFO("%s: sw_finish %d (%p %d)\n",
+       DRM_INFO("%s: sw_finish %d (%p %zd)\n",
                 __func__, args->handle, obj, obj->size);
 #endif
        obj_priv = obj->driver_private;
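
Both DRM_INFO changes in this patch are the same fix: obj->size is a size_t,
and %d mismatches it on 64-bit builds where size_t is eight bytes wide; the
C99 %z length modifier always matches size_t. A trivial standalone
illustration:

    #include <stdio.h>

    int main(void)
    {
            size_t size = 4096;

            /* %zu/%zd consume a size_t-sized argument on every ABI;
             * with plain %d, -Wformat warns and LP64 varargs misread */
            printf("object size %zu\n", size);
            return 0;
    }
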
@@ -1155,8 +1166,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        /* Need a new fence register? */
-       if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-           obj_priv->tiling_mode != I915_TILING_NONE) {
+       if (obj_priv->tiling_mode != I915_TILING_NONE) {
                ret = i915_gem_object_get_fence_reg(obj);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
@@ -1252,6 +1262,31 @@ out_free_list:
        return ret;
 }
 
+/**
+ * i915_gem_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void
+i915_gem_release_mmap(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       if (dev->dev_mapping)
+               unmap_mapping_range(dev->dev_mapping,
+                                   obj_priv->mmap_offset, obj->size, 1);
+}
+
 static void
 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
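
i915_gem_release_mmap() centralizes the revoke half of a revoke-and-refault
protocol: unmap_mapping_range() (with even_cows = 1) tears down every user
PTE covering the object's fake mmap offset, so the next CPU touch faults into
i915_gem_fault(), which can rebind the object and reacquire a fence before
reinserting a PTE. A shape-only sketch of the fault half, assuming the
handler follows the usual GTT-mapping pattern of this era (simplified error
handling, not the full function):

    static int gtt_fault_sketch(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            struct drm_gem_object *obj = vma->vm_private_data;
            struct drm_i915_gem_object *obj_priv = obj->driver_private;
            pgoff_t page_offset;
            unsigned long pfn;

            page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start)
                    >> PAGE_SHIFT;

            /* rebind if evicted, take a fence if tiled (the -1155 hunk
             * above now does the fence step unconditionally), then: */
            pfn = ((obj->dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT)
                    + page_offset;
            if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
                    return VM_FAULT_SIGBUS;
            return VM_FAULT_NOPAGE;
    }
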
@@ -1545,7 +1580,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        }
 
        if (was_empty && !dev_priv->mm.suspended)
-               schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        return seqno;
 }
 
@@ -1694,7 +1729,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
        i915_gem_retire_requests(dev);
        if (!dev_priv->mm.suspended &&
            !list_empty(&dev_priv->mm.request_list))
-               schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
 }
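
Both retire_work call sites move from schedule_delayed_work(), which shares
keventd with the rest of the system, to a driver-private workqueue. That
keeps request retirement from stalling behind unrelated system work and lets
the driver flush exactly its own jobs on suspend. dev_priv->wq itself is
created elsewhere in the driver; a sketch of the usual lifecycle, assuming a
single-threaded queue (the "i915" thread name is illustrative):

    /* driver load */
    dev_priv->wq = create_singlethread_workqueue("i915");
    if (dev_priv->wq == NULL)
            return -ENOMEM;

    /* producers, as in the two hunks above */
    queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);

    /* teardown: drain pending work, then free the worker thread */
    flush_workqueue(dev_priv->wq);
    destroy_workqueue(dev_priv->wq);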
 
@@ -1861,7 +1896,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
-       loff_t offset;
        int ret = 0;
 
 #if WATCH_BUF
@@ -1898,9 +1932,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
        BUG_ON(obj_priv->active);
 
        /* blow away mappings if mapped through GTT */
-       offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
-       if (dev->dev_mapping)
-               unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
+       i915_gem_release_mmap(obj);
 
        if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
                i915_gem_clear_fence_reg(obj);
@@ -2186,6 +2218,12 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
        struct drm_i915_gem_object *old_obj_priv = NULL;
        int i, ret, avail;
 
+       /* Just update our place in the LRU if our fence is getting used. */
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+               list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+               return 0;
+       }
+
        switch (obj_priv->tiling_mode) {
        case I915_TILING_NONE:
                WARN(1, "allocating a fence for non-tiled object?\n");
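
This early return is what lets the callers shed their fence_reg checks (the
-1155 and -3577 hunks): i915_gem_object_get_fence_reg() is now an idempotent
"get or touch" operation, so asking for a fence the object already owns just
refreshes its LRU position. The caller-side shape, taken from those hunks:

    /* before: callers had to know whether a fence was already held */
    if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
        obj_priv->tiling_mode != I915_TILING_NONE)
            ret = i915_gem_object_get_fence_reg(obj);

    /* after: ask unconditionally for tiled objects; a held fence is
     * kept, and merely bumped to the hot end of the LRU */
    if (obj_priv->tiling_mode != I915_TILING_NONE)
            ret = i915_gem_object_get_fence_reg(obj);
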
@@ -2207,7 +2245,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
        }
 
        /* First try to find a free reg */
-try_again:
        avail = 0;
        for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
                reg = &dev_priv->fence_regs[i];
@@ -2221,67 +2258,62 @@ try_again:
 
        /* None available, try to steal one or wait for a user to finish */
        if (i == dev_priv->num_fence_regs) {
-               uint32_t seqno = dev_priv->mm.next_gem_seqno;
-               loff_t offset;
+               struct drm_gem_object *old_obj = NULL;
 
                if (avail == 0)
                        return -ENOSPC;
 
-               for (i = dev_priv->fence_reg_start;
-                    i < dev_priv->num_fence_regs; i++) {
-                       uint32_t this_seqno;
-
-                       reg = &dev_priv->fence_regs[i];
-                       old_obj_priv = reg->obj->driver_private;
+               list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
+                                   fence_list) {
+                       old_obj = old_obj_priv->obj;
 
                        if (old_obj_priv->pin_count)
                                continue;
 
+                       /* Take a reference, as otherwise the wait_rendering
+                        * below may cause the object to get freed out from
+                        * under us.
+                        */
+                       drm_gem_object_reference(old_obj);
+
                        /* i915 uses fences for GPU access to tiled buffers */
                        if (IS_I965G(dev) || !old_obj_priv->active)
                                break;
 
-                       /* find the seqno of the first available fence */
-                       this_seqno = old_obj_priv->last_rendering_seqno;
-                       if (this_seqno != 0 &&
-                           reg->obj->write_domain == 0 &&
-                           i915_seqno_passed(seqno, this_seqno))
-                               seqno = this_seqno;
-               }
-
-               /*
-                * Now things get ugly... we have to wait for one of the
-                * objects to finish before trying again.
-                */
-               if (i == dev_priv->num_fence_regs) {
-                       if (seqno == dev_priv->mm.next_gem_seqno) {
-                               i915_gem_flush(dev,
-                                              I915_GEM_GPU_DOMAINS,
-                                              I915_GEM_GPU_DOMAINS);
-                               seqno = i915_add_request(dev, NULL,
-                                                        I915_GEM_GPU_DOMAINS);
-                               if (seqno == 0)
-                                       return -ENOMEM;
+                       /* This brings the object to the head of the LRU if it
+                        * had been written to.  The only way this should
+                        * result in us waiting longer than the expected
+                        * optimal amount of time is if there was a
+                        * fence-using buffer later that was read-only.
+                        */
+                       i915_gem_object_flush_gpu_write_domain(old_obj);
+                       ret = i915_gem_object_wait_rendering(old_obj);
+                       if (ret != 0) {
+                               drm_gem_object_unreference(old_obj);
+                               return ret;
                        }
 
-                       ret = i915_wait_request(dev, seqno);
-                       if (ret)
-                               return ret;
-                       goto try_again;
+                       break;
                }
 
                /*
                 * Zap this virtual mapping so we can set up a fence again
                 * for this object next time we need it.
                 */
-               offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
-               if (dev->dev_mapping)
-                       unmap_mapping_range(dev->dev_mapping, offset,
-                                           reg->obj->size, 1);
+               i915_gem_release_mmap(old_obj);
+
+               i = old_obj_priv->fence_reg;
+               reg = &dev_priv->fence_regs[i];
+
                old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+               list_del_init(&old_obj_priv->fence_list);
+
+               drm_gem_object_unreference(old_obj);
        }
 
        obj_priv->fence_reg = i;
+       list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+
        reg->obj = obj;
 
        if (IS_I965G(dev))
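
The rewritten steal path replaces the register scan, the seqno bookkeeping,
and the goto try_again retry loop with a single walk of mm.fence_list from
its head, i.e. coldest fence first. The load-bearing detail is the reference
taken before waiting: i915_gem_object_wait_rendering() can sleep with
struct_mutex dropped, and without the reference the victim could be freed
under us. Reduced to its shape (wait_until_idle and steal_fence are
hypothetical stand-ins for the calls above):

    drm_gem_object_reference(old_obj);            /* 1. pin the victim     */
    ret = wait_until_idle(old_obj);               /* 2. may sleep unlocked */
    if (ret) {
            drm_gem_object_unreference(old_obj);  /* 3a. bail safely       */
            return ret;
    }
    steal_fence(old_obj);                         /* 3b. victim is idle    */
    drm_gem_object_unreference(old_obj);          /* 4. drop our pin       */
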
@@ -2324,6 +2356,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 
        dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
+       list_del_init(&obj_priv->fence_list);
 }
 
 /**
@@ -2423,7 +2456,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        }
 
 #if WATCH_BUF
-       DRM_INFO("Binding object of size %d at 0x%08x\n",
+       DRM_INFO("Binding object of size %zd at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);
 #endif
        ret = i915_gem_object_get_pages(obj);
@@ -3577,9 +3610,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
         * Pre-965 chips need a fence register set up in order to
         * properly handle tiled surfaces.
         */
-       if (!IS_I965G(dev) &&
-           obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-           obj_priv->tiling_mode != I915_TILING_NONE) {
+       if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
                ret = i915_gem_object_get_fence_reg(obj);
                if (ret != 0) {
                        if (ret != -EBUSY && ret != -ERESTARTSYS)
@@ -3788,6 +3819,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
        obj_priv->obj = obj;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
        INIT_LIST_HEAD(&obj_priv->list);
+       INIT_LIST_HEAD(&obj_priv->fence_list);
 
        return 0;
 }
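
Initializing fence_list at object creation pairs with the list_del_init()
calls in the steal path and in i915_gem_clear_fence_reg(): a node that is
always either linked or self-pointing can be unlinked or moved
unconditionally, with no "am I on the list?" bookkeeping. In short:

    INIT_LIST_HEAD(&obj_priv->fence_list);  /* node points at itself */

    /* safe even if the node was never added, or was already removed */
    list_del_init(&obj_priv->fence_list);
    list_del_init(&obj_priv->fence_list);   /* idempotent */

    /* plain list_del() instead would leave next/prev poisoned, and a
     * second deletion or a later list_move_tail() would oops */
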
@@ -4200,15 +4232,11 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       int ret;
-
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       ret = i915_gem_idle(dev);
        drm_irq_uninstall(dev);
-
-       return ret;
+       return i915_gem_idle(dev);
 }
 
 void
@@ -4227,6 +4255,7 @@ i915_gem_lastclose(struct drm_device *dev)
 void
 i915_gem_load(struct drm_device *dev)
 {
+       int i;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        spin_lock_init(&dev_priv->mm.active_list_lock);
@@ -4234,6 +4263,7 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.request_list);
+       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        dev_priv->mm.next_gem_seqno = 1;
@@ -4246,6 +4276,18 @@ i915_gem_load(struct drm_device *dev)
        else
                dev_priv->num_fence_regs = 8;
 
+       /* Initialize fence registers to zero */
+       if (IS_I965G(dev)) {
+               for (i = 0; i < 16; i++)
+                       I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
+       } else {
+               for (i = 0; i < 8; i++)
+                       I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+               if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+                       for (i = 0; i < 8; i++)
+                               I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+       }
+
        i915_gem_detect_bit_6_swizzle(dev);
 }