diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0f417ac..19620a6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                goto out;
 
        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(nvbo->gem);
 out:
-       drm_gem_object_handle_unreference_unlocked(nvbo->gem);
-
-       if (ret)
-               drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
 }
 
@@ -245,7 +243,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
-               drm_gem_object_unreference(nvbo->gem);
+               drm_gem_object_unreference_unlocked(nvbo->gem);
        }
 }
 
@@ -300,7 +298,7 @@ retry:
                        validate_fini(op, NULL);
                        if (ret == -EAGAIN)
                                ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
-                       drm_gem_object_unreference(gem);
+                       drm_gem_object_unreference_unlocked(gem);
                        if (ret) {
                                NV_ERROR(dev, "fail reserve\n");
                                return ret;
@@ -337,7 +335,9 @@ retry:
                                return -EINVAL;
                        }
 
+                       mutex_unlock(&drm_global_mutex);
                        ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+                       mutex_lock(&drm_global_mutex);
                        if (ret) {
                                NV_ERROR(dev, "fail wait_cpu\n");
                                return ret;
@@ -361,16 +361,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
-               struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
 
-               if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
-                       spin_lock(&nvbo->bo.lock);
-                       ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-                       spin_unlock(&nvbo->bo.lock);
-                       if (unlikely(ret)) {
-                               NV_ERROR(dev, "fail wait other chan\n");
-                               return ret;
-                       }
+               ret = nouveau_bo_sync_gpu(nvbo, chan);
+               if (unlikely(ret)) {
+                       NV_ERROR(dev, "fail pre-validate sync\n");
+                       return ret;
                }
 
                ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -381,7 +376,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                        return ret;
                }
 
-               nvbo->channel = chan;
+               nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
                ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                                      false, false, false);
                nvbo->channel = NULL;
@@ -390,6 +385,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                        return ret;
                }
 
+               ret = nouveau_bo_sync_gpu(nvbo, chan);
+               if (unlikely(ret)) {
+                       NV_ERROR(dev, "fail post-validate sync\n");
+                       return ret;
+               }
+
                if (nvbo->bo.offset == b->presumed.offset &&
                    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -613,7 +614,20 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                return PTR_ERR(bo);
        }
 
-       mutex_lock(&dev->struct_mutex);
+       /* Mark push buffers as being used on PFIFO, the validation code
+        * will then make sure that if the pushbuf bo moves, that they
+        * happen on the kernel channel, which will in turn cause a sync
+        * to happen before we try and submit the push buffer.
+        */
+       for (i = 0; i < req->nr_push; i++) {
+               if (push[i].bo_index >= req->nr_buffers) {
+                       NV_ERROR(dev, "push %d buffer not in list\n", i);
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               bo[push[i].bo_index].read_domains |= (1 << 31);
+       }
 
        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
@@ -647,7 +661,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                                      push[i].length);
                }
        } else
-       if (dev_priv->card_type >= NV_20) {
+       if (dev_priv->chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -713,7 +727,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 out:
        validate_fini(&op, fence);
        nouveau_fence_unref((void**)&fence);
-       mutex_unlock(&dev->struct_mutex);
        kfree(bo);
        kfree(push);
 
@@ -722,7 +735,7 @@ out_next:
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
-       if (dev_priv->card_type >= NV_20) {
+       if (dev_priv->chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
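
Aside (not part of the diff): the block comment added in nouveau_gem_ioctl_pushbuf() above describes a push-buffer marking scheme whose two halves sit in separate hunks. Below is a minimal consolidated sketch; the helper name is hypothetical, everything else is taken from the hunks above. The ioctl tags each buffer referenced by a push entry with a reserved read_domains bit (1 << 31); validate_list() then validates tagged buffers with nvbo->channel set to NULL, so any move of a push buffer is carried out via the kernel channel and synchronized before the push buffer is submitted.

/* Illustrative sketch only -- not part of the patch; the helper name is
 * made up.  It condenses the marking half of the scheme added by this
 * diff (from nouveau_gem_ioctl_pushbuf()). */
static int example_mark_push_buffers(struct drm_device *dev,
				     struct drm_nouveau_gem_pushbuf *req,
				     struct drm_nouveau_gem_pushbuf_push *push,
				     struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int i;

	for (i = 0; i < req->nr_push; i++) {
		/* every push entry must reference a buffer in the list */
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(dev, "push %d buffer not in list\n", i);
			return -EINVAL;
		}

		/* reserved bit consumed later by validate_list():
		 *   nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
		 * a tagged buffer is validated without a channel, forcing any
		 * move onto the kernel channel. */
		bo[push[i].bo_index].read_domains |= (1 << 31);
	}
	return 0;
}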