static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
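+/*
+ * If the GPU is wedged, wait (interruptibly) for any in-progress reset to
+ * complete. Returns 0 once the GPU is usable again, -EIO if it is still
+ * hung after the reset, or the error from an interrupted wait.
+ */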
+int
+i915_gem_check_is_wedged(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct completion *x = &dev_priv->error_completion;
+ unsigned long flags;
+ int ret;
+
+ if (!atomic_read(&dev_priv->mm.wedged))
+ return 0;
+
+ ret = wait_for_completion_interruptible(x);
+ if (ret)
+ return ret;
+
+ /* Success, we reset the GPU! */
+ if (!atomic_read(&dev_priv->mm.wedged))
+ return 0;
+
+ /* GPU is hung, bump the completion count to account for
+ * the token we just consumed so that we never hit zero and
+ * end up waiting upon a subsequent completion event that
+ * will never happen.
+ */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+ return -EIO;
+}
+
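+/*
+ * Interruptible version of taking struct_mutex that refuses to proceed while
+ * the GPU is wedged: it first waits for any pending reset via
+ * i915_gem_check_is_wedged(), and rechecks the wedged flag after acquiring
+ * the lock, returning -EAGAIN if a hang occurred in the meantime.
+ */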
+static int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_gem_check_is_wedged(dev);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
ret = i915_gem_object_get_pages(obj, 0);
if (ret != 0)
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto fail_put_user_pages;
ret = i915_gem_object_get_pages_or_evict(obj);
if (ret)
if (!access_ok(VERIFY_READ, user_data, remain))
return -EFAULT;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
ret = i915_gem_object_pin(obj, 0);
if (ret) {
mutex_unlock(&dev->struct_mutex);
goto out_unpin_pages;
}
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto out_unpin_pages;
+
ret = i915_gem_object_pin(obj, 0);
if (ret)
goto out_unlock;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
ret = i915_gem_object_get_pages(obj, 0);
if (ret != 0)
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto fail_put_user_pages;
ret = i915_gem_object_get_pages_or_evict(obj);
if (ret)
return -ENOENT;
obj_priv = to_intel_bo(obj);
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
intel_mark_busy(dev, obj);
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
-
/* Maintain LRU order of "inactive" objects */
if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
+ if (obj == NULL)
return -ENOENT;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
}
#if WATCH_BUF
obj->size / PAGE_SIZE, 0, 0);
if (!list->file_offset_node) {
DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOMEM;
+ ret = -ENOSPC;
goto out_free_list;
}
}
list->hash.key = list->file_offset_node->start;
- if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
DRM_ERROR("failed to add to map hash\n");
- ret = -ENOMEM;
goto out_free_mm;
}
if (obj == NULL)
return -ENOENT;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
obj_priv = to_intel_bo(obj);
obj_priv->pages = NULL;
}
-static uint32_t
-i915_gem_next_request_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- ring->outstanding_lazy_request = true;
-
- return dev_priv->next_seqno;
-}
-
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj,
struct intel_ring_buffer *ring)
{
- struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = obj->dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
BUG_ON(ring == NULL);
obj_priv->ring = ring;
/* Move from whatever list we were on to the tail of execution. */
list_move_tail(&obj_priv->list, &ring->active_list);
- obj_priv->last_rendering_seqno = seqno;
+ obj_priv->last_rendering_seqno = dev_priv->next_seqno;
}
static void
i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->pin_count != 0)
- list_del_init(&obj_priv->list);
+ list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
uint32_t
i915_add_request(struct drm_device *dev,
- struct drm_file *file_priv,
+ struct drm_file *file,
struct drm_i915_gem_request *request,
struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *i915_file_priv = NULL;
+ struct drm_i915_file_private *file_priv = NULL;
uint32_t seqno;
int was_empty;
- if (file_priv != NULL)
- i915_file_priv = file_priv->driver_priv;
+ if (file != NULL)
+ file_priv = file->driver_priv;
if (request == NULL) {
request = kzalloc(sizeof(*request), GFP_KERNEL);
return 0;
}
- seqno = ring->add_request(dev, ring, file_priv, 0);
+ seqno = ring->add_request(dev, ring, 0);
request->seqno = seqno;
request->ring = ring;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
- if (i915_file_priv) {
+ if (file_priv) {
+ spin_lock(&file_priv->mm.lock);
+ request->file_priv = file_priv;
list_add_tail(&request->client_list,
- &i915_file_priv->mm.request_list);
- } else {
- INIT_LIST_HEAD(&request->client_list);
+ &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
}
if (!dev_priv->mm.suspended) {
I915_GEM_DOMAIN_COMMAND, flush_domains);
}
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
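+/* Detach a request from its client's request list under the per-file lock. */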
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
- return (int32_t)(seq1 - seq2) >= 0;
-}
+ struct drm_i915_file_private *file_priv = request->file_priv;
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- return ring->get_gem_seqno(dev, ring);
+ if (!file_priv)
+ return;
+
+ spin_lock(&file_priv->mm.lock);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ spin_unlock(&file_priv->mm.lock);
}
-void i915_gem_reset_flushing_list(struct drm_device *dev)
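+/* On GPU reset, throw away all outstanding requests on this ring and
+ * move any objects still on its active list back to the inactive list.
+ */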
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
- while (!list_empty(&dev_priv->mm.flushing_list)) {
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+ kfree(request);
+ }
+
+ while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj_priv;
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+ obj_priv = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
list);
obj_priv->base.write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_inactive(&obj_priv->base);
}
}
-void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev)
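+/* Clean up the request and active lists on every ring after a GPU hang,
+ * and knock all lost objects back to the inactive list.
+ */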
+void i915_gem_reset_lists(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+ if (HAS_BSD(dev))
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+
+ /* Remove anything from the flushing lists. The GPU cache is likely
+ * to be lost on reset along with the data, so simply move the
+ * lost bo to the inactive list.
+ */
+ while (!list_empty(&dev_priv->mm.flushing_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list);
+
+ obj_priv->base.write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_inactive(&obj_priv->base);
+ }
+
+ /* Move everything out of the GPU domains to ensure we do any
+ * necessary invalidation upon reuse.
+ */
list_for_each_entry(obj_priv,
&dev_priv->mm.inactive_list,
list)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
- bool wedged;
if (!ring->status_page.page_addr ||
list_empty(&ring->request_list))
return;
- seqno = i915_get_gem_seqno(dev, ring);
- wedged = atomic_read(&dev_priv->mm.wedged);
-
+ seqno = ring->get_seqno(dev, ring);
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
struct drm_i915_gem_request,
list);
- if (!wedged && !i915_seqno_passed(seqno, request->seqno))
+ if (!i915_seqno_passed(seqno, request->seqno))
break;
trace_i915_gem_request_retire(dev, request->seqno);
list_del(&request->list);
- list_del(&request->client_list);
+ i915_gem_request_remove_from_client(request);
kfree(request);
}
struct drm_i915_gem_object,
list);
- if (!wedged &&
- !i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+ if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
break;
obj = &obj_priv->base;
BUG_ON(seqno == 0);
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EAGAIN;
+
if (seqno == dev_priv->next_seqno) {
seqno = i915_add_request(dev, NULL, NULL, ring);
if (seqno == 0)
return -ENOMEM;
}
- if (atomic_read(&dev_priv->mm.wedged))
- return -EIO;
-
- if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
if (interruptible)
ret = wait_event_interruptible(ring->irq_queue,
i915_seqno_passed(
- ring->get_gem_seqno(dev, ring), seqno)
+ ring->get_seqno(dev, ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
else
wait_event(ring->irq_queue,
i915_seqno_passed(
- ring->get_gem_seqno(dev, ring), seqno)
+ ring->get_seqno(dev, ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
ring->user_irq_put(dev, ring);
trace_i915_gem_request_wait_end(dev, seqno);
}
if (atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
+ ret = -EAGAIN;
if (ret && ret != -ERESTARTSYS)
DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
- __func__, ret, seqno, ring->get_gem_seqno(dev, ring),
+ __func__, ret, seqno, ring->get_seqno(dev, ring),
dev_priv->next_seqno);
/* Directly dispatch request retiring. While we have the work queue
static void
i915_gem_flush_ring(struct drm_device *dev,
+ struct drm_file *file_priv,
struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains)
static void
i915_gem_flush(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t invalidate_domains,
uint32_t flush_domains,
uint32_t flush_rings)
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
if (flush_rings & RING_RENDER)
- i915_gem_flush_ring(dev,
+ i915_gem_flush_ring(dev, file_priv,
&dev_priv->render_ring,
invalidate_domains, flush_domains);
if (flush_rings & RING_BSD)
- i915_gem_flush_ring(dev,
+ i915_gem_flush_ring(dev, file_priv,
&dev_priv->bsd_ring,
invalidate_domains, flush_domains);
}
obj_priv->gtt_space = NULL;
}
- /* Remove ourselves from the LRU list if present. */
- if (!list_empty(&obj_priv->list))
- list_del_init(&obj_priv->list);
+ list_del_init(&obj_priv->list);
if (i915_gem_object_is_purgeable(obj_priv))
i915_gem_object_truncate(obj);
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
+ u32 seqno;
int ret;
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
return 0;
/* Flush everything onto the inactive list. */
- i915_gem_flush_ring(dev,
- &dev_priv->render_ring,
+ seqno = dev_priv->next_seqno;
+ i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-
- ret = i915_wait_request(dev,
- i915_gem_next_request_seqno(dev, &dev_priv->render_ring),
- &dev_priv->render_ring);
+ ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
if (ret)
return ret;
if (HAS_BSD(dev)) {
- i915_gem_flush_ring(dev,
- &dev_priv->bsd_ring,
+ seqno = dev_priv->next_seqno;
+ i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-
- ret = i915_wait_request(dev,
- i915_gem_next_request_seqno(dev, &dev_priv->bsd_ring),
- &dev_priv->bsd_ring);
+ ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring);
if (ret)
return ret;
}
bool interruptible)
{
struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_fence_reg *reg;
if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
return 0;
* therefore we must wait for any outstanding access to complete
* before clearing the fence.
*/
- if (INTEL_INFO(dev)->gen < 4) {
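+ /* reg->gpu is set when the fence was handed to the GPU; only in that
+ * case do we need to flush and wait for rendering before releasing
+ * the register.
+ */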
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ if (reg->gpu) {
int ret;
ret = i915_gem_object_flush_gpu_write_domain(obj, true);
ret = i915_gem_object_wait_rendering(obj, interruptible);
if (ret)
return ret;
+
+ reg->gpu = false;
}
i915_gem_object_flush_gtt_write_domain(obj);
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
- i915_gem_flush_ring(dev,
+ i915_gem_flush_ring(dev, NULL,
to_intel_bo(obj)->ring,
0, obj->write_domain);
BUG_ON(obj->write_domain);
i915_gem_object_flush_cpu_write_domain(obj);
old_read_domains = obj->read_domains;
- obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
* need to be invalidated at next use.
*/
if (write) {
- obj->read_domains &= I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
}
* properly handle blits to/from tiled surfaces.
*/
if (need_fence) {
- ret = i915_gem_object_get_fence_reg(obj, false);
+ ret = i915_gem_object_get_fence_reg(obj, true);
if (ret != 0) {
i915_gem_object_unpin(obj);
return ret;
}
+
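+ /* Note that the fence is now in use by the GPU, so releasing it
+ * later must first wait for rendering to complete.
+ */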
+ dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
}
entry->offset = obj_priv->gtt_offset;
* relatively low latency when blocking on a particular request to finish.
*/
static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
- int ret = 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+ struct drm_i915_gem_request *request;
+ struct intel_ring_buffer *ring = NULL;
+ u32 seqno = 0;
+ int ret;
- mutex_lock(&dev->struct_mutex);
- while (!list_empty(&i915_file_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&i915_file_priv->mm.request_list,
- struct drm_i915_gem_request,
- client_list);
-
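+ /* Find the most recent request by this client that is at least 20ms
+ * old; that is the one we will wait on below, without holding
+ * struct_mutex.
+ */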
+ spin_lock(&file_priv->mm.lock);
+ list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- ret = i915_wait_request(dev, request->seqno, request->ring);
- if (ret != 0)
- break;
+ ring = request->ring;
+ seqno = request->seqno;
}
- mutex_unlock(&dev->struct_mutex);
+ spin_unlock(&file_priv->mm.lock);
+
+ if (seqno == 0)
+ return 0;
+
+ ret = 0;
+ if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ /* And wait for the seqno passing without holding any locks and
+ * causing extra latency for others. This is safe as the irq
+ * generation is designed to be run atomically and so is
+ * lockless.
+ */
+ ring->user_irq_get(dev, ring);
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ ring->user_irq_put(dev, ring);
+
+ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ ret = -EIO;
+ }
+
+ if (ret == 0)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
return ret;
}
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_gem_relocation_entry *relocs = NULL;
struct drm_i915_gem_request *request = NULL;
- int ret = 0, ret2, i, pinned = 0;
+ int ret, ret2, i, pinned = 0;
uint64_t exec_offset;
- uint32_t seqno, reloc_index;
+ uint32_t reloc_index;
int pin_tries, flips;
struct intel_ring_buffer *ring = NULL;
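+ /* Bail out (or wait for a pending reset) before building any state
+ * for this execbuffer.
+ */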
+ ret = i915_gem_check_is_wedged(dev);
+ if (ret)
+ return ret;
+
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
if (ret != 0)
goto pre_mutex_err;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto pre_mutex_err;
i915_verify_inactive(dev, __FILE__, __LINE__);
- if (atomic_read(&dev_priv->mm.wedged)) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EIO;
- goto pre_mutex_err;
- }
-
if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
dev->invalidate_domains,
dev->flush_domains);
#endif
- i915_gem_flush(dev,
+ i915_gem_flush(dev, file_priv,
dev->invalidate_domains,
dev->flush_domains,
dev_priv->mm.flush_rings);
}
- if (dev_priv->render_ring.outstanding_lazy_request) {
- (void)i915_add_request(dev, file_priv, NULL, &dev_priv->render_ring);
- dev_priv->render_ring.outstanding_lazy_request = false;
- }
- if (dev_priv->bsd_ring.outstanding_lazy_request) {
- (void)i915_add_request(dev, file_priv, NULL, &dev_priv->bsd_ring);
- dev_priv->bsd_ring.outstanding_lazy_request = false;
- }
-
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
}
-
- /*
- * Get a seqno representing the execution of the current buffer,
- * which we can wait on. We would like to mitigate these interrupts,
- * likely by only creating seqnos occasionally (so that we have
- * *some* interrupts representing completion of buffers that we can
- * wait on when trying to clear up gtt space).
- */
- seqno = i915_add_request(dev, file_priv, request, ring);
+ i915_add_request(dev, file_priv, request, ring);
request = NULL;
#if WATCH_LRU
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
if (obj_priv->pin_count == 1) {
atomic_inc(&dev->pin_count);
atomic_add(obj->size, &dev->pin_memory);
- if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- list_del_init(&obj_priv->list);
+ if (!obj_priv->active)
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.pinned_list);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
* the inactive list
*/
if (obj_priv->pin_count == 0) {
- if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ if (!obj_priv->active)
list_move_tail(&obj_priv->list,
&dev_priv->mm.inactive_list);
atomic_dec(&dev->pin_count);
struct drm_i915_gem_object *obj_priv;
int ret;
- mutex_lock(&dev->struct_mutex);
-
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
args->handle);
- mutex_unlock(&dev->struct_mutex);
return -ENOENT;
}
obj_priv = to_intel_bo(obj);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
drm_gem_object_unreference(obj);
struct drm_i915_gem_pin *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
-
- mutex_lock(&dev->struct_mutex);
+ int ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
args->handle);
- mutex_unlock(&dev->struct_mutex);
return -ENOENT;
}
obj_priv = to_intel_bo(obj);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
if (obj_priv->pin_filp != file_priv) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
struct drm_i915_gem_busy *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ int ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
return -ENOENT;
}
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
/* Count all active objects as busy, even if they are currently not used
* by the gpu. Users of this interface expect to use this buffer again
* rather sooner than later, so issuing the required flush earlier is
* beneficial.
*/
- if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
- i915_gem_flush_ring(dev,
+ if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(dev, file_priv,
obj_priv->ring,
0, obj->write_domain);
- (void)i915_add_request(dev, file_priv, NULL, obj_priv->ring);
- }
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs
struct drm_i915_gem_madvise *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ int ret;
switch (args->madv) {
case I915_MADV_DONTNEED:
args->handle);
return -ENOENT;
}
-
- mutex_lock(&dev->struct_mutex);
obj_priv = to_intel_bo(obj);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
if (obj_priv->pin_count) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
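+ /* Used by i915_gem_check_is_wedged() to block while a GPU reset is pending. */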
+ init_completion(&dev_priv->error_completion);
spin_lock(&shrink_list_lock);
list_add(&dev_priv->mm.shrink_list, &shrink_list);
spin_unlock(&shrink_list_lock);
return 0;
}
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
*/
- mutex_lock(&dev->struct_mutex);
- while (!list_empty(&i915_file_priv->mm.request_list))
- list_del_init(i915_file_priv->mm.request_list.next);
- mutex_unlock(&dev->struct_mutex);
+ spin_lock(&file_priv->mm.lock);
+ while (!list_empty(&file_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
+ spin_unlock(&file_priv->mm.lock);
}
static int