drm/nouveau: Take fence spinlock before reading the last sequence.
author    Francisco Jerez <currojerez@riseup.net>    Sat, 28 Aug 2010 15:56:33 +0000 (17:56 +0200)
committer Ben Skeggs <bskeggs@redhat.com>            Thu, 2 Sep 2010 22:23:30 +0000 (08:23 +1000)
This fixes a race between the TTM delayed work queue and the GEM ioctls
(fdo bug 29583) that was uncovered by the BKL removal.

Signed-off-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6b208ff..87ac21e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
        struct nouveau_fence *fence;
        uint32_t sequence;
 
+       spin_lock(&chan->fence.lock);
+
        if (USE_REFCNT)
                sequence = nvchan_rd32(chan, 0x48);
        else
                sequence = atomic_read(&chan->fence.last_sequence_irq);
 
        if (chan->fence.sequence_ack == sequence)
-               return;
+               goto out;
        chan->fence.sequence_ack = sequence;
 
-       spin_lock(&chan->fence.lock);
        list_for_each_safe(entry, tmp, &chan->fence.pending) {
                fence = list_entry(entry, struct nouveau_fence, entry);
 
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
                if (sequence == chan->fence.sequence_ack)
                        break;
        }
+out:
        spin_unlock(&chan->fence.lock);
 }
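
For context, here is a sketch of how nouveau_fence_update() reads with the patch applied. The per-fence handling inside the loop is elided in the hunk above, so it is only summarised in a comment rather than reproduced verbatim, and the surrounding declarations are inferred from the visible context lines:

static void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct list_head *entry, *tmp;
	struct nouveau_fence *fence;
	uint32_t sequence;

	/* The lock is now taken before the last-completed-sequence read, so
	 * the read, the sequence_ack update and the pending-list walk form
	 * one critical section. */
	spin_lock(&chan->fence.lock);

	if (USE_REFCNT)
		sequence = nvchan_rd32(chan, 0x48);
	else
		sequence = atomic_read(&chan->fence.last_sequence_irq);

	if (chan->fence.sequence_ack == sequence)
		goto out;
	chan->fence.sequence_ack = sequence;

	list_for_each_safe(entry, tmp, &chan->fence.pending) {
		fence = list_entry(entry, struct nouveau_fence, entry);

		/* Per-fence completion handling (elided in the hunk above):
		 * the fence is signalled, unlinked from the pending list and
		 * unreferenced; `sequence` picks up fence->sequence so the
		 * loop can stop at the last acknowledged fence. */
		sequence = fence->sequence;
		/* ... */

		if (sequence == chan->fence.sequence_ack)
			break;
	}
out:
	spin_unlock(&chan->fence.lock);
}

Holding chan->fence.lock across both the sequence read and the ack/pending-list update makes the check-and-update atomic with respect to concurrent callers, which is what closes the race between the TTM delayed work queue and the GEM ioctl paths once the BKL no longer serialises them.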