Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[pandora-kernel.git] / arch/arm/mm/cache-l2x0.c
index 3f9b998..8ac9e9f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -29,7 +29,7 @@
 #define CACHE_LINE_SIZE                32
 
 static void __iomem *l2x0_base;
-static DEFINE_SPINLOCK(l2x0_lock);
+static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask; /* Bitmask of active ways */
 static uint32_t l2x0_size;
 
@@ -126,9 +126,9 @@ static void l2x0_cache_sync(void)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&l2x0_lock, flags);
+       raw_spin_lock_irqsave(&l2x0_lock, flags);
        cache_sync();
-       spin_unlock_irqrestore(&l2x0_lock, flags);
+       raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void __l2x0_flush_all(void)
@@ -145,9 +145,9 @@ static void l2x0_flush_all(void)
        unsigned long flags;
 
        /* clean all ways */
-       spin_lock_irqsave(&l2x0_lock, flags);
+       raw_spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
-       spin_unlock_irqrestore(&l2x0_lock, flags);
+       raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_all(void)
@@ -155,11 +155,11 @@ static void l2x0_clean_all(void)
        unsigned long flags;
 
        /* clean all ways */
-       spin_lock_irqsave(&l2x0_lock, flags);
+       raw_spin_lock_irqsave(&l2x0_lock, flags);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
        cache_sync();
-       spin_unlock_irqrestore(&l2x0_lock, flags);
+       raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_all(void)
@@ -167,13 +167,13 @@ static void l2x0_inv_all(void)
        unsigned long flags;
 
        /* invalidate all ways */
-       spin_lock_irqsave(&l2x0_lock, flags);
+       raw_spin_lock_irqsave(&l2x0_lock, flags);
        /* Invalidating when L2 is enabled is a nono */
        BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
        cache_sync();
-       spin_unlock_irqrestore(&l2x0_lock, flags);
+       raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
@@ -181,7 +181,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
        void __iomem *base = l2x0_base;
        unsigned long flags;
 
-       spin_lock_irqsave(&l2x0_lock, flags);
+       raw_spin_lock_irqsave(&l2x0_lock, flags);
        if (start & (CACHE_LINE_SIZE - 1)) {
                start &= ~(CACHE_LINE_SIZE - 1);
                debug_writel(0x03);
@@ -206,13 +206,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
                }
 
                if (blk_end < end) {
-                       spin_unlock_irqrestore(&l2x0_lock, flags);
-                       spin_lock_irqsave(&l2x0_lock, flags);
+                       raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+                       raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        cache_sync();
-       spin_unlock_irqrestore(&l2x0_lock, flags);
+       raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
@@ -225,7 +225,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
                return;
        }
 
-       spin_lock_irqsave(&l2x0_lock, flags);
+       raw_spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);
@@ -236,13 +236,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
                }
 
                if (blk_end < end) {
-                       spin_unlock_irqrestore(&l2x0_lock, flags);
-                       spin_lock_irqsave(&l2x0_lock, flags);
+                       raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+                       raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        cache_sync();
-       spin_unlock_irqrestore(&l2x0_lock, flags);
+       raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
@@ -255,7 +255,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
                return;
        }
 
-       spin_lock_irqsave(&l2x0_lock, flags);
+       raw_spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);
@@ -268,24 +268,24 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
                debug_writel(0x00);
 
                if (blk_end < end) {
-                       spin_unlock_irqrestore(&l2x0_lock, flags);
-                       spin_lock_irqsave(&l2x0_lock, flags);
+                       raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+                       raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
        cache_sync();
-       spin_unlock_irqrestore(&l2x0_lock, flags);
+       raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_disable(void)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&l2x0_lock, flags);
+       raw_spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        writel_relaxed(0, l2x0_base + L2X0_CTRL);
        dsb();
-       spin_unlock_irqrestore(&l2x0_lock, flags);
+       raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_unlock(__u32 cache_id)
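
For reference, below is a minimal sketch (not part of the patch) of the locking pattern the hunks above convert to; the lock and function names are hypothetical, only the API calls mirror the ones used in cache-l2x0.c. The switch from spinlock_t to raw_spinlock_t matters on PREEMPT_RT, where an ordinary spinlock can become a sleeping lock; the L2 cache maintenance paths here must never sleep, so they need a lock that always spins with local interrupts disabled.

#include <linux/spinlock.h>

/* Hypothetical lock, mirroring l2x0_lock after the conversion. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
        unsigned long flags;

        /* Disables local IRQs and spins; remains a spinning lock even on PREEMPT_RT. */
        raw_spin_lock_irqsave(&example_lock, flags);
        /* ... hardware maintenance that must complete atomically ... */
        raw_spin_unlock_irqrestore(&example_lock, flags);
}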