git.openpandora.org
/
pandora-kernel.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
libata: no need to speed down if already at PIO0
[pandora-kernel.git]
/
lib
/
kernel_lock.c
diff --git
a/lib/kernel_lock.c
b/lib/kernel_lock.c
index
bd2bc5d
..
f73e2f8
100644
(file)
--- a/
lib/kernel_lock.c
+++ b/
lib/kernel_lock.c
@@
-2,7
+2,7
@@
* lib/kernel_lock.c
*
* This is the traditional BKL - big kernel lock. Largely
* lib/kernel_lock.c
*
* This is the traditional BKL - big kernel lock. Largely
- * relegated to obsolescense, but used by various less
+ * relegated to obsolescence, but used by various less
* important (or lazy) subsystems.
*/
#include <linux/smp_lock.h>
* important (or lazy) subsystems.
*/
#include <linux/smp_lock.h>
@@
-14,7
+14,7
@@
* The 'big kernel semaphore'
*
* This mutex is taken and released recursively by lock_kernel()
* The 'big kernel semaphore'
*
* This mutex is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
@@
-92,7
+92,7
@@
void __lockfunc unlock_kernel(void)
* The 'big kernel lock'
*
* This spinlock is taken and released recursively by lock_kernel()
* The 'big kernel lock'
*
* This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
@@
-177,6
+177,10
@@
static inline void __lock_kernel(void)
static inline void __unlock_kernel(void)
{
static inline void __unlock_kernel(void)
{
+ /*
+ * the BKL is not covered by lockdep, so we open-code the
+ * unlocking sequence (and thus avoid the dep-chain ops):
+ */
_raw_spin_unlock(&kernel_flag);
preempt_enable();
}
_raw_spin_unlock(&kernel_flag);
preempt_enable();
}