#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

/*
 * HP-PARISC specific bit operations
 * for a detailed description of the functions please refer
 * to include/asm-i386/bitops.h or kerneldoc
 */

#if __BITS_PER_LONG == 64
#define SHIFT_PER_LONG 6
#else
#define SHIFT_PER_LONG 5
#endif

#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
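
/*
 * Illustrative sketch (not from the original header): how a bit number
 * nr is split into a word index and an in-word mask on a 64-bit kernel,
 * e.g. for nr = 70:
 *
 *	addr += (70 >> SHIFT_PER_LONG);      // 70 >> 6 == 1  -> word 1
 *	mask  = 1UL << CHOP_SHIFTCOUNT(70);  // 70 & 63 == 6  -> bit 6
 *
 * i.e. bit 70 of the bitmap is bit 6 of addr[1].
 */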

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
 * on use of volatile and __*_bit() (set/clear/change):
 * *_bit() operations want the use of volatile.
 * __*_bit() are "relaxed" and don't use spinlock or volatile.
 */
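
/*
 * Illustrative sketch (not from the original header): the two flavours
 * side by side, assuming a caller-owned bitmap:
 *
 *	static DECLARE_BITMAP(map, 128);
 *
 *	set_bit(3, map);     // atomic: takes the hashed per-word spinlock
 *	__set_bit(4, map);   // "relaxed": plain read-modify-write; the
 *	                     // caller must guarantee exclusive access
 */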

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr |= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr &= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr ^= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long old;
	unsigned long flags;
	int set;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
	if (!set)
		*addr = old | mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return set;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long old;
	unsigned long flags;
	int set;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
	if (set)
		*addr = old & ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return set;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit ^ mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}
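
/*
 * Illustrative sketch (not from the original header): the test_and_*()
 * helpers return the old bit value, which makes them usable as
 * one-shot guards:
 *
 *	static unsigned long once_flag;     // hypothetical flag word
 *
 *	if (!test_and_set_bit(0, &once_flag))
 *		do_init();                  // hypothetical; runs exactly once
 */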

#include <asm-generic/bitops/non-atomic.h>

#ifdef __KERNEL__

/**
 * __ffs - find first bit in word. Returns 0 to "BITS_PER_LONG-1".
 * @word: The word to search
 *
 * __ffs() return is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of the "extr" instructions is that they set the
 * PSW[N] bit. How PSW[N] (nullify next insn) gets set is determined by
 * the "condition" field (eg "<>" or "TR" below) in the extr* insn.
 * Only the 1st and one of either the 2nd or 3rd insn will get executed.
 * Each set of 3 insns executes in 2 cycles on PA8x00, vs 16 or so
 * cycles for each mispredicted branch.
 * (A plain-C sketch of the same cascade follows the function below.)
 */

static __inline__ unsigned long __ffs(unsigned long x)
{
	unsigned long ret;

	__asm__(
#ifdef CONFIG_64BIT
	" ldi 63,%1\n"
	" extrd,u,*<> %0,63,32,%%r0\n"
	" extrd,u,*TR %0,31,32,%0\n"	/* move top 32-bits down */
	" addi -32,%1,%1\n"
#else
	" ldi 31,%1\n"
#endif
	" extru,<> %0,31,16,%%r0\n"
	" extru,TR %0,15,16,%0\n"	/* xxxx0000 -> 0000xxxx */
	" addi -16,%1,%1\n"
	" extru,<> %0,31,8,%%r0\n"
	" extru,TR %0,23,8,%0\n"	/* 0000xx00 -> 000000xx */
	" addi -8,%1,%1\n"
	" extru,<> %0,31,4,%%r0\n"
	" extru,TR %0,27,4,%0\n"	/* 000000x0 -> 0000000x */
	" addi -4,%1,%1\n"
	" extru,<> %0,31,2,%%r0\n"
	" extru,TR %0,29,2,%0\n"	/* 0000000y, 1100b -> 0011b */
	" addi -2,%1,%1\n"
	" extru,= %0,31,1,%%r0\n"	/* check last bit */
	" addi -1,%1,%1\n"
		: "+r" (x), "=r" (ret) );
	return ret;
}

#include <asm-generic/bitops/ffz.h>

/*
 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	return x ? (__ffs((unsigned long)x) + 1) : 0;
}
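
/*
 * Illustrative sketch (not from the original header): sample values
 * showing the off-by-one relative to __ffs():
 *
 *	ffs(0)      == 0     // no bit set
 *	ffs(1)      == 1     // __ffs(1)      == 0
 *	ffs(0x8000) == 16    // __ffs(0x8000) == 15
 */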

/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */

static __inline__ int fls(int x)
{
	int ret;
	if (!x)
		return 0;

	__asm__(
	" ldi 1,%1\n"
	" extru,<> %0,15,16,%%r0\n"
	" zdep,TR %0,15,16,%0\n"	/* xxxx0000 */
	" addi 16,%1,%1\n"
	" extru,<> %0,7,8,%%r0\n"
	" zdep,TR %0,23,24,%0\n"	/* xx000000 */
	" addi 8,%1,%1\n"
	" extru,<> %0,3,4,%%r0\n"
	" zdep,TR %0,27,28,%0\n"	/* x0000000 */
	" addi 4,%1,%1\n"
	" extru,<> %0,1,2,%%r0\n"
	" zdep,TR %0,29,30,%0\n"	/* y0000000 (y&3 = 0) */
	" addi 2,%1,%1\n"
	" extru,= %0,0,1,%%r0\n"
	" addi 1,%1,%1\n"	/* if y & 8, add 1 */
		: "+r" (x), "=r" (ret) );

	return ret;
}

#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _PARISC_BITOPS_H */