From: Jan-Benedict G. <jb...@us...> - 2005-05-09 21:22:37
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv31466/include/asm-vax

Modified Files:
	bitops.h
Log Message:
- Another touchup. And now some TV...

Index: bitops.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/bitops.h,v
retrieving revision 1.19
retrieving revision 1.20
diff -u -d -r1.19 -r1.20
--- bitops.h	28 Mar 2005 16:27:23 -0000	1.19
+++ bitops.h	9 May 2005 21:22:23 -0000	1.20
@@ -14,26 +14,27 @@
  * was cleared before the operation and != 0 if it was not.
  *
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
- * 
+ *
  * VAX port atp Jan 1999
  * Updates for 2.4.3+ atp Mar 2002
  */
 
-#define ADDR (*(volatile long *) addr)
-
+/* This will come in handy to access given addresses */
+#define ADDR (* (volatile long *) addr)
 
-/* I'd like to use interlocked variable length bitfield instructions
- * here, but they only seem to exist in branch and {set/clear} flavours
- * There appears to be no complement bit equivalent.
- * We are ignoring SMP for the moment anyway
+/*
+ * I'd like to use interlocked variable length bitfield instructions
+ * here, but they only seem to exist in branch and {set/clear} flavours.
+ * There appears to be no complement bit equivalent.
+ * We are ignoring SMP for the moment anyway...
  * atp Jan 99.
- *
- * we use bbss rather than bisl2 for the variable bit field
- * if the bit is in a register, and nr>31 then we get a reserveed
- * operand fault. Add an "i" to the instructions for interlocked
- * operation (note: check subsetting rules?)
- *
- * Mar 2002 atp. Further reading of asm-i386/bitops.h reveals that the
+ *
+ * We use bbss rather than bisl2 for the variable bit field;
+ * if the bit is in a register and nr>31, then we get a reserved
+ * operand fault. Add an "i" to the instructions for interlocked
+ * operation (note: check subsetting rules?)
+ *
+ * Mar 2002 atp. Further reading of asm-i386/bitops.h reveals that the
  * bit number is constrained by "Ir" to be an integer in the
  * range 0-31. So we could use bisl2 after all.
  */
@@ -48,14 +49,15 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static __inline__ void set_bit(int nr, volatile void * addr)
+static __inline__ void set_bit(int nr, volatile void *addr)
 {
 	__asm__ __volatile__(
-		"bbss %1, %0, 1f\n"
-		"1:\n"
-		:"=m" (ADDR)
-		:"ir" (nr));
+		" bbss %1, %0, 1f \n"
+		"1: \n"
+		: "=m" (ADDR)
+		: "ir" (nr));
 }
+
 /**
  * __set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
@@ -65,13 +67,13 @@
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void __set_bit(int nr, volatile void * addr)
+static __inline__ void __set_bit(int nr, volatile void *addr)
 {
 	__asm__(
-		"bbss %1,%0, 2f\n"
-		"2:\n"
-		:"=m" (ADDR)
-		:"ir" (nr));
+		" bbss %1, %0, 2f \n"
+		"2: \n"
+		: "=m" (ADDR)
+		: "ir" (nr));
 }
 
 /**
@@ -84,13 +86,13 @@
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static __inline__ void clear_bit(int nr, volatile void *addr)
 {
 	__asm__ __volatile__(
-		"bbcc %1,%0, 1f\n"
-		"1:\n"
-		:"=m" (ADDR)
-		:"ir" (nr));
+		" bbcc %1, %0, 1f \n"
+		"1: \n"
+		: "=m" (ADDR)
+		: "ir" (nr));
 }
 
 /**
@@ -102,13 +104,13 @@
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void __clear_bit(int nr, volatile void * addr)
+static __inline__ void __clear_bit(int nr, volatile void *addr)
 {
 	__asm__(
-		"bbcc %1,%0, 2f\n"
-		"2:\n"
-		:"=m" (ADDR)
-		:"ir" (nr));
+		" bbcc %1, %0, 2f \n"
+		"2: \n"
+		: "=m" (ADDR)
+		: "ir" (nr));
 }
 
 #define smp_mb__before_clear_bit()	smp_mb()
@@ -123,14 +125,14 @@
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void __change_bit(int nr, volatile void * addr)
+static __inline__ void __change_bit(int nr, volatile void *addr)
 {
 	__asm__ __volatile__(
-		"bbcs %1,%0,3f\n"
-		"bbsc %1,%0,3f\n"
-		"3:"
-		:"=m" (ADDR)
-		:"ir" (nr));
+		" bbcs %1, %0, 3f \n"
+		" bbsc %1, %0, 3f \n"
+		"3: \n"
+		: "=m" (ADDR)
+		: "ir" (nr));
 }
 
 /**
@@ -142,14 +144,14 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static __inline__ void change_bit(int nr, volatile void * addr)
+static __inline__ void change_bit(int nr, volatile void *addr)
 {
 	__asm__ __volatile__(
-		"bbcs %1,%0,3f\n"
-		"bbsc %1,%0,3f\n"
-		"3:"
-		:"=m" (ADDR)
-		:"ir" (nr));
+		" bbcs %1, %0, 3f \n"
+		" bbsc %1, %0, 3f \n"
+		"3: \n"
+		: "=m" (ADDR)
+		: "ir" (nr));
 }
 
 /**
@@ -157,21 +159,24 @@
  * @nr: Bit to set
  * @addr: Address to count from
  *
- * This operation is atomic and cannot be reordered. 
+ * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int test_and_set_bit(int nr, volatile void *addr)
 {
 	int oldbit;
 
-/* there are interlocked versions of bbss and bbcs we could use*/
+	/* There are interlocked versions of bbss and bbcs we could use... */
 	__asm__ __volatile__(
-		"clrl %0\n"
-		"bbcs %2,%1, 1f\n"
-		"incl %0\n"
-		"1:\n"
-		:"=&r" (oldbit),"=m" (ADDR)
-		:"r" (nr) : "memory");
+		" clrl %0 \n"
+		" bbcs %2, %1, 1f \n"
+		" incl %0 \n"
+		"1: \n"
+		: "=&r" (oldbit),
+		  "=m" (ADDR)
+		: "r" (nr)
+		: "memory");
+
 	return oldbit;
 }
 
@@ -180,21 +185,23 @@
  * @nr: Bit to set
  * @addr: Address to count from
  *
- * This operation is non-atomic and can be reordered. 
+ * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
 {
 	int oldbit;
 
 	__asm__(
-		"clrl %0\n"
-		"bbcs %2,%1, 1f\n"
-		"incl %0\n"
-		"1:\n"
-		:"=&r" (oldbit),"=m" (ADDR)
-		:"ir" (nr));
+		" clrl %0 \n"
+		" bbcs %2, %1, 1f \n"
+		" incl %0 \n"
+		"1: \n"
+		: "=&r" (oldbit),
+		  "=m" (ADDR)
+		: "ir" (nr));
+
 	return oldbit;
 }
 
@@ -203,20 +210,23 @@
  * @nr: Bit to set
  * @addr: Address to count from
  *
- * This operation is atomic and cannot be reordered. 
+ * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
 {
 	int oldbit;
 
 	__asm__ __volatile__(
-		"clrl %0\n"
-		"bbcc %2,%1, 1f\n"
-		"incl %0\n"
-		"1:\n"
-		:"=&r" (oldbit),"=m" (ADDR)
-		:"ir" (nr) : "memory");
+		" clrl %0 \n"
+		" bbcc %2, %1, 1f \n"
+		" incl %0 \n"
+		"1: \n"
+		: "=&r" (oldbit),
+		  "=m" (ADDR)
+		: "ir" (nr)
+		: "memory");
+
 	return oldbit;
 }
 
@@ -225,26 +235,28 @@
  * @nr: Bit to set
  * @addr: Address to count from
  *
- * This operation is non-atomic and can be reordered. 
+ * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
 {
 	int oldbit;
 
 	__asm__(
-		"clrl %0\n"
-		"bbcc %2,%1, 1f\n"
-		"incl %0\n"
-		"1:\n"
-		:"=&r" (oldbit),"=m" (ADDR)
-		:"ir" (nr));
+		" clrl %0 \n"
+		" bbcc %2, %1, 1f \n"
+		" incl %0 \n"
+		"1: \n"
+		: "=&r" (oldbit),
+		  "=m" (ADDR)
+		: "ir" (nr));
+
 	return oldbit;
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -259,21 +271,24 @@
  * @nr: Bit to set
  * @addr: Address to count from
  *
- * This operation is atomic and cannot be reordered. 
+ * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int test_and_change_bit(int nr, volatile void *addr)
 {
 	int oldbit;
 
 	__asm__ __volatile__(
-		"clrl %0\n"
-		"bbsc %2. %1,4f\n"
-		"incl %0\n"
-		"bbcs %2, %1,4f\n"
-		"4:\n"
-		:"=&r" (oldbit),"=m" (ADDR)
-		:"ir" (nr) : "memory");
+		" clrl %0 \n"
+		" bbsc %2, %1, 4f \n"
+		" incl %0 \n"
+		" bbcs %2, %1, 4f \n"
+		"4: \n"
+		: "=&r" (oldbit),
+		  "=m" (ADDR)
+		: "ir" (nr)
+		: "memory");
+
 	return oldbit;
 }
 
@@ -289,50 +304,55 @@
 /*
  * This routine doesn't need to be atomic.
  */
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+static __inline__ int constant_test_bit(int nr, const volatile void *addr)
 {
 	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
 }
 
-static __inline__ int variable_test_bit(int nr, const volatile void * addr)
+static __inline__ int variable_test_bit(int nr, const volatile void *addr)
 {
 	int oldbit;
 
 	__asm__ __volatile__(
-		"clrl %0\n"
-		"bbc %2,%1,1f\n"
-		"incl %0\n"
-		"1:\n"
-		:"=&r" (oldbit)
-		:"m" (ADDR),"ir" (nr));
+		" clrl %0 \n"
+		" bbc %2, %1, 1f \n"
+		" incl %0 \n"
+		"1: \n"
+		: "=&r" (oldbit)
+		: "m" (ADDR),
+		  "ir" (nr));
+
 	return oldbit;
 }
 
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
+#define test_bit(nr, addr)			\
+	(__builtin_constant_p(nr)		\
+	 ? constant_test_bit((nr), (addr))	\
+	 : variable_test_bit((nr), (addr)))
 
-/* 
+/*
  * FIXME: if we were keen we could do the ffs/ffz in a nice tight
  * assembly loop using ffc/ffs and a sliding 32bit field window.
- * For now, we use the alpha/ppc port method, with 32bit chunks
+ *
+ * For now, we use the alpha/ppc port method, with 32bit chunks.
  */
+
 /**
  * ffz - find first zero in word.
  * @word: The word to search
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  * (VAX)
- * We could check the Z condition code bit if we wanted to check against 
+ * We could check the Z condition code bit if we wanted to check against
  * the ~0UL case, but this interface is designed for intel. Nuff sed.
  */
 static __inline__ unsigned long ffz(unsigned long word)
 {
-	__asm__("ffc $0, $32, %1 , %0\n"
-		:"=rm" (word)
-		:"rm" (word));
+	__asm__(
+		" ffc $0, $32, %1, %0 \n"
+		: "=rm" (word)
+		: "rm" (word));
+
 	return word;
 }
 
@@ -344,29 +364,27 @@
  */
 static __inline__ unsigned long __ffs(unsigned long word)
 {
-	__asm__("ffs $0, $32, %1, %0\n"
-		:"=rm" (word)
-		:"rm" (word));
+	__asm__(
		" ffs $0, $32, %1, %0 \n"
+		: "=rm" (word)
+		: "rm" (word));
+
 	return word;
 }
 
-
 /*
  * fls: find last bit set.
  */
-
-#define fls(x) generic_fls(x)
+#define fls(x)	generic_fls(x)
 
 #ifdef __KERNEL__
-
 /*
  * Every architecture must define this function. It's the fastest
 * way of searching a 168-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
-
 static inline int sched_find_first_bit(unsigned long *b)
 {
 	if (unlikely(b[0]))
@@ -380,7 +398,6 @@
 	return __ffs(b[4]) + 128;
 }
 
-
 /**
  * ffs - find first bit set
  * @x: the word to search
@@ -393,23 +410,24 @@
 {
 	int r;
 
-	__asm__("ffs $0, $32, %1, %0\n"
-		"bnequ 1f\n"
-		"movl $-1,%0\n"
-		"1:"
-		: "=ir" (r)
-		: "mr" (x));
+	__asm__(
+		" ffs $0, $32, %1, %0 \n"
+		" bnequ 1f \n"
+		" movl $-1, %0 \n"
+		"1: \n"
+		: "=ir" (r)
+		: "mr" (x));
+
 	return r + 1;
 }
-#endif
-
+#endif /* __KERNEL__ */
+
 /*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
-
-extern __inline__ unsigned long
-find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
+extern __inline__ unsigned long
+find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset)
 {
 	unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
 	unsigned int result = offset & ~31UL;
@@ -446,8 +464,7 @@
 	return result + ffz(tmp);
 }
 
-#define find_first_zero_bit(addr, size) \
-	find_next_zero_bit((addr), (size), 0)
+#define find_first_zero_bit(addr, size)	find_next_zero_bit((addr), (size), 0)
 
 /*
 * This implementation of find_next_bit() is also stolen
@@ -491,41 +508,34 @@
 	return result + __ffs(tmp);
 }
 
-#define find_first_bit(addr, size) \
-	find_next_bit((addr), (size), 0)
-
+#define find_first_bit(addr, size)	find_next_bit((addr), (size), 0)
 
 #ifdef __KERNEL__
-
 /**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-#endif /* __KERNEL__ */
-
-#ifdef __KERNEL__
-
-#define ext2_set_bit __test_and_set_bit
-#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
-#define ext2_clear_bit __test_and_clear_bit
-#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
-#define ext2_test_bit test_bit
-#define ext2_find_first_zero_bit find_first_zero_bit
-#define ext2_find_next_zero_bit find_next_zero_bit
+#define hweight32(x)	generic_hweight32(x)
+#define hweight16(x)	generic_hweight16(x)
+#define hweight8(x)	generic_hweight8(x)
 
-/* Bitmap functions for the minix filesystem.
- */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+/* Bitmap functions for the ext2/ext3 filesystems. */
+#define ext2_set_bit			__test_and_set_bit
+#define ext2_set_bit_atomic(l, n, a)	test_and_set_bit((n), (a))
+#define ext2_clear_bit			__test_and_clear_bit
+#define ext2_clear_bit_atomic(l, n, a)	test_and_clear_bit((n), (a))
+#define ext2_test_bit			test_bit
+#define ext2_find_first_zero_bit	find_first_zero_bit
+#define ext2_find_next_zero_bit		find_next_zero_bit
+
+/* Bitmap functions for the Minix filesystem. */
+#define minix_test_and_set_bit(nr, addr)	__test_and_set_bit((nr), (addr))
+#define minix_set_bit(nr, addr)			__set_bit((nr), (addr))
+#define minix_test_and_clear_bit(nr, addr)	__test_and_clear_bit((nr), (addr))
+#define minix_test_bit(nr, addr)		test_bit((nr), (addr))
+#define minix_find_first_zero_bit(addr, size)	find_first_zero_bit((addr), (size))
 
 #endif /* __KERNEL__ */
 #endif /* _VAX_BITOPS_H */
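
For anyone new to the VAX branch-on-bit instructions: bbss ("branch on
bit set and set") tests a bit, sets it, and branches if it was already
set; bbcc clears and branches if it was already clear. Below is a rough
C model of what the bbss-based test_and_set_bit() above computes, using
the same bit-numbering convention (bit 0 is the LSB of addr, bit 32 the
LSB of addr+1). This is a sketch only: the names are made up, and it
lacks the atomicity the real instruction provides.

	#include <stdio.h>

	/* Sketch: set bit nr in the bit string at addr, return old value. */
	static int sketch_test_and_set(int nr, volatile void *addr)
	{
		volatile unsigned int *base = (volatile unsigned int *) addr;
		unsigned int mask = 1u << (nr & 31);
		unsigned int old = base[nr >> 5];

		base[nr >> 5] = old | mask;	/* set the bit */
		return (old & mask) != 0;	/* report its previous state */
	}

	int main(void)
	{
		unsigned int map[2] = { 0, 0 };

		printf("%d\n", sketch_test_and_set(33, map)); /* 0: was clear */
		printf("%d\n", sketch_test_and_set(33, map)); /* 1: was set */
		printf("%#x\n", map[1]); /* 0x2: bit 33 is bit 1 of word 1 */
		return 0;
	}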
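
The test_bit() macro picks an implementation at compile time:
__builtin_constant_p(nr) evaluates to 1 only when GCC can prove nr is a
compile-time constant, so literal bit numbers go to the pure-C
constant_test_bit() and runtime values go to the asm
variable_test_bit(). A minimal demonstration of that dispatch (the
helper macro here is hypothetical, and the builtin is GCC-specific):

	#include <stdio.h>

	#define which_test_bit(nr) \
		(__builtin_constant_p(nr) ? "constant_test_bit" : "variable_test_bit")

	int main(int argc, char **argv)
	{
		int runtime_nr = argc + 6;	/* unknowable at compile time */

		(void) argv;
		puts(which_test_bit(5));          /* constant_test_bit */
		puts(which_test_bit(runtime_nr)); /* variable_test_bit */
		return 0;
	}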
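
The find_next_zero_bit() comment mentions the alpha/ppc method: scan
32bit chunks, skip words that are all ones, and apply ffz() inside the
first word that contains a zero. Here is a portable sketch of that idea
(assuming 32-bit words, as on VAX; ffz_c() is a stand-in for the
ffc-based asm ffz() in the header and is likewise undefined for ~0):

	#include <stdio.h>

	/* Count trailing one-bits; undefined if word is all ones. */
	static unsigned long ffz_c(unsigned long word)
	{
		unsigned long bit = 0;

		while (word & 1) {
			word >>= 1;
			bit++;
		}
		return bit;
	}

	/* Word-at-a-time scan for the first zero bit in a bitmap. */
	static unsigned long first_zero_bit(const unsigned int *map,
					    unsigned long nwords)
	{
		unsigned long i;

		for (i = 0; i < nwords; i++)
			if (map[i] != ~0u)	/* a zero bit lives here */
				return i * 32 + ffz_c(map[i]);
		return nwords * 32;		/* bitmap is all ones */
	}

	int main(void)
	{
		unsigned int map[2] = { ~0u, 0xffffff7fu };	/* bit 39 clear */

		printf("%lu\n", first_zero_bit(map, 2));	/* prints 39 */
		return 0;
	}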