From: James S. <jsi...@us...> - 2001-09-04 16:15:31
Update of /cvsroot/linux-mips/linux/include/asm-mips64
In directory usw-pr-cvs1:/tmp/cvs-serv13690
Modified Files:
bitops.h
Log Message:
Synced up.
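
The recurring change in this diff is the conversion of the old GNU "extern __inline__" declarations to "static inline". Under gcc's traditional (GNU89) inline semantics, "extern inline" supplies a body for inlining but emits no out-of-line definition, so any call the compiler declines to inline can turn into an unresolved symbol at link time; "static inline" lets each translation unit keep a file-local copy when one is needed. A minimal stand-alone sketch of the new idiom follows; it is illustrative only and not part of the commit, set_bit_example and the test driver are hypothetical names, and the word/bit arithmetic assumes a 64-bit unsigned long as on mips64:

/*
 * Illustrative sketch only -- not part of the commit.
 *
 * Old style, removed by this diff:
 *   extern __inline__ void set_bit_example(unsigned long nr, volatile void *addr);
 * New style, introduced by this diff:
 */
#include <stdio.h>

static inline void set_bit_example(unsigned long nr, volatile void *addr)
{
	/* Same word/bit arithmetic as the 64-bit bitops in this header:
	 * nr >> 6 selects the 64-bit word, nr & 0x3f the bit within it. */
	unsigned long *m = ((unsigned long *) addr) + (nr >> 6);

	*m |= 1UL << (nr & 0x3f);	/* non-atomic; illustration only */
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	set_bit_example(65, bitmap);
	printf("bitmap[1] = %#lx\n", bitmap[1]);	/* prints 0x2 */
	return 0;
}
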
Index: bitops.h
===================================================================
RCS file: /cvsroot/linux-mips/linux/include/asm-mips64/bitops.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -C2 -d -r1.2 -r1.3
*** bitops.h 2001/07/09 19:28:48 1.2
--- bitops.h 2001/09/04 16:15:28 1.3
***************
*** 31,36 ****
* restricted to acting on a single-word quantity.
*/
! extern __inline__ void
! set_bit(unsigned long nr, volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
--- 31,35 ----
* restricted to acting on a single-word quantity.
*/
! static inline void set_bit(unsigned long nr, volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
***************
*** 56,60 ****
* may be that only one operation succeeds.
*/
! extern __inline__ void __set_bit(int nr, volatile void * addr)
{
unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
--- 55,59 ----
* may be that only one operation succeeds.
*/
! static inline void __set_bit(int nr, volatile void * addr)
{
unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
***************
*** 73,78 ****
* in order to ensure changes are visible on other processors.
*/
! extern __inline__ void
! clear_bit(unsigned long nr, volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
--- 72,76 ----
* in order to ensure changes are visible on other processors.
*/
! static inline void clear_bit(unsigned long nr, volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
***************
*** 100,105 ****
* restricted to acting on a single-word quantity.
*/
! extern __inline__ void
! change_bit(unsigned long nr, volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
--- 98,102 ----
* restricted to acting on a single-word quantity.
*/
! static inline void change_bit(unsigned long nr, volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
***************
*** 124,128 ****
* may be that only one operation succeeds.
*/
! extern __inline__ void __change_bit(int nr, volatile void * addr)
{
unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
--- 121,125 ----
* may be that only one operation succeeds.
*/
! static inline void __change_bit(int nr, volatile void * addr)
{
unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
***************
*** 139,144 ****
* It also implies a memory barrier.
*/
! extern __inline__ unsigned long
! test_and_set_bit(unsigned long nr, volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
--- 136,141 ----
* It also implies a memory barrier.
*/
! static inline unsigned long test_and_set_bit(unsigned long nr,
! volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
***************
*** 169,174 ****
* but actually fail. You must protect multiple accesses with a lock.
*/
! extern __inline__ int
! __test_and_set_bit(int nr, volatile void * addr)
{
unsigned long mask, retval;
--- 166,170 ----
* but actually fail. You must protect multiple accesses with a lock.
*/
! static inline int __test_and_set_bit(int nr, volatile void *addr)
{
unsigned long mask, retval;
***************
*** 191,196 ****
* It also implies a memory barrier.
*/
! extern __inline__ unsigned long
! test_and_clear_bit(unsigned long nr, volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
--- 187,192 ----
* It also implies a memory barrier.
*/
! static inline unsigned long test_and_clear_bit(unsigned long nr,
! volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
***************
*** 222,227 ****
* but actually fail. You must protect multiple accesses with a lock.
*/
! extern __inline__ int
! __test_and_clear_bit(int nr, volatile void * addr)
{
unsigned long mask, retval;
--- 218,222 ----
* but actually fail. You must protect multiple accesses with a lock.
*/
! static inline int __test_and_clear_bit(int nr, volatile void * addr)
{
unsigned long mask, retval;
***************
*** 244,249 ****
* It also implies a memory barrier.
*/
! extern __inline__ unsigned long
! test_and_change_bit(unsigned long nr, volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
--- 239,244 ----
* It also implies a memory barrier.
*/
! static inline unsigned long test_and_change_bit(unsigned long nr,
! volatile void *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
***************
*** 274,279 ****
* but actually fail. You must protect multiple accesses with a lock.
*/
! extern __inline__ int
! __test_and_change_bit(int nr, volatile void * addr)
{
unsigned long mask, retval;
--- 269,273 ----
* but actually fail. You must protect multiple accesses with a lock.
*/
! static inline int __test_and_change_bit(int nr, volatile void *addr)
{
unsigned long mask, retval;
***************
*** 292,297 ****
* @addr: Address to start counting from
*/
! extern __inline__ unsigned long
! test_bit(int nr, volatile void * addr)
{
return 1UL & (((volatile unsigned long *) addr)[nr >> 6] >> (nr & 0x3f));
--- 286,290 ----
* @addr: Address to start counting from
*/
! static inline unsigned long test_bit(int nr, volatile void * addr)
{
return 1UL & (((volatile unsigned long *) addr)[nr >> 6] >> (nr & 0x3f));
***************
*** 310,315 ****
* containing a bit.
*/
! extern __inline__ int
! find_first_zero_bit (void *addr, unsigned size)
{
unsigned long dummy;
--- 303,307 ----
* containing a bit.
*/
! static inline int find_first_zero_bit (void *addr, unsigned size)
{
unsigned long dummy;
***************
*** 359,364 ****
* @size: The maximum size to search
*/
! extern __inline__ int
! find_next_zero_bit (void * addr, int size, int offset)
{
unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
--- 351,355 ----
* @size: The maximum size to search
*/
! static inline int find_next_zero_bit (void * addr, int size, int offset)
{
unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
***************
*** 403,407 ****
* Undefined if no zero exists, so code should check against ~0UL first.
*/
! extern __inline__ unsigned long ffz(unsigned long word)
{
unsigned long k;
--- 394,398 ----
* Undefined if no zero exists, so code should check against ~0UL first.
*/
! static inline unsigned long ffz(unsigned long word)
{
unsigned long k;
***************
*** 454,459 ****
* @size: The maximum size to search
*/
! extern __inline__ unsigned long
! find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
--- 445,450 ----
* @size: The maximum size to search
*/
! static inline unsigned long find_next_zero_bit(void *addr, unsigned long size,
! unsigned long offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
***************
*** 502,507 ****
#ifdef __MIPSEB__
! extern inline int
! ext2_set_bit(int nr,void * addr)
{
int mask, retval, flags;
--- 493,497 ----
#ifdef __MIPSEB__
! static inline int ext2_set_bit(int nr,void * addr)
{
int mask, retval, flags;
***************
*** 517,522 ****
}
! extern inline int
! ext2_clear_bit(int nr, void * addr)
{
int mask, retval, flags;
--- 507,511 ----
}
! static inline int ext2_clear_bit(int nr, void * addr)
{
int mask, retval, flags;
***************
*** 532,537 ****
}
! extern inline int
! ext2_test_bit(int nr, const void * addr)
{
int mask;
--- 521,525 ----
}
! static inline int ext2_test_bit(int nr, const void * addr)
{
int mask;
***************
*** 546,551 ****
ext2_find_next_zero_bit((addr), (size), 0)
! extern inline unsigned int
! ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
--- 534,540 ----
ext2_find_next_zero_bit((addr), (size), 0)
! static inline unsigned int ext2_find_next_zero_bit(void *addr,
! unsigned long size,
! unsigned long offset)
{
unsigned int *p = ((unsigned int *) addr) + (offset >> 5);