Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax
In directory usw-pr-cvs1:/tmp/cvs-serv4365/include/asm-vax
Modified Files:
byteorder.h
Log Message:
Minor fixes to byteorder.h. Fix arch Makefiles for the new rules.make. sys_idle
is no more; cpu_idle is here.
Index: byteorder.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/byteorder.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -C2 -r1.2 -r1.3
*** byteorder.h 2001/01/20 11:24:50 1.2
--- byteorder.h 2001/01/20 13:51:05 1.3
***************
*** 7,38 ****
#ifdef __GNUC__
!
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
{
! __u32 t1, t2; /* assume input is aabbccdd in x*/
! __asm__("clrl %0\n\t" /* clear t1 */
! "rotl $8, %2, %1\n\t" /* %1 = bbccddaa */
! "bisl3 $0xff00ff, %1, %0\n\t" /* %0 = 00cc00aa */
! "rotl $-8, %2, %1\n\t" /* %1 = ddaabbcc */
! "bicl2 $0xff00ff, %1\n\t" /* %1 = dd00bb00 */
! "bisl2 %1, %0\n\t" /* %0 = ddccbbaa */
! : "=g" (t1), "=g" (t2) : "g" (x) );
return t1;
}
static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
{
! __u32 t1, t2, t3;
! __asm__("clrl %2\n\t"
! "movw %3,%2\n\t" /* %2=0000aabb */
! "rotl $-8,%2,%0\n\t" /* %0=bb0000aa */
! "ashl $24,%0,%1\n\t" /* %1=aa000000 */
! "rotl $8,%1,%0\n\t" /* %0=000000aa */
! "ashl $8,%2,%1\n\t" /* %1=00aabb00 */
! "bicl2 $0xfffff00ff, %1\n\t" /* %1=0000bb00 */
! "addl2 %1,%0\n\t" /* %0=0000bbaa */
! : "=g" (t1), "=g" (t2), "=g"(t3) : "g" (x));
! return (__u16) t1;
}
--- 7,34 ----
#ifdef __GNUC__
! /*
! * ragge has a 1 insn shorter sequence which will probably replace this one
! * later, depending on the relative instruction costs.
! */
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
{
! __u32 t1; /* assume input is aabbccdd in x*/
! __asm__("rotl $8, %1, r1\n\t" /* r1 = bbccddaa */
! "bisl3 $0xff00ff, r1, %0\n\t" /* %0 = 00cc00aa */
! "rotl $-8, %1, r1\n\t" /* r1 = ddaabbcc */
! "bicl2 $0xff00ff, r1\n\t" /* r1 = dd00bb00 */
! "bisl2 r1, %0\n\t" /* %0 = ddccbbaa */
! : "=g" (t1) : "g" (x) : "r1" );
return t1;
}
+ /*
+ * according to the resident expert, this is as fast as assembly
+ */
static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
{
! register __u16 __x = (x);
! return (u_int16_t)(__x << 8 | __x >> 8);
}
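
For anyone not fluent in VAX rotate/mask sequences, here is a rough portable C
sketch of what the two new helpers compute (a 32-bit and a 16-bit byte swap).
This is only an illustration for this mail, not code from the tree; the names
swab32_c/swab16_c and the test values are made up.

#include <stdint.h>
#include <stdio.h>

/* Portable sketch of the 32-bit swap: aabbccdd -> ddccbbaa.
 * Mirrors the rotate-and-mask idea in the inline asm above. */
static uint32_t swab32_c(uint32_t x)
{
	uint32_t lo = ((x << 8) | (x >> 24)) & 0x00ff00ff;  /* 00cc00aa */
	uint32_t hi = ((x >> 8) | (x << 24)) & 0xff00ff00;  /* dd00bb00 */
	return hi | lo;                                      /* ddccbbaa */
}

/* The 16-bit version is the same shift/or trick the new inline uses. */
static uint16_t swab16_c(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	/* hypothetical test values, just to show the expected results */
	printf("%08x\n", swab32_c(0xaabbccddU)); /* prints ddccbbaa */
	printf("%04x\n", swab16_c(0xaabb));      /* prints bbaa */
	return 0;
}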