|
From: Andy P. <at...@us...> - 2001-02-18 20:36:48
|
Update of /cvsroot/linux-vax/kernel-2.4/include/asm-vax
In directory usw-pr-cvs1:/tmp/cvs-serv15546/include/asm-vax
Modified Files:
checksum.h
Log Message:
Add vax_dev_init to init/main.c. New file vax_dev_init. Updated cpu_ka46.c.
Stubbed out checksum routines.
Index: checksum.h
===================================================================
RCS file: /cvsroot/linux-vax/kernel-2.4/include/asm-vax/checksum.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- checksum.h 2001/01/17 16:18:52 1.1
+++ checksum.h 2001/02/18 20:37:39 1.2
@@ -1,6 +1,20 @@
#ifndef _VAX_CHECKSUM_H
#define _VAX_CHECKSUM_H
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+
+
#define checksum_Asm __asm __volatile
#define checksum_ADDL checksum_Asm("addl2 (%2)+,%0" : "=r" (sum) : "0" (sum), "r" (w))
#define checksum_ADWC checksum_Asm("adwc (%2)+,%0" : "=r" (sum) : "0" (sum), "r" (w))
@@ -11,6 +25,8 @@
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
+
+
static inline unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
{
volatile register unsigned char *w;
@@ -20,28 +36,36 @@
len=ihl;
w=iph;
+
for (i=0; i<ihl; i++)
{
checksum_ADDL;
checksum_ADDC;
/* This printk is required for some reason to make w get updated
- for the next loop iteration.... no idea why.. tried a couple
- of things like adding a varible but they seem to get optimsed
- out ... - D.A. 3 Dec 2000 */
+ * for the next loop iteration.... no idea why.. tried a couple
+ * of things like adding a varible but they seem to get optimsed
+ * out ... - D.A. 3 Dec 2000
+ */
printk("w: %8X, sum: %8X, i: %i\n", *(unsigned int *)w, sum, i);
}
- __asm__("
- addl2 %1, %0
- adwc $0xffff, %0
- " : "=r" (sum)
- : "r" (sum<<16), "0" (sum & 0xffff0000));
+
+
+
+ __asm__("addl2 %1, %0\n"
+ "adwc $0xffff, %0\n" : "=r" (sum)
+ : "r" (sum<<16), "0" (sum & 0xffff0000));
return (~sum)>>16;
+
+
}
+
+
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
+
extern unsigned short int csum_tcpudp_magic(unsigned long saddr,
unsigned long daddr,
unsigned short len,
@@ -52,6 +76,7 @@
unsigned short len, unsigned short proto,
unsigned int sum);
+
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
@@ -67,49 +92,79 @@
extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
+ * csum_partial as an inline function
*/
-unsigned int csum_partial_copy(const char *src, char *dst, int len, unsigned int sum);
+extern inline unsigned int
+csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
+{
+ __asm__ __volatile__ ("nop");
+ return sum;
+}
/*
- * the same as csum_partial, but copies from user space (but on the alpha
- * we have just one address space, so this is identical to the above)
+ * the same as csum_partial, but copies from src while it
+ * checksums
*
- * this is obsolete and will go away.
+ * here even more important to align src and dst on a 32-bit
+ * (or even better 64-bit) boundary
*/
-#define csum_partial_copy_fromuser csum_partial_copy
+
+extern inline unsigned int
+csum_partial_copy(const char *src, char *dst, int len,unsigned int sum)
+{
+ memcpy(dst,src,len);
+ return csum_partial_inline(dst, len, sum);
+}
/*
- * this is a new version of the above that records errors it finds in *errp,
- * but continues and zeros the rest of the buffer.
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
*/
-unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, unsigned int sum, int *errp);
-
-extern __inline__ unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum);
+extern inline unsigned int
+csum_partial_copy_from_user(const char *src, char *dst,
+ int len, unsigned int sum, int *errp)
+{
+ if (copy_from_user(dst, src, len)) {
+ *errp = -EFAULT;
+ memset(dst, 0, len);
+ return sum;
+ }
+ return csum_partial(dst, len, sum);
+}
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
+extern inline unsigned int
+csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
+{
+ memcpy(dst,src,len);
+ return csum_partial_inline(dst, len, sum);
+}
-extern unsigned short ip_compute_csum(unsigned char * buff, int len);
/*
* Fold a partial checksum without adding pseudo headers
*/
-static inline unsigned short csum_fold(unsigned int sum)
+extern inline unsigned short csum_fold(unsigned int sum)
{
sum = (sum & 0xffff) + (sum >> 16);
sum = (sum & 0xffff) + (sum >> 16);
return ~sum;
}
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+extern inline unsigned short
+ip_compute_csum(unsigned char * buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
#define _HAVE_ARCH_IPV6_CSUM
extern unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
struct in6_addr *daddr,
@@ -117,4 +172,4 @@
unsigned short proto,
unsigned int sum);
-#endif
+#endif /* VAX_CHECKSUM_H */
|