You can subscribe to this list here.
2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
(11) |
Jun
(66) |
Jul
(16) |
Aug
(2) |
Sep
(7) |
Oct
(17) |
Nov
(1) |
Dec
(220) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
2003 |
Jan
(154) |
Feb
(167) |
Mar
(159) |
Apr
(172) |
May
(35) |
Jun
(58) |
Jul
(97) |
Aug
(285) |
Sep
(139) |
Oct
(252) |
Nov
(8) |
Dec
(3) |
2004 |
Jan
(13) |
Feb
(159) |
Mar
(136) |
Apr
(33) |
May
(50) |
Jun
(42) |
Jul
(140) |
Aug
(42) |
Sep
(199) |
Oct
(31) |
Nov
(55) |
Dec
|
2005 |
Jan
|
Feb
(12) |
Mar
(214) |
Apr
(119) |
May
(21) |
Jun
(2) |
Jul
(127) |
Aug
(10) |
Sep
(3) |
Oct
(24) |
Nov
(1) |
Dec
|
2006 |
Jan
|
Feb
|
Mar
|
Apr
(45) |
May
(13) |
Jun
|
Jul
|
Aug
|
Sep
|
Oct
(5) |
Nov
(26) |
Dec
|
2007 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
(35) |
Sep
(1) |
Oct
|
Nov
|
Dec
|
From: Jan-Benedict G. <jb...@us...> - 2005-05-21 10:00:31
|
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv19892 Modified Files: cpu_generic.c Log Message: - Don't access device registers while they're not yet mapped. Index: cpu_generic.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel/cpu_generic.c,v retrieving revision 1.14 retrieving revision 1.15 diff -u -d -r1.14 -r1.15 --- cpu_generic.c 25 Apr 2005 09:18:48 -0000 1.14 +++ cpu_generic.c 21 May 2005 10:00:11 -0000 1.15 @@ -118,11 +118,21 @@ console. Normally it is line 3 */ static unsigned int dz11_line; -/* stuff a char out of a DZ11-compatible serial chip */ +/* + * Stuff a char out of a DZ11-compatible serial chip + */ void dz11_putchar(int c) { u_int txcs, txdb, done; + /* + * During early startup, there might be a printk() call inside + * ioremap(), which will be executed while ioremap() hasn't + * finished, so the VM addr isn't yet set... + */ + if (!dz11_addr) + return; + txdb = txcs = done = 0; txdb = (c & DZ11_TDR_DATA_MASK); |
From: Kenn H. <ke...@us...> - 2005-05-15 14:32:46
|
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/boot In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv27503 Modified Files: cpu_sel.c head.S Log Message: At boot time, don't refer to $_stext in C code. GCC 2.95 generates a relative addressing mode, but GCC 4.1 uses absolute mode, which breaks the re-calculation of the machine vector address. Instead, we explicitly pass in the kernel load address from head.S. Index: head.S =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/boot/head.S,v retrieving revision 1.19 retrieving revision 1.20 diff -u -d -r1.19 -r1.20 --- head.S 23 Apr 2005 19:50:21 -0000 1.19 +++ head.S 15 May 2005 14:32:36 -0000 1.20 @@ -53,7 +53,8 @@ # movzbl $0x42,%r2 # jsb 0x20040058 - calls $0, idcpu # Identify CPU and put the mv ptr into mv + pushal start + calls $1, idcpu # Identify CPU and put the mv ptr into mv movl %r0, mv # now fix up the machine vector entries. (They currently contain Index: cpu_sel.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/boot/cpu_sel.c,v retrieving revision 1.16 retrieving revision 1.17 diff -u -d -r1.16 -r1.17 --- cpu_sel.c 25 Apr 2005 22:56:58 -0000 1.16 +++ cpu_sel.c 15 May 2005 14:32:36 -0000 1.17 @@ -14,16 +14,14 @@ * in the loaded kernel before we relocate (which depends on the * exact load address) */ -void * -s0vmaddr_to_load_addr(void *vaddr) +static void * +s0vmaddr_to_load_addr(void *vaddr, unsigned int kernel_load_addr) { - extern char _stext; - - return (char *) vaddr - PAGE_OFFSET - KERNEL_START_PHYS + (unsigned int) &_stext; + return (char *) vaddr - PAGE_OFFSET - KERNEL_START_PHYS + kernel_load_addr; } struct vax_mv * -idcpu (void) +idcpu (unsigned int kernel_load_addr) { extern struct cpu_match __init_cpumatch_start, __init_cpumatch_end; struct cpu_match *match = &__init_cpumatch_start; @@ -45,14 +43,14 @@ * entries with weaker/shorter masks */ if 
(!match[i].sidex_addr) - return s0vmaddr_to_load_addr(match[i].mv); + return s0vmaddr_to_load_addr(match[i].mv, kernel_load_addr); /* * If a SIDEX match was supplied, too, check it! */ sidex = * ((unsigned long *) match[i].sidex_addr); if ((sidex & match[i].sidex_mask) == match[i].sidex_match) { - retmv = s0vmaddr_to_load_addr(match[i].mv); + retmv = s0vmaddr_to_load_addr(match[i].mv, kernel_load_addr); retmv->sidex = sidex; return retmv; } |
From: Kenn H. <ke...@us...> - 2005-05-10 22:47:41
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv15434 Modified Files: linkage.h Log Message: Remove obsolete cruft Index: linkage.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/linkage.h,v retrieving revision 1.4 retrieving revision 1.5 diff -u -d -r1.4 -r1.5 --- linkage.h 9 May 2005 21:10:56 -0000 1.4 +++ linkage.h 10 May 2005 22:47:30 -0000 1.5 @@ -1,31 +1,7 @@ #ifndef _VAX_LINKAGE_H #define _VAX_LINKAGE_H -#ifdef __cplusplus -# define asmlinkage extern "C" -#else -# define asmlinkage -#endif - -/* FIXME: # is a comment sign under VAX assembly */ -#define SYMBOL_NAME_STR(X) #X -#define SYMBOL_NAME(X) X -#ifdef __STDC__ -# define SYMBOL_NAME_LABEL(X) X##: -#else -# define SYMBOL_NAME_LABEL(X) X/**/: -#endif - #define __ALIGN .balign 2 #define __ALIGN_STR ".balign 2" -#ifdef __ASSEMBLY__ -# define ALIGN __ALIGN -# define ALIGN_STR __ALIGN_STR -# define ENTRY(name) \ - .globl SYMBOL_NAME(name); \ - ALIGN; \ - SYMBOL_NAME_LABEL(name) -#endif /* __ASSEMBLY__ */ - #endif /* _VAX_LINKAGE_H */ |
From: Jan-Benedict G. <jb...@us...> - 2005-05-09 21:22:37
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv31466/include/asm-vax Modified Files: bitops.h Log Message: - Another touchup. And now some TV... Index: bitops.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/bitops.h,v retrieving revision 1.19 retrieving revision 1.20 diff -u -d -r1.19 -r1.20 --- bitops.h 28 Mar 2005 16:27:23 -0000 1.19 +++ bitops.h 9 May 2005 21:22:23 -0000 1.20 @@ -14,26 +14,27 @@ * was cleared before the operation and != 0 if it was not. * * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). - * + * * VAX port atp Jan 1999 * Updates for 2.4.3+ atp Mar 2002 */ -#define ADDR (*(volatile long *) addr) - +/* This will come handy to access given addresses */ +#define ADDR (* (volatile long *) addr) -/* I'd like to use interlocked variable length bitfield instructions - * here, but they only seem to exist in branch and {set/clear} flavours - * There appears to be no complement bit equivalent. - * We are ignoring SMP for the moment anyway +/* + * I'd like to use interlocked variable length bitfield instructions + * here, but they only seem to exist in branch and {set/clear} flavours. + * There appears to be no complement bit equivalent. + * We are ignoring SMP for the moment anyway... * atp Jan 99. - * - * we use bbss rather than bisl2 for the variable bit field - * if the bit is in a register, and nr>31 then we get a reserveed - * operand fault. Add an "i" to the instructions for interlocked - * operation (note: check subsetting rules?) - * - * Mar 2002 atp. Further reading of asm-i386/bitops.h reveals that the + * + * We use bbss rather than bisl2 for the variable bit field + * if the bit is in a register, and nr>31 then we get a reserveed + * operand fault. Add an "i" to the instructions for interlocked + * operation (note: check subsetting rules?) + * + * Mar 2002 atp. 
Further reading of asm-i386/bitops.h reveals that the * bit number is constrained by "Ir" to be an integer in the * range 0-31. So we could use bisl2 after all. */ @@ -48,14 +49,15 @@ * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static __inline__ void set_bit(int nr, volatile void * addr) +static __inline__ void set_bit(int nr, volatile void *addr) { __asm__ __volatile__( - "bbss %1, %0, 1f\n" - "1:\n" - :"=m" (ADDR) - :"ir" (nr)); + " bbss %1, %0, 1f \n" + "1: \n" + : "=m" (ADDR) + : "ir" (nr)); } + /** * __set_bit - Set a bit in memory * @nr: the bit to set @@ -65,13 +67,13 @@ * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ -static __inline__ void __set_bit(int nr, volatile void * addr) +static __inline__ void __set_bit(int nr, volatile void *addr) { __asm__( - "bbss %1,%0, 2f\n" - "2:\n" - :"=m" (ADDR) - :"ir" (nr)); + " bbss %1, %0, 2f \n" + "2: \n" + : "=m" (ADDR) + : "ir" (nr)); } /** @@ -84,13 +86,13 @@ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() * in order to ensure changes are visible on other processors. */ -static __inline__ void clear_bit(int nr, volatile void * addr) +static __inline__ void clear_bit(int nr, volatile void *addr) { __asm__ __volatile__( - "bbcc %1,%0, 1f\n" - "1:\n" - :"=m" (ADDR) - :"ir" (nr)); + " bbcc %1,%0, 1f \n" + "1: \n" + : "=m" (ADDR) + : "ir" (nr)); } /** @@ -102,13 +104,13 @@ * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. 
*/ -static __inline__ void __clear_bit(int nr, volatile void * addr) +static __inline__ void __clear_bit(int nr, volatile void *addr) { __asm__( - "bbcc %1,%0, 2f\n" - "2:\n" - :"=m" (ADDR) - :"ir" (nr)); + " bbcc %1, %0, 2f \n" + "2: \n" + : "=m" (ADDR) + : "ir" (nr)); } #define smp_mb__before_clear_bit() smp_mb() @@ -123,14 +125,14 @@ * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ -static __inline__ void __change_bit(int nr, volatile void * addr) +static __inline__ void __change_bit(int nr, volatile void *addr) { __asm__ __volatile__( - "bbcs %1,%0,3f\n" - "bbsc %1,%0,3f\n" - "3:" - :"=m" (ADDR) - :"ir" (nr)); + " bbcs %1, %0, 3f \n" + " bbsc %1, %0, 3f \n" + "3: \n" + : "=m" (ADDR) + : "ir" (nr)); } /** @@ -142,14 +144,14 @@ * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static __inline__ void change_bit(int nr, volatile void * addr) +static __inline__ void change_bit(int nr, volatile void *addr) { __asm__ __volatile__( - "bbcs %1,%0,3f\n" - "bbsc %1,%0,3f\n" - "3:" - :"=m" (ADDR) - :"ir" (nr)); + " bbcs %1, %0, 3f \n" + " bbsc %1, %0, 3f \n" + "3: \n" + : "=m" (ADDR) + : "ir" (nr)); } /** @@ -157,21 +159,24 @@ * @nr: Bit to set * @addr: Address to count from * - * This operation is atomic and cannot be reordered. + * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ -static __inline__ int test_and_set_bit(int nr, volatile void * addr) +static __inline__ int test_and_set_bit(int nr, volatile void *addr) { int oldbit; -/* there are interlocked versions of bbss and bbcs we could use*/ + /* There are interlocked versions of bbss and bbcs we could use... 
*/ __asm__ __volatile__( - "clrl %0\n" - "bbcs %2,%1, 1f\n" - "incl %0\n" - "1:\n" - :"=&r" (oldbit),"=m" (ADDR) - :"r" (nr) : "memory"); + " clrl %0 \n" + " bbcs %2, %1, 1f \n" + " incl %0 \n" + "1: \n" + : "=&r" (oldbit), + "=m" (ADDR) + : "r" (nr) + : "memory"); + return oldbit; } @@ -180,21 +185,23 @@ * @nr: Bit to set * @addr: Address to count from * - * This operation is non-atomic and can be reordered. + * This operation is non-atomic and can be reordered. * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ -static __inline__ int __test_and_set_bit(int nr, volatile void * addr) +static __inline__ int __test_and_set_bit(int nr, volatile void *addr) { int oldbit; __asm__( - "clrl %0\n" - "bbcs %2,%1, 1f\n" - "incl %0\n" - "1:\n" - :"=&r" (oldbit),"=m" (ADDR) - :"ir" (nr)); + " clrl %0 \n" + " bbcs %2, %1, 1f \n" + " incl %0 \n" + "1: \n" + : "=&r" (oldbit), + "=m" (ADDR) + : "ir" (nr)); + return oldbit; } @@ -203,20 +210,23 @@ * @nr: Bit to set * @addr: Address to count from * - * This operation is atomic and cannot be reordered. + * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ -static __inline__ int test_and_clear_bit(int nr, volatile void * addr) +static __inline__ int test_and_clear_bit(int nr, volatile void *addr) { int oldbit; __asm__ __volatile__( - "clrl %0\n" - "bbcc %2,%1, 1f\n" - "incl %0\n" - "1:\n" - :"=&r" (oldbit),"=m" (ADDR) - :"ir" (nr) : "memory"); + " clrl %0 \n" + " bbcc %2, %1, 1f \n" + " incl %0 \n" + "1: \n" + : "=&r" (oldbit), + "=m" (ADDR) + : "ir" (nr) + : "memory"); + return oldbit; } @@ -225,26 +235,28 @@ * @nr: Bit to set * @addr: Address to count from * - * This operation is non-atomic and can be reordered. + * This operation is non-atomic and can be reordered. * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. 
*/ -static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) +static __inline__ int __test_and_clear_bit(int nr, volatile void *addr) { int oldbit; __asm__( - "clrl %0\n" - "bbcc %2,%1, 1f\n" - "incl %0\n" - "1:\n" - :"=&r" (oldbit),"=m" (ADDR) - :"ir" (nr)); + " clrl %0 \n" + " bbcc %2, %1, 1f \n" + " incl %0 \n" + "1: \n" + : "=&r" (oldbit), + "=m" (ADDR) + : "ir" (nr)); + return oldbit; } /* WARNING: non atomic and it can be reordered! */ -static __inline__ int __test_and_change_bit(int nr, volatile void * addr) +static __inline__ int __test_and_change_bit(int nr, volatile void *addr) { unsigned long mask = 1 << (nr & 0x1f); int *m = ((int *) addr) + (nr >> 5); @@ -259,21 +271,24 @@ * @nr: Bit to set * @addr: Address to count from * - * This operation is atomic and cannot be reordered. + * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ -static __inline__ int test_and_change_bit(int nr, volatile void * addr) +static __inline__ int test_and_change_bit(int nr, volatile void *addr) { int oldbit; __asm__ __volatile__( - "clrl %0\n" - "bbsc %2. %1,4f\n" - "incl %0\n" - "bbcs %2, %1,4f\n" - "4:\n" - :"=&r" (oldbit),"=m" (ADDR) - :"ir" (nr) : "memory"); + " clrl %0 \n" + " bbsc %2. %1, 4f \n" + " incl %0 \n" + " bbcs %2, %1,4f \n" + "4: \n" + : "=&r" (oldbit), + "=m" (ADDR) + : "ir" (nr) + : "memory"); + return oldbit; } @@ -289,50 +304,55 @@ /* * This routine doesn't need to be atomic. 
*/ -static __inline__ int constant_test_bit(int nr, const volatile void * addr) +static __inline__ int constant_test_bit(int nr, const volatile void *addr) { return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; } -static __inline__ int variable_test_bit(int nr, const volatile void * addr) +static __inline__ int variable_test_bit(int nr, const volatile void *addr) { int oldbit; __asm__ __volatile__( - "clrl %0\n" - "bbc %2,%1,1f\n" - "incl %0\n" - "1:\n" - :"=&r" (oldbit) - :"m" (ADDR),"ir" (nr)); + " clrl %0 \n" + " bbc %2, %1, 1f \n" + " incl %0 \n" + "1: \n" + : "=&r" (oldbit) + : "m" (ADDR), + "ir" (nr)); + return oldbit; } -#define test_bit(nr,addr) \ -(__builtin_constant_p(nr) ? \ - constant_test_bit((nr),(addr)) : \ - variable_test_bit((nr),(addr))) - +#define test_bit(nr, addr) \ + (__builtin_constant_p(nr) \ + ? constant_test_bit((nr), (addr)) \ + : variable_test_bit((nr), (addr))) -/* +/* * FIXME: if we were keen we could do the ffs/ffz in a nice tight * assembly loop using ffc/ffs and a sliding 32bit field window. - * For now, we use the alpha/ppc port method, with 32bit chunks + * + * For now, we use the alpha/ppc port method, with 32bit chunks. */ + /** * ffz - find first zero in word. * @word: The word to search * * Undefined if no zero exists, so code should check against ~0UL first. * (VAX) - * We could check the Z condition code bit if we wanted to check against + * We could check the Z condition code bit if we wanted to check against * the ~OUL case, but this interface is designed for intel. Nuff sed. 
*/ static __inline__ unsigned long ffz(unsigned long word) { - __asm__("ffc $0, $32, %1 , %0\n" - :"=rm" (word) - :"rm" (word)); + __asm__( + " ffc $0, $32, %1, %0 \n" + : "=rm" (word) + : "rm" (word)); + return word; } @@ -344,29 +364,27 @@ */ static __inline__ unsigned long __ffs(unsigned long word) { - __asm__("ffs $0, $32, %1, %0\n" - :"=rm" (word) - :"rm" (word)); + __asm__( + " ffs $0, $32, %1, %0 \n" + : "=rm" (word) + : "rm" (word)); + return word; } - /* * fls: find last bit set. */ - -#define fls(x) generic_fls(x) +#define fls(x) generic_fls(x) #ifdef __KERNEL__ - /* * Every architecture must define this function. It's the fastest * way of searching a 168-bit bitmap where the first 100 bits are * unlikely to be set. It's guaranteed that at least one of the 140 * bits is set. */ - static inline int sched_find_first_bit(unsigned long *b) { if (unlikely(b[0])) @@ -380,7 +398,6 @@ return __ffs(b[4]) + 128; } - /** * ffs - find first bit set * @x: the word to search @@ -393,23 +410,24 @@ { int r; - __asm__("ffs $0, $32, %1, %0\n" - "bnequ 1f\n" - "movl $-1,%0\n" - "1:" - : "=ir" (r) - : "mr" (x)); + __asm__( + " ffs $0, $32, %1, %0 \n" + " bnequ 1f \n" + " movl $-1, %0 \n" + "1: \n" + : "=ir" (r) + : "mr" (x)); + return r + 1; } -#endif - +#endif /* __KERNEL__ */ + /* * This implementation of find_{first,next}_zero_bit was stolen from * Linus' asm-alpha/bitops.h. 
*/ - -extern __inline__ unsigned long -find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset) +extern __inline__ unsigned long +find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset) { unsigned int * p = ((unsigned int *) addr) + (offset >> 5); unsigned int result = offset & ~31UL; @@ -446,8 +464,7 @@ return result + ffz(tmp); } -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) +#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) /* * This implementation of find_next_bit() is also stolen @@ -491,41 +508,34 @@ return result + __ffs(tmp); } -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) - +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) #ifdef __KERNEL__ - /** * hweightN - returns the hamming weight of a N-bit word * @x: the word to weigh * * The Hamming Weight of a number is the total number of bits set in it. */ -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -#endif /* __KERNEL__ */ - -#ifdef __KERNEL__ - -#define ext2_set_bit __test_and_set_bit -#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) -#define ext2_clear_bit __test_and_clear_bit -#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) -#define ext2_test_bit test_bit -#define ext2_find_first_zero_bit find_first_zero_bit -#define ext2_find_next_zero_bit find_next_zero_bit +#define hweight32(x) generic_hweight32(x) +#define hweight16(x) generic_hweight16(x) +#define hweight8(x) generic_hweight8(x) -/* Bitmap functions for the minix filesystem. 
*/ -#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr) -#define minix_set_bit(nr,addr) __set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) +/* Bitmap functions for the ext2/ext3 filesystems. */ +#define ext2_set_bit __test_and_set_bit +#define ext2_set_bit_atomic(l,n,a) test_and_set_bit((n), (a)) +#define ext2_clear_bit __test_and_clear_bit +#define ext2_clear_bit_atomic(l, n, a) test_and_clear_bit((n), (a)) +#define ext2_test_bit test_bit +#define ext2_find_first_zero_bit find_first_zero_bit +#define ext2_find_next_zero_bit find_next_zero_bit +/* Bitmap functions for the Minix filesystem. */ +#define minix_test_and_set_bit(nr, addr) __test_and_set_bit((nr), (addr)) +#define minix_set_bit(nr, addr) __set_bit((nr), (addr)) +#define minix_test_and_clear_bit(nr, addr) __test_and_clear_bit((nr), (addr)) +#define minix_test_bit(nr, addr) test_bit((nr), (addr)) +#define minix_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size)) #endif /* __KERNEL__ */ #endif /* _VAX_BITOPS_H */ |
From: Jan-Benedict G. <jb...@us...> - 2005-05-09 21:11:06
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv27379/include/asm-vax Modified Files: linkage.h Log Message: - Throw out old cruft for other architectures. - Indent the #defines a bit so it's easier to read. Index: linkage.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/linkage.h,v retrieving revision 1.3 retrieving revision 1.4 diff -u -d -r1.3 -r1.4 --- linkage.h 20 May 2002 00:33:39 -0000 1.3 +++ linkage.h 9 May 2005 21:10:56 -0000 1.4 @@ -1,57 +1,31 @@ -#ifndef _LINUX_LINKAGE_H -#define _LINUX_LINKAGE_H - -/* what is this doing in include/linux ? - * This is machine specific */ +#ifndef _VAX_LINKAGE_H +#define _VAX_LINKAGE_H #ifdef __cplusplus -#define asmlinkage extern "C" +# define asmlinkage extern "C" #else -#define asmlinkage +# define asmlinkage #endif /* FIXME: # is a comment sign under VAX assembly */ -#define SYMBOL_NAME_STR(X) #X -#define SYMBOL_NAME(X) X +#define SYMBOL_NAME_STR(X) #X +#define SYMBOL_NAME(X) X #ifdef __STDC__ -#define SYMBOL_NAME_LABEL(X) X##: +# define SYMBOL_NAME_LABEL(X) X##: #else -#define SYMBOL_NAME_LABEL(X) X/**/: +# define SYMBOL_NAME_LABEL(X) X/**/: #endif -/* yuk. 
-#ifdef __arm__ -#define __ALIGN .align 0 -#define __ALIGN_STR ".align 0" -#else -#ifdef __mc68000__ -#define __ALIGN .align 4 -#define __ALIGN_STR ".align 4" -#else -#if !defined(__i486__) && !defined(__i586__) -#define __ALIGN .align 4,0x90 -#define __ALIGN_STR ".align 4,0x90"*/ -/*#else *//* __i486__/__i586__ */ -/*#define __ALIGN .align 16,0x90 -#define __ALIGN_STR ".align 16,0x90" -#endif *//* __i486__/__i586__ */ -/*#endif *//* __mc68000__ */ -/*#endif *//* __arm__ */ - - #define __ALIGN .balign 2 #define __ALIGN_STR ".balign 2" #ifdef __ASSEMBLY__ +# define ALIGN __ALIGN +# define ALIGN_STR __ALIGN_STR +# define ENTRY(name) \ + .globl SYMBOL_NAME(name); \ + ALIGN; \ + SYMBOL_NAME_LABEL(name) +#endif /* __ASSEMBLY__ */ -#define ALIGN __ALIGN -#define ALIGN_STR __ALIGN_STR - -#define ENTRY(name) \ - .globl SYMBOL_NAME(name); \ - ALIGN; \ - SYMBOL_NAME_LABEL(name) - -#endif - -#endif +#endif /* _VAX_LINKAGE_H */ |
From: Kenn H. <ke...@us...> - 2005-05-09 20:37:28
|
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/mm In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv18279/mm Modified Files: init.c ioremap.c Log Message: Use Z format modifier for size_t printk to silence warnings with both GCC 2.95 and GCC 4.1 Index: init.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/mm/init.c,v retrieving revision 1.19 retrieving revision 1.20 diff -u -d -r1.19 -r1.20 --- init.c 9 May 2005 20:11:06 -0000 1.19 +++ init.c 9 May 2005 20:37:20 -0000 1.20 @@ -152,7 +152,7 @@ free_reserved_mem(&__init_begin, &__init_end); - printk("Freeing unused kernel memory: %ldk freed\n", + printk("Freeing unused kernel memory: %Zdk freed\n", (&__init_end - &__init_begin) >> 10); } Index: ioremap.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/mm/ioremap.c,v retrieving revision 1.10 retrieving revision 1.11 diff -u -d -r1.10 -r1.11 --- ioremap.c 9 May 2005 20:07:45 -0000 1.10 +++ ioremap.c 9 May 2005 20:37:20 -0000 1.11 @@ -117,7 +117,7 @@ __flush_tlb_one(virt_start + (i<<PAGE_SHIFT)); } - printk("IO mapped phys addr 0x%08lx, 0x%04x pages at virt 0x%08lx (IOMAP PTE index 0x%04lx)\n", + printk("IO mapped phys addr 0x%08lx, 0x%04x pages at virt 0x%08lx (IOMAP PTE index 0x%04Zx)\n", phys_start, num_ptes, (unsigned long) virt_start, start_pte - iomap_base); return virt_start + offset; @@ -152,7 +152,7 @@ iomap_sizes[p - iomap_base] = 0; - printk("IO unmapping 0x%04x pages at PTE index 0x%04lx\n", + printk("IO unmapping 0x%04x pages at PTE index 0x%04Zx\n", num_ptes, p - iomap_base); while (num_ptes--) { |
From: Jan-Benedict G. <jb...@us...> - 2005-05-09 20:34:36
|
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv17344/arch/vax/kernel Modified Files: process.c Log Message: - kernel_thread_exit() is correctly attributed to never return, but the called __chmk(SYS_exit) doesn't look non-exiting enough to GCC. Silence warning by entering a tight loop. Index: process.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel/process.c,v retrieving revision 1.28 retrieving revision 1.29 diff -u -d -r1.28 -r1.29 --- process.c 25 Apr 2005 22:25:05 -0000 1.28 +++ process.c 9 May 2005 20:34:28 -0000 1.29 @@ -177,6 +177,8 @@ static ATTRIB_NORET void kernel_thread_exit(int exitcode) { __chmk(__NR_exit); + while (1) + /* Keep GCC happy */; } /* |
From: Jan-Benedict G. <jb...@us...> - 2005-05-09 20:30:04
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv15825/include/asm-vax Modified Files: uaccess.h Log Message: - Real touchup. Index: uaccess.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/uaccess.h,v retrieving revision 1.8 retrieving revision 1.9 diff -u -d -r1.8 -r1.9 --- uaccess.h 25 Apr 2005 22:25:07 -0000 1.8 +++ uaccess.h 9 May 2005 20:29:55 -0000 1.9 @@ -30,10 +30,10 @@ #define segment_eq(a,b) ((a).seg == (b).seg) -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) -#define __user_ok(addr,size) (((size) <= 0x80000000)&&((addr) <= 0x80000000-(size))) -#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) -#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) +#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __user_ok(addr, size) (((size) <= 0x80000000) && ((addr) <= 0x80000000-(size))) +#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) +#define access_ok(type, addr, size) __access_ok((unsigned long) (addr), (size)) extern inline int __deprecated verify_area(int type, const void * addr, unsigned long size) { @@ -74,72 +74,71 @@ * with a separate "access_ok()" call (this is used when we do multiple * accesses to the same area of user memory). * - * FIXME: Lets try this.. + * FIXME: Lets try this.. * As we use the same address space for kernel and user data on the * PowerPC, we can just do these as direct assignments. (Of course, the * exception handling means that it's no longer "just"...) 
*/ -#define get_user(x,ptr) \ - __get_user_check((x),(ptr),sizeof(*(ptr))) -#define put_user(x,ptr) \ - __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof (*(ptr))) +#define put_user(x, ptr) \ + __put_user_check((__typeof__ (*(ptr))) (x),(ptr), sizeof (*(ptr))) -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__ (*(ptr))) (x), (ptr), sizeof (*(ptr))) /* * The "xxx_ret" versions return constant specified in third argument, if * something bad happens. These macros can be optimized for the * case of just returning from the function xxx_ret is used. */ +#define put_user_ret(x, ptr, ret) ({ \ + if (put_user((x), (ptr))) return ret; }) -#define put_user_ret(x,ptr,ret) ({ \ -if (put_user(x,ptr)) return ret; }) - -#define get_user_ret(x,ptr,ret) ({ \ -if (get_user(x,ptr)) return ret; }) +#define get_user_ret(x, ptr, ret) ({ \ + if (get_user((x), (ptr))) return ret; }) -#define __put_user_ret(x,ptr,ret) ({ \ -if (__put_user(x,ptr)) return ret; }) +#define __put_user_ret(x, ptr, ret) ({ \ + if (__put_user((x), (ptr))) return ret; }) -#define __get_user_ret(x,ptr,ret) ({ \ -if (__get_user(x,ptr)) return ret; }) +#define __get_user_ret(x, ptr, ret) ({ \ + if (__get_user((x), (ptr))) return ret; }) extern long __put_user_bad(void); -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ long __pu_err; \ - __put_user_size((x),(ptr),(size),__pu_err); \ + __put_user_size((x), (ptr), (size), __pu_err); \ __pu_err; \ }) -#define __put_user_check(x,ptr,size) \ -({ \ - long __pu_err = -EFAULT; \ - __typeof__(*(ptr)) *__pu_addr = (ptr); \ - if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ - 
__put_user_size((x),__pu_addr,(size),__pu_err); \ - __pu_err; \ +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__ (*(ptr)) *__pu_addr = (ptr); \ + if (access_ok(VERIFY_WRITE, __pu_addr, (size))) \ + __put_user_size((x), __pu_addr, (size), __pu_err); \ + __pu_err; \ }) -#define __put_user_size(x,ptr,size,retval) \ -do { \ - retval = 0; \ - switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,"movb"); break; \ - case 2: __put_user_asm(x,ptr,retval,"movw"); break; \ - case 4: __put_user_asm(x,ptr,retval,"movl"); break; \ - case 8: __put_user_asm(x,ptr,retval,"movq"); break; \ - default: __put_user_bad(); \ - } \ +#define __put_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: __put_user_asm((x), (ptr), (retval), "movb"); break; \ + case 2: __put_user_asm((x), (ptr), (retval), "movw"); break; \ + case 4: __put_user_asm((x), (ptr), (retval), "movl"); break; \ + case 8: __put_user_asm((x), (ptr), (retval), "movq"); break; \ + default: __put_user_bad(); \ + } \ } while (0) struct __large_struct { unsigned long buf[100]; }; -#define __m(x) (*(struct __large_struct *)(x)) +#define __m(x) (* (struct __large_struct *) (x)) /* * We don't tell gcc that we are accessing memory, but this is OK @@ -148,68 +147,74 @@ */ #define __put_user_asm(x, addr, err, op) \ __asm__ __volatile__( \ - "1: "op" %1,%2\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %3,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 2\n" \ - " .long 1b,3b\n" \ - ".previous" \ - : "=r"(err) \ - : "r"(x), "m"(*addr), "i"(-EFAULT), "0"(err)) + "1: "op" %1, %2 \n" \ + "2: \n" \ + ".section .fixup,\"ax\" \n" \ + "3: movl %3,%0 \n" \ + " jmp 2b \n" \ + ".previous \n" \ + ".section __ex_table,\"a\" \n" \ + " .align 2 \n" \ + " .long 1b,3b \n" \ + ".previous" \ + : "=r" (err) \ + : "r" (x), \ + "m" (*addr), \ + "i" (-EFAULT), \ + "0" (err)) -#define __get_user_nocheck(x,ptr,size) \ +#define 
__get_user_nocheck(x, ptr, size) \ ({ \ long __gu_err, __gu_val; \ - __get_user_size(__gu_val,(ptr),(size),__gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ + (x) = (__typeof__(*(ptr))) __gu_val; \ __gu_err; \ }) -#define __get_user_check(x,ptr,size) \ +#define __get_user_check(x, ptr, size) \ ({ \ long __gu_err = -EFAULT, __gu_val = 0; \ const __typeof__(*(ptr)) *__gu_addr = (ptr); \ - if (access_ok(VERIFY_READ,__gu_addr,size)) \ - __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + if (access_ok(VERIFY_READ, __gu_addr, (size))) \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__typeof__(*(ptr))) __gu_val; \ __gu_err; \ }) extern long __get_user_bad(void); -#define __get_user_size(x,ptr,size,retval) \ -do { \ - retval = 0; \ - switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,"movb"); break; \ - case 2: __get_user_asm(x,ptr,retval,"movw"); break; \ - case 4: __get_user_asm(x,ptr,retval,"movl"); break; \ - case 8: __get_user_asm(x,ptr,retval,"movq"); break; \ - default: (x) = __get_user_bad(); \ - } \ +#define __get_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: __get_user_asm((x), (ptr), (retval), "movb"); break; \ + case 2: __get_user_asm((x), (ptr), (retval), "movw"); break; \ + case 4: __get_user_asm((x), (ptr), (retval), "movl"); break; \ + case 8: __get_user_asm((x), (ptr), (retval), "movq"); break; \ + default: (x) = __get_user_bad(); \ + } \ } while (0) -#define __get_user_asm(x, addr, err, op) \ - __asm__ __volatile__( \ - "1: "op" %2,%1\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %3, %0\n" \ - " clrl %1\n" \ - " jmp 2b\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 2\n" \ - " .long 1b,3b\n" \ - ".text" \ - : "=r"(err), "=r"(x) \ - : "m"(*addr), "i"(-EFAULT), "0"(err)) +#define __get_user_asm(x, addr, err, op) \ + __asm__ __volatile__( \ + "1: "op" %2, %1 \n" \ + "2: 
\n" \ + ".section .fixup,\"ax\" \n" \ + "3: movl %3, %0 \n" \ + " clrl %1 \n" \ + " jmp 2b \n" \ + ".section __ex_table,\"a\" \n" \ + " .align 2 \n" \ + ".long 1b, 3b \n" \ + ".text \n" \ + : "=r" (err), \ + "=r" (x) \ + : "m" (*addr), \ + "i" (-EFAULT), \ + "0" (err)) -/* more complex routines */ +/* More complex routines */ extern int __copy_tofrom_user(void *to, const void *from, unsigned long size); @@ -265,8 +270,10 @@ */ #define strlen_user(s) strnlen_user(s, ~0UL >> 1) -/* Returns: 0 if exception before NUL or reaching the supplied limit (N), - * a value greater than N if the limit would be exceeded, else strlen. */ +/* + * Returns: 0 if exception before NUL or reaching the supplied limit (N), + * a value greater than N if the limit would be exceeded, else strlen. + */ extern long __strnlen_user(const char *, long); extern inline long strnlen_user(const char *str, long n) { |
From: Jan-Benedict G. <jb...@us...> - 2005-05-09 20:11:14
|
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/mm In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv10534/arch/vax/mm Modified Files: init.c Log Message: - Change format to match argument. gcc-4.1 related. Index: init.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/mm/init.c,v retrieving revision 1.18 retrieving revision 1.19 diff -u -d -r1.18 -r1.19 --- init.c 25 Apr 2005 22:25:05 -0000 1.18 +++ init.c 9 May 2005 20:11:06 -0000 1.19 @@ -152,7 +152,7 @@ free_reserved_mem(&__init_begin, &__init_end); - printk("Freeing unused kernel memory: %dk freed\n", + printk("Freeing unused kernel memory: %ldk freed\n", (&__init_end - &__init_begin) >> 10); } |
From: Jan-Benedict G. <jb...@us...> - 2005-05-09 20:10:03
|
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv10144/arch/vax/kernel Modified Files: time.c Log Message: - Silence "control reaches end of non-void function" warning (gcc-4.1 related). Index: time.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel/time.c,v retrieving revision 1.21 retrieving revision 1.22 diff -u -d -r1.21 -r1.22 --- time.c 25 Apr 2005 22:25:05 -0000 1.21 +++ time.c 9 May 2005 20:09:53 -0000 1.22 @@ -282,6 +282,8 @@ time_maxerror = NTP_PHASE_LIMIT; time_esterror = NTP_PHASE_LIMIT; write_sequnlock_irq(&xtime_lock); + + return 0; } /* nicked from the i386 port, but we use the same chip, hee hee */ |
From: Jan-Benedict G. <jb...@us...> - 2005-05-09 20:07:54
|
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/mm In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv9469/arch/vax/mm Modified Files: ioremap.c Log Message: - Change format argument to match the supplied argument. This silences two gcc-4.1 warnings. Index: ioremap.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/mm/ioremap.c,v retrieving revision 1.9 retrieving revision 1.10 diff -u -d -r1.9 -r1.10 --- ioremap.c 25 Apr 2005 22:25:05 -0000 1.9 +++ ioremap.c 9 May 2005 20:07:45 -0000 1.10 @@ -117,7 +117,7 @@ __flush_tlb_one(virt_start + (i<<PAGE_SHIFT)); } - printk("IO mapped phys addr 0x%08lx, 0x%04x pages at virt 0x%08lx (IOMAP PTE index 0x%04x)\n", + printk("IO mapped phys addr 0x%08lx, 0x%04x pages at virt 0x%08lx (IOMAP PTE index 0x%04lx)\n", phys_start, num_ptes, (unsigned long) virt_start, start_pte - iomap_base); return virt_start + offset; @@ -152,7 +152,7 @@ iomap_sizes[p - iomap_base] = 0; - printk("IO unmapping 0x%04x pages at PTE index 0x%04x\n", + printk("IO unmapping 0x%04x pages at PTE index 0x%04lx\n", num_ptes, p - iomap_base); while (num_ptes--) { |
From: Kenn H. <ke...@us...> - 2005-05-08 22:56:22
|
Update of /cvsroot/linux-vax/kernel-2.5/drivers/vax/serial In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv10539/drivers/vax/serial Modified Files: ipr.c Log Message: Fix a static-follows-non-static error with GCC 4.1 Index: ipr.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/drivers/vax/serial/ipr.c,v retrieving revision 1.10 retrieving revision 1.11 diff -u -d -r1.10 -r1.11 --- ipr.c 25 Apr 2005 22:25:07 -0000 1.10 +++ ipr.c 8 May 2005 22:56:13 -0000 1.11 @@ -353,6 +353,15 @@ .type = PORT_VAX_IPR, }; +static struct uart_driver iprcons_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "ttyS", + .dev_name = "ttyS", + .major = SERIAL_VAX_IPR_MAJOR, + .minor = SERIAL_VAX_IPR_MINOR, + .nr = 1, +}; + #ifdef CONFIG_SERIAL_CONSOLE static void iprcons_console_write(struct console *co, const char *p, unsigned int count) @@ -404,13 +413,6 @@ __mtpr(old_inten_tx, PR_TXCS); } -/* - * This is really just a forward declaration for the static struct - * uart_driver defined below - */ - -extern struct uart_driver iprcons_uart_driver; - static struct console iprcons_console = { .name = "ttyS", .write = iprcons_console_write, @@ -421,29 +423,14 @@ static int __init iprcons_console_init(void) { + iprcons_uart_driver.cons = &iprcons_console; register_console(&iprcons_console); return 0; } console_initcall(iprcons_console_init); -#define VAX_IPR_CONSOLE &iprcons_console - -#else /* CONFIG_SERIAL_CONSOLE */ - -#define VAX_IPR_CONSOLE NULL - #endif /* CONFIG_SERIAL_CONSOLE */ -static struct uart_driver iprcons_uart_driver = { - .owner = THIS_MODULE, - .driver_name = "ttyS", - .dev_name = "ttyS", - .major = SERIAL_VAX_IPR_MAJOR, - .minor = SERIAL_VAX_IPR_MINOR, - .nr = 1, - .cons = VAX_IPR_CONSOLE, -}; - static void __exit iprcons_exit(void) { /* |
From: Kenn H. <ke...@us...> - 2005-05-04 22:23:01
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv5834/include/asm-vax Modified Files: mv.h Log Message: Oops - checked wrong CONFIG_CPU option in the is_ka650() helper Index: mv.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/mv.h,v retrieving revision 1.32 retrieving revision 1.33 diff -u -d -r1.32 -r1.33 --- mv.h 26 Apr 2005 23:07:54 -0000 1.32 +++ mv.h 4 May 2005 22:22:50 -0000 1.33 @@ -163,7 +163,7 @@ } static inline int is_ka650(void) { -#ifdef CONFIG_CPU_KA49 +#ifdef CONFIG_CPU_KA650 return mv == &mv_ka650; #else return 0; |
From: Jan-Benedict G. <jb...@us...> - 2005-04-27 21:54:43
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv21244 Removed Files: suspend.h Log Message: - A non-existing file will cause an error just like an #error line... --- suspend.h DELETED ---
From: Jan-Benedict G. <jb...@us...> - 2005-04-27 20:48:45
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv16373 Removed Files: init.h Log Message: - Not referenced anywhere, so killing the dinosaur... --- init.h DELETED ---
From: Jan-Benedict G. <jb...@us...> - 2005-04-27 06:52:34
|
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv28897/arch/vax/kernel Modified Files: cpu_ka49.c Log Message: - Change comment. Index: cpu_ka49.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel/cpu_ka49.c,v retrieving revision 1.8 retrieving revision 1.9 diff -u -d -r1.8 -r1.9 --- cpu_ka49.c 25 Apr 2005 12:55:03 -0000 1.8 +++ cpu_ka49.c 27 Apr 2005 06:51:53 -0000 1.9 @@ -91,7 +91,7 @@ __mtpr (0, start); /* - * Flush Cache + * Flush Instruction Cache */ flush_icache (); |
From: Jan-Benedict G. <jb...@us...> - 2005-04-26 23:09:20
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv14514/include/asm-vax Modified Files: atomic.h Log Message: - Remove pointless braces. Index: atomic.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/atomic.h,v retrieving revision 1.4 retrieving revision 1.5 diff -u -d -r1.4 -r1.5 --- atomic.h 26 Apr 2005 22:53:31 -0000 1.4 +++ atomic.h 26 Apr 2005 23:09:11 -0000 1.5 @@ -143,7 +143,7 @@ : "m" (v->counter) : "memory"); - return (c == 0); + return c == 0; } /** @@ -167,7 +167,7 @@ : "m" (v->counter) : "memory"); - return (c == 0); + return c == 0; } /** |
From: Jan-Benedict G. <jb...@us...> - 2005-04-26 23:08:04
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv13878/include/asm-vax Modified Files: mv.h Log Message: - Whitespace only. Index: mv.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/mv.h,v retrieving revision 1.31 retrieving revision 1.32 diff -u -d -r1.31 -r1.32 --- mv.h 26 Apr 2005 22:40:21 -0000 1.31 +++ mv.h 26 Apr 2005 23:07:54 -0000 1.32 @@ -1,5 +1,5 @@ -#ifndef __VAX_MV_H_ -#define __VAX_MV_H_ +#ifndef _VAX_MV_H +#define _VAX_MV_H /* * Define format of machine vector. This structure abstracts out @@ -12,7 +12,7 @@ * * Optional fields can be NULL. A NULL optional field will either * be a no-op, or will lead to sensible default behaviour. - * + * * If you add or remove elements, please don't forget to modify * ./arch/vax/kernel/asm-offsets.c! */ @@ -72,7 +72,7 @@ extern struct vax_mv mv_vxt; /* - * This defines a match for a CPU and a macro to place + * This defines a match for a CPU and a macro to place * such a match structure into the right section at link time */ struct cpu_match { @@ -180,4 +180,4 @@ #endif /* !__ASSEMBLY__ */ -#endif /* __VAX_MV_H_ */ +#endif /* _VAX_MV_H */ |
From: Jan-Benedict G. <jb...@us...> - 2005-04-26 22:53:44
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv6417 Modified Files: a.out.h atomic.h Log Message: - Ready for upstream merge:) Index: a.out.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/a.out.h,v retrieving revision 1.3 retrieving revision 1.4 diff -u -d -r1.3 -r1.4 --- a.out.h 20 May 2002 00:33:38 -0000 1.3 +++ a.out.h 26 Apr 2005 22:53:31 -0000 1.4 @@ -1,30 +1,35 @@ -#ifndef _VAX_A_OUT_H_ -#define _VAX_A_OUT_H_ +#ifndef _VAX_A_OUT_H +#define _VAX_A_OUT_H -/* from the i386 port */ -#define STACK_TOP TASK_SIZE /* 0x7fffffff */ /*was TASK_SIZE and is TASK_SIZE once again .. - D.A. June 2001.. otherwise argc stuff breaks */ +/* Stolen from the i386 port */ -/* this is needed to override a conflicting definition in ../linux/a.out.h */ -#define page_size PAGE_SIZE -/* it should be : -#define SEGMENT_SIZE PAGE_SIZE -*/ -struct exec -{ - unsigned long a_info; /* Use macros N_MAGIC, etc for access */ - unsigned a_text; /* length of text, in bytes */ - unsigned a_data; /* length of data, in bytes */ - unsigned a_bss; /* length of uninitialized data area for file, in bytes */ - unsigned a_syms; /* length of symbol table data in file, in bytes */ - unsigned a_entry; /* start address */ - unsigned a_trsize; /* length of relocation info for text, in bytes */ - unsigned a_drsize; /* length of relocation info for data, in bytes */ -}; +/* + * This was TASK_SIZE and is TASK_SIZE once again. Don't play + * games with it, otherwise argc stuff breaks. 
+ */ +#define STACK_TOP TASK_SIZE /* = 0x7fffffff */ + +/* + * This is needed to override a conflicting definition in ../linux/a.out.h + * It should really be: + * + * #define SEGMENT_SIZE PAGE_SIZE + */ +#define page_size PAGE_SIZE +struct exec { + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ + unsigned a_text; /* length of text, in bytes */ + unsigned a_data; /* length of data, in bytes */ + unsigned a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned a_syms; /* length of symbol table data in file, in bytes */ + unsigned a_entry; /* start address */ + unsigned a_trsize; /* length of relocation info for text, in bytes */ + unsigned a_drsize; /* length of relocation info for data, in bytes */ +}; #define N_TRSIZE(a) ((a).a_trsize) #define N_DRSIZE(a) ((a).a_drsize) #define N_SYMSIZE(a) ((a).a_syms) - -#endif /* _VAX_A_OUT_H_ */ +#endif /* _VAX_A_OUT_H */ Index: atomic.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/atomic.h,v retrieving revision 1.3 retrieving revision 1.4 diff -u -d -r1.3 -r1.4 --- atomic.h 20 May 2002 00:33:38 -0000 1.3 +++ atomic.h 26 Apr 2005 22:53:31 -0000 1.4 @@ -1,5 +1,5 @@ -#ifndef _ARCH_VAX_ATOMIC -#define _ARCH_VAX_ATOMIC +#ifndef _VAX_ATOMIC_H +#define _VAX_ATOMIC_H #include <linux/config.h> @@ -10,12 +10,16 @@ /* * Worry about SMP VAXes later. Much later... - * Still, should try and use interlocked instructions here. - * When we do, we'll have to make atomic_t a short int, since - * ADAWI only works on WORDs and that's the only interlocked - * arithmetic primitive we have + * + * Still, should try and use interlocked instructions here. + * When we do, we'll have to make atomic_t a short int, since + * ADAWI only works on WORDs and that's the only interlocked + * arithmetic primitive we have. */ +#ifdef CONFIG_SMP +#error "SMP configuration aren't supported right now..." 
+#endif typedef struct { volatile int counter; } atomic_t; @@ -24,137 +28,145 @@ /** * atomic_read - read atomic variable * @v: pointer of type atomic_t - * + * * Atomically reads the value of @v. Note that the guaranteed * useful range of an atomic_t is only 24 bits. - */ + */ #define atomic_read(v) ((v)->counter) /** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value - * + * * Atomically sets the value of @v to @i. Note that the guaranteed * useful range of an atomic_t is only 24 bits. - */ + */ #define atomic_set(v, i) (((v)->counter) = i) /** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t - * + * * Atomically adds @i to @v. Note that the guaranteed useful range * of an atomic_t is only 24 bits -- Not sure thats true on the vax. */ static __inline__ void atomic_add(int i, atomic_t *v) { __asm__ __volatile__( - "addl2 %1,%0" - : "=m" (v->counter) - : "g" (i), "m" (v->counter)); + " addl2 %1, %0 \n" + : "=m" (v->counter) + : "g" (i), + "m" (v->counter)); } /** * atomic_sub - subtract the atomic variable * @i: integer value to subtract * @v: pointer of type atomic_t - * + * * Atomically subtracts @i from @v. Note that the guaranteed - * useful range of an atomic_t is only 24 bits. Not sure thats + * useful range of an atomic_t is only 24 bits. Not sure thats * true for VAX. */ static __inline__ void atomic_sub(int i, atomic_t *v) { __asm__ __volatile__( - "subl2 %1,%0" - :"=m" (v->counter) - :"g" (i), "m" (v->counter)); + " subl2 %1, %0 \n" + : "=m" (v->counter) + : "g" (i), + "m" (v->counter)); } /** * atomic_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer of type atomic_t - * + * * Atomically subtracts @i from @v and returns * true if the result is zero, or false for all * other cases. Note that the guaranteed * useful range of an atomic_t is only 24 bits. 
*/ -#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) +#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) /** * atomic_inc - increment atomic variable * @v: pointer of type atomic_t - * + * * Atomically increments @v by 1. Note that the guaranteed * useful range of an atomic_t is only 24 bits. (i386) Not sure * thats true on a VAX. - */ + */ static __inline__ void atomic_inc(atomic_t *v) { __asm__ __volatile__( - "incl %0" - : "=m" (v->counter) - : "m" (v->counter)); + " incl %0 \n" + : "=m" (v->counter) + : "m" (v->counter)); } /** * atomic_dec - decrement atomic variable * @v: pointer of type atomic_t - * + * * Atomically decrements @v by 1. Note that the guaranteed * useful range of an atomic_t is only 24 bits. - */ - + */ static __inline__ void atomic_dec(volatile atomic_t *v) { __asm__ __volatile__( - "decl %0" - : "=m" (v->counter) - : "m" (v->counter)); + " decl %0 \n" + : "=m" (v->counter) + : "m" (v->counter)); } /** * atomic_dec_and_test - decrement and test * @v: pointer of type atomic_t - * + * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. Note that the guaranteed * useful range of an atomic_t is only 24 bits. - */ + */ static __inline__ int atomic_dec_and_test(volatile atomic_t *v) { unsigned long c; - + __asm__ __volatile__( - "decl %0; movl %0, %1" - : "=m" (v->counter), "=g" (c) - : "m" (v->counter): "memory"); + " decl %0 \n" + " movl %0, %1 \n" + : "=m" (v->counter), + "=g" (c) + : "m" (v->counter) + : "memory"); + return (c == 0); } - /** - * atomic_inc_and_test - increment and test + * atomic_inc_and_test - increment and test * @v: pointer of type atomic_t - * + * * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. Note that the guaranteed * useful range of an atomic_t is only 24 bits. 
- */ + */ static __inline__ int atomic_inc_and_test(atomic_t *v) { unsigned long c; __asm__ __volatile__( - "incl %0; movl %0, %1" - :"=m" (v->counter), "=g" (c) - :"m" (v->counter) : "memory"); + " incl %0 \n" + " movl %0, %1 \n" + : "=m" (v->counter), + "=g" (c) + : "m" (v->counter) + : "memory"); + return (c == 0); } @@ -162,12 +174,12 @@ * atomic_add_negative - add and test if negative * @v: pointer of type atomic_t * @i: integer value to add - * + * * Atomically adds @i to @v and returns true * if the result is negative, or false when * result is greater than or equal to zero. Note that the guaranteed * useful range of an atomic_t is only 24 bits. - */ + */ static __inline__ int atomic_add_negative(int i, atomic_t *v) { int retval = 0; @@ -181,7 +193,10 @@ return retval; } -/* These are x86-specific, used by some header files + + +/* + * These are x86-specific, used by some header files * But we may find a use for them too. */ #define atomic_clear_mask(mask, v) \ @@ -196,36 +211,45 @@ #define smp_mb__after_atomic_inc() smp_mb() -/* +/* * These functions are used in semaphore.h */ -static __inline__ long atomic_add_return(int i, atomic_t * v) +static __inline__ long atomic_add_return(int i, atomic_t *v) { - long temp, result; - __asm__ __volatile__( - "1: movl %1, %0\n" - " addl2 %3,%0\n" - " movl %0,%2\n" - " movl %0,%1\n" - :"=&r" (temp), "=m" (v->counter), "=&r" (result) - :"Ir" (i), "m" (v->counter)); - return result; + long temp, result; + + __asm__ __volatile__( + "1: movl %1, %0 \n" + " addl2 %3, %0 \n" + " movl %0, %2 \n" + " movl %0, %1 \n" + : "=&r" (temp), + "=m" (v->counter), + "=&r" (result) + : "Ir" (i), + "m" (v->counter)); + + return result; } -static __inline__ long atomic_sub_return(int i, atomic_t * v) +static __inline__ long atomic_sub_return(int i, atomic_t *v) { - long temp, result; - __asm__ __volatile__( - "1: movl %1, %0\n" - " subl2 %3,%0\n" - " movl %0,%2\n" - " movl %0,%1\n" - :"=&r" (temp), "=m" (v->counter), "=&r" (result) - :"Ir" 
(i), "m" (v->counter)); - return result; + long temp, result; + __asm__ __volatile__( + "1: movl %1, %0 \n" + " subl2 %3, %0 \n" + " movl %0, %2 \n" + " movl %0, %1 \n" + : "=&r" (temp), + "=m" (v->counter), + "=&r" (result) + : "Ir" (i), + "m" (v->counter)); + + return result; } -#define atomic_dec_return(v) atomic_sub_return(1,(v)) -#define atomic_inc_return(v) atomic_add_return(1,(v)) +#define atomic_dec_return(v) atomic_sub_return(1,(v)) +#define atomic_inc_return(v) atomic_add_return(1,(v)) -#endif /* _ARCH_VAX_ATOMIC*/ +#endif /* _VAX_ATOMIC_H */ |
From: Kenn H. <ke...@us...> - 2005-04-26 22:40:31
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv30597 Modified Files: mv.h Log Message: Add is_ka650() Index: mv.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/mv.h,v retrieving revision 1.30 retrieving revision 1.31 diff -u -d -r1.30 -r1.31 --- mv.h 4 Oct 2004 06:46:21 -0000 1.30 +++ mv.h 26 Apr 2005 22:40:21 -0000 1.31 @@ -162,6 +162,14 @@ #endif } +static inline int is_ka650(void) { +#ifdef CONFIG_CPU_KA49 + return mv == &mv_ka650; +#else + return 0; +#endif +} + static inline int is_vxt(void) { #ifdef CONFIG_CPU_VXT return mv == &mv_vxt; |
From: Jan-Benedict G. <jb...@us...> - 2005-04-26 22:38:02
|
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv25835 Modified Files: cpu_ka43.c Log Message: - Enabling KA43's cache is as easy as, aem, enabling it. - Performance increases dramatically, you can even feel it while it's scrolling it's boot-up text: | Speed/1BogoMIPS | LoopsPerJiffie ----------------+-----------------------+---------------- Cache off | 0.96 | 4800 Cache on | 8.80 | 44032 | Speed factor | Speed gain ----------------+---------------+------------ BogoMIPS | 9.167 | 816.7% LoopsPerJiffie | 9.173 | 817.3% - Impressive artwork, eh? 8-) Index: cpu_ka43.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel/cpu_ka43.c,v retrieving revision 1.24 retrieving revision 1.25 diff -u -d -r1.24 -r1.25 --- cpu_ka43.c 25 Apr 2005 22:25:05 -0000 1.24 +++ cpu_ka43.c 26 Apr 2005 22:37:50 -0000 1.25 @@ -2,7 +2,7 @@ * Copyright (C) 2000 Kenn Humborg * * This file contains generic machine vector handlers for the - * KA42 and KA43 CPUs in the RIGEL-based VAXstation 3100 + * KA43 CPU in the RIGEL-based VAXstation 3100 * * 2000/04/01 Mattias Nordlund * Fixed the cache initializing, added the functions @@ -93,7 +93,7 @@ static void ka43_cache_enable(volatile unsigned int *creg_addr) { - volatile char *membase = (void*)0x80000000; /* Physical 0x00000000 */ + volatile char *membase = (void *) 0x80000000; /* Physical 0x00000000 */ int i, val; /* Enable primary cache */ @@ -114,11 +114,8 @@ * it and enable again. */ ka43_cache_disable(ka43_creg_addr); -#warning KA43 does not enable caches again! -#if 0 ka43_cache_clear(ka43_ctag_addr); ka43_cache_enable(ka43_creg_addr); -#endif } /* |
From: Jan-Benedict G. <jb...@us...> - 2005-04-26 22:22:00
|
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv19780 Modified Files: syscall.c Log Message: - Starting off with a pristine tree, I noticed that the number of expected arguments for stat64 was wrong (three instead of two). Have we all had a fix for that in our local trees? Index: syscall.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/kernel/syscall.c,v retrieving revision 1.13 retrieving revision 1.14 diff -u -d -r1.13 -r1.14 --- syscall.c 25 Apr 2005 13:11:56 -0000 1.13 +++ syscall.c 26 Apr 2005 22:21:50 -0000 1.14 @@ -224,7 +224,7 @@ [__NR_mmap2] = 6, /* 192 */ [__NR_truncate64] = 2, /* 193 */ [__NR_ftruncate64] = 2, /* 194 */ - [__NR_stat64] = 3, /* 195 */ + [__NR_stat64] = 2, /* 195 */ [__NR_lstat64] = 2, /* 196 */ [__NR_fstat64] = 2, /* 197 */ [__NR_lchown32] = 3, /* 198 */ |
From: Jan-Benedict G. <jb...@us...> - 2005-04-26 21:52:37
|
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv4396/include/asm-vax Modified Files: cacheflush.h Log Message: - Update headerfile recursion blocker - Long time ago, we settled to start assembler lines at the same indention as the asm keyword itself. Index: cacheflush.h =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/include/asm-vax/cacheflush.h,v retrieving revision 1.7 retrieving revision 1.8 diff -u -d -r1.7 -r1.8 --- cacheflush.h 25 Apr 2005 22:25:07 -0000 1.7 +++ cacheflush.h 26 Apr 2005 21:52:22 -0000 1.8 @@ -1,5 +1,5 @@ -#ifndef __VAX_CACHEFLUSH_H -#define __VAX_CACHEFLUSH_H +#ifndef _VAX_CACHEFLUSH_H +#define _VAX_CACHEFLUSH_H #include <linux/mm.h> @@ -34,10 +34,10 @@ * an interrupt, and then REI. */ __asm__ ( - " movpsl -(%sp) \n" - " pushab 1f \n" - " rei \n" - "1: \n"); + " movpsl -(%sp) \n" + " pushab 1f \n" + " rei \n" + "1: \n"); } #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ @@ -49,4 +49,4 @@ #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) -#endif /* __VAX_MM_CACHE_H */ +#endif /* _VAX_CACHEFLUSH_H */ |
From: Jan-Benedict G. <jb...@us...> - 2005-04-26 21:50:06
|
Update of /cvsroot/linux-vax/kernel-2.5/drivers/vax/net In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv3104/drivers/vax/net Modified Files: sgec.c Log Message: - Oops, some too-early-to-use code slipped in. Index: sgec.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/drivers/vax/net/sgec.c,v retrieving revision 1.5 retrieving revision 1.6 diff -u -d -r1.5 -r1.6 --- sgec.c 25 Apr 2005 09:37:12 -0000 1.5 +++ sgec.c 26 Apr 2005 21:49:57 -0000 1.6 @@ -933,9 +933,11 @@ */ printk("Ethernet address in ROM: "); for (i = 0; i < 6; i++) { +#if 0 /* Not yet */ if (is_ka670 ()) dev->dev_addr[i] = (esar[i] & 0xff00) >> 8; else +#endif dev->dev_addr[i] = esar[i] & 0xff; printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); } |
From: Jan-Benedict G. <jb...@us...> - 2005-04-25 22:57:28
|
Update of /cvsroot/linux-vax/kernel-2.5/arch/vax/boot In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv18448 Modified Files: cpu_sel.c Log Message: - Access cpu_matches as an array, not by pointer arithmetic. Index: cpu_sel.c =================================================================== RCS file: /cvsroot/linux-vax/kernel-2.5/arch/vax/boot/cpu_sel.c,v retrieving revision 1.15 retrieving revision 1.16 diff -u -d -r1.15 -r1.16 --- cpu_sel.c 25 Apr 2005 22:25:04 -0000 1.15 +++ cpu_sel.c 25 Apr 2005 22:56:58 -0000 1.16 @@ -18,45 +18,41 @@ s0vmaddr_to_load_addr(void *vaddr) { extern char _stext; - return (char *)vaddr - PAGE_OFFSET - KERNEL_START_PHYS + (unsigned int) &_stext; + + return (char *) vaddr - PAGE_OFFSET - KERNEL_START_PHYS + (unsigned int) &_stext; } struct vax_mv * idcpu (void) { + extern struct cpu_match __init_cpumatch_start, __init_cpumatch_end; + struct cpu_match *match = &__init_cpumatch_start; unsigned long sid; unsigned long sidex; - struct cpu_match *match; unsigned int i; unsigned int num_matches; struct vax_mv *retmv; - extern struct cpu_match __init_cpumatch_start, __init_cpumatch_end; - sid = __mfpr (PR_SID); - num_matches = &__init_cpumatch_end - &__init_cpumatch_start; for (i = 0; i < num_matches; i++) { - - match = &__init_cpumatch_start + i; - - if ((sid & match->sid_mask) == match->sid_match) { + if ((sid & match[i].sid_mask) == match[i].sid_match) { /* * No sidex known? Accept the vector. * FIXME: Maybe sort the metch structs to have * those with "long" masks first, then the loose * entries with weaker/shorter masks */ - if (!match->sidex_addr) - return s0vmaddr_to_load_addr(match->mv); + if (!match[i].sidex_addr) + return s0vmaddr_to_load_addr(match[i].mv); /* * If a SIDEX match was supplied, too, check it! 
*/ - sidex = * ((unsigned long *) match->sidex_addr); - if ((sidex & match->sidex_mask) == match->sidex_match) { - retmv = s0vmaddr_to_load_addr(match->mv); + sidex = * ((unsigned long *) match[i].sidex_addr); + if ((sidex & match[i].sidex_mask) == match[i].sidex_match) { + retmv = s0vmaddr_to_load_addr(match[i].mv); retmv->sidex = sidex; return retmv; } |