From: James S. <jsi...@us...> - 2001-10-31 18:26:55
Update of /cvsroot/linux-mips/linux/include/asm-mips64
In directory usw-pr-cvs1:/tmp/cvs-serv9038/include/asm-mips64

Modified Files:
	checksum.h io.h mipsregs.h mmu_context.h pci.h pgtable.h
	processor.h softirq.h
Added Files:
	bcache.h branch.h r10kcache.h siginfo.h unaligned.h
Log Message:
Replace all incarnations of extern inline with static inline.
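[Editor's note, not part of the commit: under the traditional GNU C89 semantics gcc applies to kernel code, the two spellings behave differently when the compiler declines to inline, which is what motivates the tree-wide replacement. A minimal sketch:]

/* extern inline: the body is only a substitute for calls that actually
 * get inlined; gcc emits no out-of-line definition, so any call left
 * un-inlined (-O0, -fno-inline, address taken) becomes an undefined
 * symbol at link time unless some .c file provides a real definition. */
extern inline int ei_inc(int x) { return x + 1; }

/* static inline: gcc may still inline, but it can also emit a private
 * out-of-line copy in each translation unit that needs one, so the
 * call always resolves. */
static inline int si_inc(int x) { return x + 1; }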
--- NEW FILE: bcache.h ---

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1997, 1999 by Ralf Baechle
 * Copyright (c) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_BCACHE_H
#define _ASM_BCACHE_H

#include <linux/config.h>

#ifdef CONFIG_BOARD_SCACHE

/* Some R4000 / R4400 / R4600 / R5000 machines may have non-DMA-coherent,
   chipset-implemented caches.  On machines with other CPUs the CPU does the
   cache thing itself. */
struct bcache_ops {
	void (*bc_enable)(void);
	void (*bc_disable)(void);
	void (*bc_wback_inv)(unsigned long page, unsigned long size);
	void (*bc_inv)(unsigned long page, unsigned long size);
};

extern void indy_sc_init(void);
extern void sni_pcimt_sc_init(void);

extern struct bcache_ops *bcops;

static inline void bc_enable(void)
{
	bcops->bc_enable();
}

static inline void bc_disable(void)
{
	bcops->bc_disable();
}

static inline void bc_wback_inv(unsigned long page, unsigned long size)
{
	bcops->bc_wback_inv(page, size);
}

static inline void bc_inv(unsigned long page, unsigned long size)
{
	bcops->bc_inv(page, size);
}

#else /* !defined(CONFIG_BOARD_SCACHE) */

/* Not R4000 / R4400 / R4600 / R5000.  */
#define bc_enable() do { } while (0)
#define bc_disable() do { } while (0)
#define bc_wback_inv(page, size) do { } while (0)
#define bc_inv(page, size) do { } while (0)

#endif /* !defined(CONFIG_BOARD_SCACHE) */

#endif /* _ASM_BCACHE_H */
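[Editor's note: the ops table is filled in by platform setup code; only the init prototypes appear in this header, so the registration sketch below is hypothetical -- the function bodies and the ops instance are invented for illustration.]

/* Hypothetical board driver wiring its second-level cache handlers
 * into the global bcops pointer. */
static void board_sc_enable(void) { /* program chipset enable bits */ }
static void board_sc_disable(void) { /* ... */ }
static void board_sc_wback_inv(unsigned long page, unsigned long size) { /* ... */ }
static void board_sc_inv(unsigned long page, unsigned long size) { /* ... */ }

static struct bcache_ops board_sc_ops = {
	board_sc_enable,
	board_sc_disable,
	board_sc_wback_inv,
	board_sc_inv
};

void board_sc_init(void)	/* cf. indy_sc_init() / sni_pcimt_sc_init() */
{
	bcops = &board_sc_ops;
	bc_enable();		/* from here on the wrappers above work */
}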
--- NEW FILE: branch.h ---

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Branch and jump emulation.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 */
#include <asm/ptrace.h>

static inline int delay_slot(struct pt_regs *regs)
{
	return regs->cp0_cause & CAUSEF_BD;
}

extern int __compute_return_epc(struct pt_regs *regs);

static inline int compute_return_epc(struct pt_regs *regs)
{
	if (!delay_slot(regs)) {
		regs->cp0_epc += 4;
		return 0;
	}

	return __compute_return_epc(regs);
}
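[Editor's note: compute_return_epc() is what fault and emulation paths use to step the saved EPC past an instruction; a hypothetical caller, for illustration only:]

/* Hypothetical emulation path: after handling the instruction at
 * regs->cp0_epc, advance EPC.  For an instruction in a branch delay
 * slot (CAUSEF_BD set in c0_cause), __compute_return_epc() must decode
 * the branch at cp0_epc to find the real continuation address; a
 * nonzero return means the branch could not be decoded. */
asmlinkage void do_emulate_something(struct pt_regs *regs)
{
	/* ... emulate the faulting instruction ... */
	if (compute_return_epc(regs))
		return;			/* couldn't decode the branch */
	/* execution resumes at the updated regs->cp0_epc */
}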
--- NEW FILE: r10kcache.h ---

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (dm...@en...)
 * Copyright (C) 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * FIXME: Handle split L2 caches.
 */
#ifndef _ASM_R10KCACHE_H
#define _ASM_R10KCACHE_H

#include <asm/asm.h>
#include <asm/r10kcacheops.h>

/* These are fixed for the current R10000.  */
#define icache_size	0x8000
#define dcache_size	0x8000
#define icache_way_size	0x4000
#define dcache_way_size	0x4000
#define ic_lsize	64
#define dc_lsize	32

/* These are configuration dependent.  */
#define scache_size() ({						\
	unsigned long __res;						\
	__res = (read_32bit_cp0_register(CP0_CONFIG) >> 16) & 3;	\
	__res = 1 << (__res + 19);					\
	__res;								\
})

#define sc_lsize() ({							\
	unsigned long __res;						\
	__res = (read_32bit_cp0_register(CP0_CONFIG) >> 13) & 1;	\
	__res = 1 << (__res + 6);					\
	__res;								\
})

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Index_Invalidate_I));
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Index_Writeback_Inv_D));
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Index_Writeback_Inv_S));
}

static inline void flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Hit_Invalidate_I));
}

static inline void flush_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Hit_Writeback_Inv_D));
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Hit_Invalidate_D));
}

static inline void invalidate_scache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Hit_Invalidate_S));
}

static inline void flush_scache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Hit_Writeback_Inv_S));
}

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"1:\tcache %1,(%0)\n"
		"2:\t.set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		".dword\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr), "i" (Hit_Invalidate_I));
}

static inline void protected_writeback_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"1:\tcache %1,(%0)\n"
		"2:\t.set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		".dword\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr), "i" (Hit_Writeback_Inv_D));
}

#define cache32_unroll16(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x020(%0);	\
		cache %1, 0x040(%0); cache %1, 0x060(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\
		cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x120(%0);	\
		cache %1, 0x140(%0); cache %1, 0x160(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\
		cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache32_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x020(%0);	\
		cache %1, 0x040(%0); cache %1, 0x060(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\
		cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x120(%0);	\
		cache %1, 0x140(%0); cache %1, 0x160(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\
		cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x220(%0);	\
		cache %1, 0x240(%0); cache %1, 0x260(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2a0(%0);	\
		cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x320(%0);	\
		cache %1, 0x340(%0); cache %1, 0x360(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3a0(%0);	\
		cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

static inline void blast_dcache32(void)
{
	unsigned long way0 = KSEG0;
	unsigned long way1 = way0 ^ 1;
	unsigned long end = (way0 + dcache_way_size);

	while (way0 < end) {
		cache32_unroll16(way0, Index_Writeback_Inv_D);
		cache32_unroll16(way1, Index_Writeback_Inv_D);
		way0 += 0x200;
		way1 += 0x200;
	}
}

static inline void blast_dcache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache32_unroll32(start, Hit_Writeback_Inv_D);
		start += 0x400;
	}
}

static inline void blast_dcache32_page_indexed(unsigned long page)
{
	unsigned long way0 = page;
	unsigned long way1 = page ^ 1;
	unsigned long end = page + PAGE_SIZE;

	while (way0 < end) {
		cache32_unroll16(way0, Index_Writeback_Inv_D);
		cache32_unroll16(way1, Index_Writeback_Inv_D);
		way0 += 0x200;
		way1 += 0x200;
	}
}

#define cache64_unroll16(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x040(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0c0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x140(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1c0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x240(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2c0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x340(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3c0(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache64_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x040(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0c0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x140(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1c0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x240(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2c0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x340(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3c0(%0);	\
		cache %1, 0x400(%0); cache %1, 0x440(%0);	\
		cache %1, 0x480(%0); cache %1, 0x4c0(%0);	\
		cache %1, 0x500(%0); cache %1, 0x540(%0);	\
		cache %1, 0x580(%0); cache %1, 0x5c0(%0);	\
		cache %1, 0x600(%0); cache %1, 0x640(%0);	\
		cache %1, 0x680(%0); cache %1, 0x6c0(%0);	\
		cache %1, 0x700(%0); cache %1, 0x740(%0);	\
		cache %1, 0x780(%0); cache %1, 0x7c0(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

static inline void blast_icache64(void)
{
	unsigned long way0 = KSEG0;
	unsigned long way1 = way0 ^ 1;
	unsigned long end = way0 + icache_way_size;

	while (way0 < end) {
		cache64_unroll16(way0,Index_Invalidate_I);
		cache64_unroll16(way1,Index_Invalidate_I);
		way0 += 0x400;
		way1 += 0x400;
	}
}

static inline void blast_icache64_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache64_unroll32(start,Hit_Invalidate_I);
		start += 0x800;
	}
}

static inline void blast_icache64_page_indexed(unsigned long page)
{
	unsigned long way0 = page;
	unsigned long way1 = page ^ 1;
	unsigned long end = page + PAGE_SIZE;

	while (way0 < end) {
		cache64_unroll16(way0,Index_Invalidate_I);
		cache64_unroll16(way1,Index_Invalidate_I);
		way0 += 0x400;
		way1 += 0x400;
	}
}

static inline void blast_scache64(void)
{
	unsigned long way0 = KSEG0;
	unsigned long way1 = way0 ^ 1;
	unsigned long end = KSEG0 + scache_size();

	while (way0 < end) {
		cache64_unroll16(way0,Index_Writeback_Inv_S);
		cache64_unroll16(way1,Index_Writeback_Inv_S);
		way0 += 0x400;
		way1 += 0x400;
	}
}

static inline void blast_scache64_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache64_unroll32(start,Hit_Writeback_Inv_S);
		start += 0x800;
	}
}

static inline void blast_scache64_page_indexed(unsigned long page)
{
	unsigned long way0 = page;
	unsigned long way1 = page ^ 1;
	unsigned long end = page + PAGE_SIZE;

	while (way0 < end) {
		cache64_unroll16(way0,Index_Writeback_Inv_S);
		cache64_unroll16(way1,Index_Writeback_Inv_S);
		way0 += 0x400;
		way1 += 0x400;
	}
}

#define cache128_unroll16(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x080(%0);	\
		cache %1, 0x100(%0); cache %1, 0x180(%0);	\
		cache %1, 0x200(%0); cache %1, 0x280(%0);	\
		cache %1, 0x300(%0); cache %1, 0x380(%0);	\
		cache %1, 0x400(%0); cache %1, 0x480(%0);	\
		cache %1, 0x500(%0); cache %1, 0x580(%0);	\
		cache %1, 0x600(%0); cache %1, 0x680(%0);	\
		cache %1, 0x700(%0); cache %1, 0x780(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache128_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x080(%0);	\
		cache %1, 0x100(%0); cache %1, 0x180(%0);	\
		cache %1, 0x200(%0); cache %1, 0x280(%0);	\
		cache %1, 0x300(%0); cache %1, 0x380(%0);	\
		cache %1, 0x400(%0); cache %1, 0x480(%0);	\
		cache %1, 0x500(%0); cache %1, 0x580(%0);	\
		cache %1, 0x600(%0); cache %1, 0x680(%0);	\
		cache %1, 0x700(%0); cache %1, 0x780(%0);	\
		cache %1, 0x800(%0); cache %1, 0x880(%0);	\
		cache %1, 0x900(%0); cache %1, 0x980(%0);	\
		cache %1, 0xa00(%0); cache %1, 0xa80(%0);	\
		cache %1, 0xb00(%0); cache %1, 0xb80(%0);	\
		cache %1, 0xc00(%0); cache %1, 0xc80(%0);	\
		cache %1, 0xd00(%0); cache %1, 0xd80(%0);	\
		cache %1, 0xe00(%0); cache %1, 0xe80(%0);	\
		cache %1, 0xf00(%0); cache %1, 0xf80(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

static inline void blast_scache128(void)
{
	unsigned long way0 = KSEG0;
	unsigned long way1 = way0 ^ 1;
	unsigned long end = way0 + scache_size();

	while (way0 < end) {
		cache128_unroll16(way0, Index_Writeback_Inv_S);
		cache128_unroll16(way1, Index_Writeback_Inv_S);
		way0 += 0x800;
		way1 += 0x800;
	}
}

static inline void blast_scache128_page(unsigned long page)
{
	cache128_unroll32(page, Hit_Writeback_Inv_S);
}

static inline void blast_scache128_page_indexed(unsigned long page)
{
	cache128_unroll32(page,     Index_Writeback_Inv_S);
	cache128_unroll32(page ^ 1, Index_Writeback_Inv_S);
}

#endif /* _ASM_R10KCACHE_H */
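[Editor's note: two observations on the arithmetic in this file, derived from the code above rather than from the commit itself:]

/*
 * scache_size()/sc_lsize() decode of CP0 Config, worked out:
 *
 *   two bits at 17:16 (call them SS): size = 1 << (SS + 19)
 *     SS=0 -> 0x080000 (512 KB)    SS=1 -> 0x100000 (1 MB)
 *     SS=2 -> 0x200000 (2 MB)      SS=3 -> 0x400000 (4 MB)
 *   one bit at 13: line size = 1 << (bit + 6), i.e. 64 or 128 bytes.
 *
 * Two-way loops: the R10000 caches are two-way set associative, and the
 * index-type cache ops select the way through bit 0 of the supplied
 * address, hence way1 = way0 ^ 1 hits the second way at the same index.
 * Each cache32_unroll16() covers 16 lines * 32 bytes = 0x200 bytes of
 * one way, matching the 0x200 loop stride; the 64-byte variants cover
 * 16 * 64 = 0x400 bytes per iteration.
 */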
Index: checksum.h
===================================================================
RCS file: /cvsroot/linux-mips/linux/include/asm-mips64/checksum.h,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -d -r1.4 -r1.5
--- checksum.h	2001/10/23 17:14:04	1.4
+++ checksum.h	2001/10/31 18:26:52	1.5
@@ -43,7 +43,7 @@
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-extern inline unsigned int csum_and_copy_to_user (const char *src, char *dst,
+static inline unsigned int csum_and_copy_to_user (const char *src, char *dst,
 						  int len, int sum,
 						  int *err_ptr)
 {

Index: io.h
===================================================================
RCS file: /cvsroot/linux-mips/linux/include/asm-mips64/io.h,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -d -r1.6 -r1.7
--- io.h	2001/10/22 19:16:45	1.6
+++ io.h	2001/10/31 18:26:52	1.7
@@ -155,7 +155,7 @@
  */
 
 #define __OUT1(s) \
-extern inline void __out##s(unsigned int value, unsigned long port) {
+static inline void __out##s(unsigned int value, unsigned long port) {
 
 #define __OUT2(m) \
 __asm__ __volatile__ ("s" #m "\t%0,%1(%2)"
@@ -169,7 +169,7 @@
 SLOW_DOWN_IO; }
 
 #define __IN1(t,s) \
-extern inline t __in##s(unsigned long port) { t _v;
+static inline t __in##s(unsigned long port) { t _v;
 
 /*
  * Required nops will be inserted by the assembler
@@ -184,7 +184,7 @@
 __IN1(t,s##c_p) __IN2(m) : "=r" (_v) : "ir" (port), "r" (mips_io_port_base)); SLOW_DOWN_IO; return _v; }
 
 #define __INS1(s) \
-extern inline void __ins##s(unsigned long port, void * addr, unsigned long count) {
+static inline void __ins##s(unsigned long port, void * addr, unsigned long count) {
 
 #define __INS2(m) \
 if (count) \
@@ -210,7 +210,7 @@
 	"I" (i));}
 
 #define __OUTS1(s) \
-extern inline void __outs##s(unsigned long port, const void * addr, unsigned long count) {
+static inline void __outs##s(unsigned long port, const void * addr, unsigned long count) {
 
 #define __OUTS2(m) \
 if (count) \
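[Editor's note: for orientation, roughly what the __IN builders touched above stamp out for the byte-wide case. This expansion is reconstructed by hand and simplified -- the `lbu` opcode and the exact layout are assumptions, not part of the patch:]

/* Approximate expansion of the byte port read generated by the
 * __IN1/__IN2 builders, reconstructed for illustration: */
static inline unsigned char __inb(unsigned long port)
{
	unsigned char _v;

	__asm__ __volatile__("lbu\t%0,%1(%2)"
		: "=r" (_v)
		: "ir" (port), "r" (mips_io_port_base));

	return _v;
}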
Index: mipsregs.h
===================================================================
RCS file: /cvsroot/linux-mips/linux/include/asm-mips64/mipsregs.h,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -d -r1.4 -r1.5
--- mipsregs.h	2001/10/27 17:28:55	1.4
+++ mipsregs.h	2001/10/31 18:26:52	1.5
@@ -85,37 +85,6 @@
 #define PL_16M		24
 
 /*
- * Macros to access the system control coprocessor
- */
-#define read_32bit_cp0_register(source)				\
-({ int __res;							\
-	__asm__ __volatile__(					\
-	"mfc0\t%0,"STR(source)					\
-	: "=r" (__res));					\
-	__res;})
-
-#define read_64bit_cp0_register(source)				\
-({ int __res;							\
-	__asm__ __volatile__(					\
-	".set\tmips3\n\t"					\
-	"dmfc0\t%0,"STR(source)"\n\t"				\
-	".set\tmips0"						\
-	: "=r" (__res));					\
-	__res;})
-
-#define write_32bit_cp0_register(register,value)		\
-	__asm__ __volatile__(					\
-	"mtc0\t%0,"STR(register)				\
-	: : "r" (value));
-
-#define write_64bit_cp0_register(register,value)		\
-	__asm__ __volatile__(					\
-	".set\tmips3\n\t"					\
-	"dmtc0\t%0,"STR(register)"\n\t"				\
-	".set\tmips0"						\
-	: : "r" (value))
-
-/*
  * R4x00 interrupt enable / cause bits
  */
 #define IE_SW0		(1<< 8)
@@ -139,57 +108,7 @@
 #define C_IRQ4		(1<<14)
 #define C_IRQ5		(1<<15)
 
-#ifndef _LANGUAGE_ASSEMBLY
 /*
- * Manipulate the status register.
- * Mostly used to access the interrupt bits.
- */
-#define __BUILD_SET_CP0(name,register)				\
-extern inline unsigned int					\
-set_cp0_##name(unsigned int set)				\
-{								\
-	unsigned int res;					\
-								\
-	res = read_32bit_cp0_register(register);		\
-	res |= set;						\
-	write_32bit_cp0_register(register, res);		\
-								\
-	return res;						\
-}								\
-								\
-extern inline unsigned int					\
-clear_cp0_##name(unsigned int clear)				\
-{								\
-	unsigned int res;					\
-								\
-	res = read_32bit_cp0_register(register);		\
-	res &= ~clear;						\
-	write_32bit_cp0_register(register, res);		\
-								\
-	return res;						\
-}								\
-								\
-extern inline unsigned int					\
-change_cp0_##name(unsigned int change, unsigned int new)	\
-{								\
-	unsigned int res;					\
-								\
-	res = read_32bit_cp0_register(register);		\
-	res &= ~change;						\
-	res |= (new & change);					\
-	if (change)						\
-		write_32bit_cp0_register(register, res);	\
-								\
-	return res;						\
-}
-
-__BUILD_SET_CP0(status,CP0_STATUS)
-__BUILD_SET_CP0(cause,CP0_CAUSE)
-__BUILD_SET_CP0(config,CP0_CONFIG)
-
-#endif /* defined (_LANGUAGE_ASSEMBLY) */
-
-/*
  * Bitfields in the R4xx0 cp0 status register
  */
 #define ST0_IE		0x00000001
@@ -350,6 +269,375 @@
 extern asmlinkage void write_perf_cntr(unsigned int counter, unsigned int val);
 extern asmlinkage unsigned int read_perf_cntl(unsigned int counter);
 extern asmlinkage void write_perf_cntl(unsigned int counter, unsigned int val);
-#endif
+
+/*
+ * Macros to access the system control coprocessor
+ */
+#define read_32bit_cp0_register(source)				\
+({ int __res;							\
+	__asm__ __volatile__(					\
+	"mfc0\t%0,"STR(source)					\
+	: "=r" (__res));					\
+	__res;})
+
+#define read_64bit_cp0_register(source)				\
+({ int __res;							\
+	__asm__ __volatile__(					\
+	".set\tmips3\n\t"					\
+	"dmfc0\t%0,"STR(source)"\n\t"				\
+	".set\tmips0"						\
+	: "=r" (__res));					\
+	__res;})
+
+#define write_32bit_cp0_register(register,value)		\
+	__asm__ __volatile__(					\
+	"mtc0\t%0,"STR(register)				\
+	: : "r" (value));
+
+#define write_64bit_cp0_register(register,value)		\
+	__asm__ __volatile__(					\
+	".set\tmips3\n\t"					\
+	"dmtc0\t%0,"STR(register)"\n\t"				\
+	".set\tmips0"						\
+	: : "r" (value))
+
+/* TLB operations. */
+static inline void tlb_probe(void)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"tlbp\n\t"
+		".set reorder");
+}
+
+static inline void tlb_read(void)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"tlbr\n\t"
+		".set reorder");
+}
+
+static inline void tlb_write_indexed(void)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"tlbwi\n\t"
+		".set reorder");
+}
+
+static inline void tlb_write_random(void)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"tlbwr\n\t"
+		".set reorder");
+}
+
+/* Dealing with various CP0 mmu/cache related registers.  */
+
+/* CP0_PAGEMASK register */
+static inline unsigned long get_pagemask(void)
+{
+	unsigned long val;
+
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"mfc0 %0, $5\n\t"
+		".set reorder"
+		: "=r" (val));
+	return val;
+}
+
+static inline void set_pagemask(unsigned long val)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"mtc0 %z0, $5\n\t"
+		".set reorder"
+		: : "Jr" (val));
+}
+
+/* CP0_ENTRYLO0 and CP0_ENTRYLO1 registers */
+static inline unsigned long get_entrylo0(void)
+{
+	unsigned long val;
+
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"dmfc0 %0, $2\n\t"
+		".set reorder"
+		: "=r" (val));
+	return val;
+}
+
+static inline void set_entrylo0(unsigned long val)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"dmtc0 %z0, $2\n\t"
+		".set reorder"
+		: : "Jr" (val));
+}
+
+static inline unsigned long get_entrylo1(void)
+{
+	unsigned long val;
+
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"dmfc0 %0, $3\n\t"
+		".set reorder" : "=r" (val));
+
+	return val;
+}
+
+static inline void set_entrylo1(unsigned long val)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"dmtc0 %z0, $3\n\t"
+		".set reorder"
+		: : "Jr" (val));
+}
+
+/* CP0_ENTRYHI register */
+static inline unsigned long get_entryhi(void)
+{
+	unsigned long val;
+
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"dmfc0 %0, $10\n\t"
+		".set reorder"
+		: "=r" (val));
+
+	return val;
+}
+
+static inline void set_entryhi(unsigned long val)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"dmtc0 %z0, $10\n\t"
+		".set reorder"
+		: : "Jr" (val));
+}
+
+/* CP0_INDEX register */
+static inline unsigned int get_index(void)
+{
+	unsigned long val;
+
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"mfc0 %0, $0\n\t"
+		".set reorder"
+		: "=r" (val));
+	return val;
+}
+
+static inline void set_index(unsigned int val)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"mtc0 %z0, $0\n\t"
+		".set reorder\n\t"
+		: : "Jr" (val));
+}
+
+/* CP0_WIRED register */
+static inline unsigned long get_wired(void)
+{
+	unsigned long val;
+
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"mfc0 %0, $6\n\t"
+		".set reorder\n\t"
+		: "=r" (val));
+	return val;
+}
+
+static inline void set_wired(unsigned long val)
+{
+	__asm__ __volatile__(
+		"\n\t.set noreorder\n\t"
+		"mtc0 %z0, $6\n\t"
+		".set reorder"
+		: : "Jr" (val));
+}
+
+static inline unsigned long get_info(void)
+{
+	unsigned long val;
+
+	__asm__(".set push\n\t"
+		".set reorder\n\t"
+		"mfc0 %0, $7\n\t"
+		".set pop"
+		: "=r" (val));
+	return val;
+}
+
+/* CP0_STATUS registers */
+static inline unsigned long get_status(void)
+{
+	unsigned long val;
+
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"mfc0 %0, $12\n\t"
+		".set reorder"
+		: "=r" (val));
+	return val;
+}
+
+static inline void set_status(unsigned long val)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"mtc0 %z0, $12\n\t"
+		".set reorder"
+		: : "Jr" (val));
+}
+
+/* CP0_TAGLO and CP0_TAGHI registers */
+static inline unsigned long get_taglo(void)
+{
+	unsigned long val;
+
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"mfc0 %0, $28\n\t"
+		".set reorder"
+		: "=r" (val));
+	return val;
+}
+
+static inline void set_taglo(unsigned long val)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"mtc0 %z0, $28\n\t"
+		".set reorder"
+		: : "Jr" (val));
+}
+
+static inline unsigned long get_taghi(void)
+{
+	unsigned long val;
+
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"mfc0 %0, $29\n\t"
+		".set reorder"
+		: "=r" (val));
+	return val;
+}
+
+static inline void set_taghi(unsigned long val)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"mtc0 %z0, $29\n\t"
+		".set reorder"
+		: : "Jr" (val));
+}
+
+/* CP0_CONTEXT register */
+static inline unsigned long get_context(void)
+{
+	unsigned long val;
+
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"dmfc0 %0, $4\n\t"
+		".set reorder"
+		: "=r" (val));
+
+	return val;
+}
+
+static inline void set_context(unsigned long val)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"dmtc0 %z0, $4\n\t"
+		".set reorder"
+		: : "Jr" (val));
+}
+
+/*
+ * Manipulate the status register.
+ * Mostly used to access the interrupt bits.
+ */
+#define __BUILD_SET_CP0(name,register)				\
+static inline unsigned int					\
+set_cp0_##name(unsigned int set)				\
+{								\
+	unsigned int res;					\
+								\
+	res = read_32bit_cp0_register(register);		\
+	res |= set;						\
+	write_32bit_cp0_register(register, res);		\
+								\
+	return res;						\
+}								\
+								\
+static inline unsigned int					\
+clear_cp0_##name(unsigned int clear)				\
+{								\
+	unsigned int res;					\
+								\
+	res = read_32bit_cp0_register(register);		\
+	res &= ~clear;						\
+	write_32bit_cp0_register(register, res);		\
+								\
+	return res;						\
+}								\
+								\
+static inline unsigned int					\
+change_cp0_##name(unsigned int change, unsigned int new)	\
+{								\
+	unsigned int res;					\
+								\
+	res = read_32bit_cp0_register(register);		\
+	res &= ~change;						\
+	res |= (new & change);					\
+	if (change)						\
+		write_32bit_cp0_register(register, res);	\
+								\
+	return res;						\
+}
+
+__BUILD_SET_CP0(status,CP0_STATUS)
+__BUILD_SET_CP0(cause,CP0_CAUSE)
+__BUILD_SET_CP0(config,CP0_CONFIG)
+
+#define __enable_fpu()						\
+do {								\
+	set_cp0_status(ST0_CU1);				\
+	asm("nop;nop;nop;nop");		/* max. hazard */	\
+} while (0)
+
+#define __disable_fpu()						\
+do {								\
+	clear_cp0_status(ST0_CU1);				\
+	/* We don't care about the cp0 hazard here  */		\
+} while (0)
+
+#define enable_fpu()						\
+do {								\
+	if (mips_cpu.options & MIPS_CPU_FPU)			\
+		__enable_fpu();					\
+} while (0)
+
+#define disable_fpu()						\
+do {								\
+	if (mips_cpu.options & MIPS_CPU_FPU)			\
+		__disable_fpu();				\
+} while (0)
+#endif /* defined (_LANGUAGE_ASSEMBLY) */
 #endif /* _ASM_MIPSREGS_H */
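[Editor's note: a sketch of how the accessors added above combine in practice. The sequence mirrors the usual MIPS wired-entry update, but the function itself is illustrative rather than part of the patch, and real code must also respect CP0 hazards between the mtc0s and the tlbwi:]

/* Illustrative only: write one TLB entry via the helpers added above. */
static inline void write_one_tlb_entry(int idx, unsigned long hi,
	unsigned long lo0, unsigned long lo1, unsigned long mask)
{
	unsigned long old_ctx = get_entryhi();	/* keep the live ASID */

	set_pagemask(mask);
	set_entryhi(hi);	/* VPN2 | ASID of the new mapping */
	set_entrylo0(lo0);	/* even page: PFN plus C, D, V, G bits */
	set_entrylo1(lo1);	/* odd page */
	set_index(idx);
	tlb_write_indexed();	/* commit to the selected TLB slot */

	set_entryhi(old_ctx);	/* restore the live ASID */
}

The __BUILD_SET_CP0() helpers, in turn, are plain read-modify-write wrappers: set_cp0_status(ST0_IE) ORs the interrupt-enable bit into c0_status and returns the new value, and clear_cp0_status(ST0_IE) clears it again.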
Index: mmu_context.h
===================================================================
RCS file: /cvsroot/linux-mips/linux/include/asm-mips64/mmu_context.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -d -r1.1 -r1.2
--- mmu_context.h	2001/10/09 21:54:37	1.1
+++ mmu_context.h	2001/10/31 18:26:52	1.2
@@ -51,7 +51,7 @@
 #define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
 #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
 
-extern inline void
+static inline void
 get_new_cpu_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
 	unsigned long asid = ASID_CACHE(cpu);
@@ -68,7 +68,7 @@
  * Initialize the context related info for a new mm_struct
  * instance.
  */
-extern inline int
+static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 #ifndef CONFIG_SMP
@@ -87,7 +87,7 @@
 	return 0;
 }
 
-extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                              struct task_struct *tsk, unsigned cpu)
 {
 	/* Check if our ASID is of an older version and thus invalid */
@@ -102,7 +102,7 @@
  * Destroy context related info for an mm_struct that is about
  * to be put to rest.
  */
-extern inline void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
 {
 #ifdef CONFIG_SMP
 	if (mm->context)
@@ -114,7 +114,7 @@
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-extern inline void
+static inline void
 activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
 	/* Unconditionally get a new ASID. */
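[Editor's note: a worked example of the version arithmetic in the first hunk above, assuming an 8-bit hardware ASID (ASID_MASK == 0xff; the real width is CPU dependent):]

/*
 * With ASID_MASK == 0xff (assumed for illustration):
 *
 *   ASID_VERSION_MASK  = ~(0xff | 0xfe) = ~0xff  (all bits above the ASID)
 *   ASID_FIRST_VERSION = 0xff + 1       = 0x100
 *
 * The per-CPU ASID cache counts 0x100, 0x101, ..., 0x1ff, then wraps
 * into version 0x200.  switch_mm() compares the version bits of
 * mm->context against the cache; any mm still carrying an 0x1xx value
 * fails the compare and is routed to get_new_cpu_mmu_context() for a
 * fresh ASID, which is what makes hardware ASIDs safe to reuse.
 */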
Index: pci.h
===================================================================
RCS file: /cvsroot/linux-mips/linux/include/asm-mips64/pci.h,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -d -r1.3 -r1.4
--- pci.h	2001/09/04 16:16:15	1.3
+++ pci.h	2001/10/31 18:26:52	1.4
@@ -23,12 +23,12 @@
 #define PCIBIOS_MIN_IO		0x1000
 #define PCIBIOS_MIN_MEM		0x10000000
 
-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
 }
 
-extern inline void pcibios_penalize_isa_irq(int irq)
+static inline void pcibios_penalize_isa_irq(int irq)
 {
 	/* We don't do dynamic PCI IRQ allocation */
 }
Index: pgtable.h
===================================================================
RCS file: /cvsroot/linux-mips/linux/include/asm-mips64/pgtable.h,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -d -r1.5 -r1.6
--- pgtable.h	2001/10/19 21:19:40	1.5
+++ pgtable.h	2001/10/31 18:26:52	1.6
@@ -566,259 +566,13 @@
 #define kern_addr_valid(addr)	(1)
 #endif
 
-/* TLB operations. */
-static inline void tlb_probe(void)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"tlbp\n\t"
-		".set reorder");
-}
-
-static inline void tlb_read(void)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"tlbr\n\t"
-		".set reorder");
-}
-
-static inline void tlb_write_indexed(void)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"tlbwi\n\t"
-		".set reorder");
-}
-
-static inline void tlb_write_random(void)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"tlbwr\n\t"
-		".set reorder");
-}
-
-/* Dealing with various CP0 mmu/cache related registers.  */
-
-/* CP0_PAGEMASK register */
-static inline unsigned long get_pagemask(void)
-{
-	unsigned long val;
-
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"mfc0 %0, $5\n\t"
-		".set reorder"
-		: "=r" (val));
-	return val;
-}
-
-static inline void set_pagemask(unsigned long val)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"mtc0 %z0, $5\n\t"
-		".set reorder"
-		: : "Jr" (val));
-}
-
-/* CP0_ENTRYLO0 and CP0_ENTRYLO1 registers */
-static inline unsigned long get_entrylo0(void)
-{
-	unsigned long val;
-
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"dmfc0 %0, $2\n\t"
-		".set reorder"
-		: "=r" (val));
-	return val;
-}
-
-static inline void set_entrylo0(unsigned long val)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"dmtc0 %z0, $2\n\t"
-		".set reorder"
-		: : "Jr" (val));
-}
-
-static inline unsigned long get_entrylo1(void)
-{
-	unsigned long val;
-
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"dmfc0 %0, $3\n\t"
-		".set reorder" : "=r" (val));
-
-	return val;
-}
-
-static inline void set_entrylo1(unsigned long val)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"dmtc0 %z0, $3\n\t"
-		".set reorder"
-		: : "Jr" (val));
-}
-
-/* CP0_ENTRYHI register */
-static inline unsigned long get_entryhi(void)
-{
-	unsigned long val;
-
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"dmfc0 %0, $10\n\t"
-		".set reorder"
-		: "=r" (val));
-
-	return val;
-}
-
-static inline void set_entryhi(unsigned long val)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"dmtc0 %z0, $10\n\t"
-		".set reorder"
-		: : "Jr" (val));
-}
-
-/* CP0_INDEX register */
-static inline unsigned int get_index(void)
-{
-	unsigned long val;
-
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"mfc0 %0, $0\n\t"
-		".set reorder"
-		: "=r" (val));
-	return val;
-}
-
-static inline void set_index(unsigned int val)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"mtc0 %z0, $0\n\t"
-		".set reorder\n\t"
-		: : "Jr" (val));
-}
-
-/* CP0_WIRED register */
-static inline unsigned long get_wired(void)
-{
-	unsigned long val;
-
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"mfc0 %0, $6\n\t"
-		".set reorder\n\t"
-		: "=r" (val));
-	return val;
-}
-
-static inline void set_wired(unsigned long val)
-{
-	__asm__ __volatile__(
-		"\n\t.set noreorder\n\t"
-		"mtc0 %z0, $6\n\t"
-		".set reorder"
-		: : "Jr" (val));
-}
-
-static inline unsigned long get_info(void)
-{
-	unsigned long val;
-
-	__asm__(
-		".set push\n\t"
-		".set reorder\n\t"
-		"mfc0 %0, $7\n\t"
-		".set pop"
-		: "=r" (val));
-	return val;
-}
-
-/* CP0_TAGLO and CP0_TAGHI registers */
-static inline unsigned long get_taglo(void)
-{
-	unsigned long val;
-
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"mfc0 %0, $28\n\t"
-		".set reorder"
-		: "=r" (val));
-	return val;
-}
-
-static inline void set_taglo(unsigned long val)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"mtc0 %z0, $28\n\t"
-		".set reorder"
-		: : "Jr" (val));
-}
-
-static inline unsigned long get_taghi(void)
-{
-	unsigned long val;
-
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"mfc0 %0, $29\n\t"
-		".set reorder"
-		: "=r" (val));
-	return val;
-}
-
-static inline void set_taghi(unsigned long val)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"mtc0 %z0, $29\n\t"
-		".set reorder"
-		: : "Jr" (val));
-}
-
-/* CP0_CONTEXT register */
-static inline unsigned long get_context(void)
-{
-	unsigned long val;
-
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"dmfc0 %0, $4\n\t"
-		".set reorder"
-		: "=r" (val));
-
-	return val;
-}
-
-static inline void set_context(unsigned long val)
-{
-	__asm__ __volatile__(
-		".set noreorder\n\t"
-		"dmtc0 %z0, $4\n\t"
-		".set reorder"
-		: : "Jr" (val));
-}
-
-#include <asm-generic/pgtable.h>
-
-#endif /* !defined (_LANGUAGE_ASSEMBLY) */
-
 /*
  * No page table caches to initialise
  */
 #define pgtable_cache_init()	do { } while (0)
+
+#include <asm-generic/pgtable.h>
+
+#endif /* !defined (_LANGUAGE_ASSEMBLY) */
 
 #endif /* _ASM_PGTABLE_H */
- ".set noreorder\n\t" - "dmtc0 %z0, $4\n\t" - ".set reorder" - : : "Jr" (val)); -} - -#include <asm-generic/pgtable.h> - -#endif /* !defined (_LANGUAGE_ASSEMBLY) */ - /* * No page table caches to initialise */ #define pgtable_cache_init() do { } while (0) + +#include <asm-generic/pgtable.h> + +#endif /* !defined (_LANGUAGE_ASSEMBLY) */ #endif /* _ASM_PGTABLE_H */ Index: processor.h =================================================================== RCS file: /cvsroot/linux-mips/linux/include/asm-mips64/processor.h,v retrieving revision 1.4 retrieving revision 1.5 diff -u -d -r1.4 -r1.5 --- processor.h 2001/10/19 21:19:40 1.4 +++ processor.h 2001/10/31 18:26:52 1.5 @@ -249,7 +249,7 @@ /* * Return saved PC of a blocked thread. */ -extern inline unsigned long thread_saved_pc(struct thread_struct *t) +static inline unsigned long thread_saved_pc(struct thread_struct *t) { extern void ret_from_sys_call(void); Index: softirq.h =================================================================== RCS file: /cvsroot/linux-mips/linux/include/asm-mips64/softirq.h,v retrieving revision 1.4 retrieving revision 1.5 diff -u -d -r1.4 -r1.5 --- softirq.h 2001/10/27 17:28:55 1.4 +++ softirq.h 2001/10/31 18:26:52 1.5 @@ -13,13 +13,13 @@ #include <asm/atomic.h> #include <asm/hardirq.h> -extern inline void cpu_bh_disable(int cpu) +static inline void cpu_bh_disable(int cpu) { local_bh_count(cpu)++; barrier(); } -extern inline void __cpu_bh_enable(int cpu) +static inline void __cpu_bh_enable(int cpu) { barrier(); local_bh_count(cpu)--; |