From: Kenn H. <ke...@us...> - 2002-12-12 01:43:06
Update of /cvsroot/linux-vax/kernel-2.5/include/asm-vax
In directory sc8-pr-cvs1:/tmp/cvs-serv13160/include/asm-vax

Added Files:
	cacheflush.h tlbflush.h
Log Message:
TLB, i-cache and d-cache flushing macros and functions now have their
own header files, and are no longer automatically pulled in via
<asm/pgalloc.h>

--- NEW FILE ---
#ifndef __VAX_CACHEFLUSH_H
#define __VAX_CACHEFLUSH_H

/*
 * cacheflush.h. Definitions for cache structures/routines.
 * Copyright atp Jan 2001
 */

/* FIXME: double-check this. VAX hw ref guide pg 274. */
/* Also see the cache flush architecture document by D. Mosberger. */

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(mm, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)			do { } while (0)
#define flush_dcache_page(page)			do { } while (0)

/* Flushing the instruction cache is all-or-nothing on VAX. */
#define flush_icache_range(start, end)			flush_icache()
#define flush_icache_user_range(vma, pg, start, end)	flush_icache()
#define flush_icache_page(vma, pg)			flush_icache()

static inline void flush_icache(void)
{
	/* Push a PC/PSL pair onto the stack, so that it looks like we
	   took an interrupt, and then REI back; the REI flushes the
	   i-cache. */
	__asm__ (
	"	movpsl	-(sp)	\n"
	"	pushab	1f	\n"
	"	rei		\n"
	"1:"
	: : );
}

#endif /* __VAX_CACHEFLUSH_H */
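(Usage note: since these definitions are no longer pulled in via
<asm/pgalloc.h>, callers now need the explicit include.  A minimal
sketch of a caller follows; the code-patching scenario and the
patch_insn() helper are made up for illustration, not part of this
commit.)

#include <asm/cacheflush.h>

/* Hypothetical example: after writing an instruction byte into
 * memory (say, planting a breakpoint), flush the stale i-cache
 * contents.  On VAX the range arguments are ignored, since i-cache
 * flushing is all-or-nothing. */
static void patch_insn(unsigned long addr, unsigned char opcode)
{
	*(unsigned char *)addr = opcode;	/* modify the code */
	flush_icache_range(addr, addr + 1);	/* flushes whole i-cache */
}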
--- NEW FILE ---
#ifndef __VAX_TLBFLUSH_H
#define __VAX_TLBFLUSH_H

/* FIXME: update to 2.4 - clever_smp_invalidate has gone. */

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct's TLB entries
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the TLB entries of the specified mm context
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *
 * VAX hw ref manual pg 216: an MTPR can invalidate either a single
 * TLB entry (TBIS) or all TLB entries (TBIA).  In addition, LDPCTX
 * invalidates all process virtual address translations.
 */

#define __flush_tlb() \
	__asm__ __volatile__("mtpr $0,%0" : : "i" (PR_TBIA))

#define __flush_tlb_one(addr) \
	__asm__ __volatile__("mtpr %0,%1" : : "m" (addr), "i" (PR_TBIS))

#ifndef __SMP__

#define flush_tlb()			__flush_tlb()
#define flush_tlb_all()			__flush_tlb()
#define local_flush_tlb()		__flush_tlb()
#define flush_tlb_kernel_range(start, end)	__flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	if (vma->vm_mm == current->mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->mm)
		__flush_tlb();
}

#else

/* FIXME: SMP - another day perhaps */

/*
 * We aren't very clever about this yet - SMP could certainly
 * avoid some global flushes..
 */

#define local_flush_tlb() \
	__flush_tlb()

#define CLEVER_SMP_INVALIDATE
#ifdef CLEVER_SMP_INVALIDATE
/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts.  (It's not allowed
 * anyway.)
 */
static inline void flush_tlb_current_task(void)
{
	/* Just one copy of this mm? */
	if (atomic_read(&current->mm->count) == 1)
		local_flush_tlb();	/* ...and that's us, so flush locally. */
	else
		smp_flush_tlb();
}

#define flush_tlb()	flush_tlb_current_task()
#define flush_tlb_all()	smp_flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->mm && atomic_read(&mm->count) == 1)
		local_flush_tlb();
	else
		smp_flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long va)
{
	if (vma->vm_mm == current->mm &&
	    atomic_read(&current->mm->count) == 1)
		__flush_tlb_one(va);
	else
		smp_flush_tlb();
}

static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	flush_tlb_mm(mm);
}

#else

#define flush_tlb() \
	smp_flush_tlb()

#define flush_tlb_all()	flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	flush_tlb();
}

static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	flush_tlb();
}

#endif /* CLEVER_SMP_INVALIDATE */
#endif /* __SMP__ */

extern inline void flush_tlb_pgtables(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	/* FIXME: do we need to notify other CPUs that a process
	 * page table is going away?  I don't think so...
	 * But what if two processes are sharing this mm_struct and
	 * are currently running on two different CPUs? */
}

#endif /* __VAX_TLBFLUSH_H */
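(Usage note: a sketch of the intended calling pattern after a PTE
update.  The remap_one_page() helper is hypothetical, and set_pte()
is assumed to be the usual accessor from <asm/pgtable.h>; neither is
part of this commit.)

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/* Hypothetical example: install a new translation for one user page,
 * then invalidate just that stale TB entry (TBIS via flush_tlb_page)
 * rather than flushing the whole TLB (TBIA). */
static void remap_one_page(struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, pte_t newval)
{
	set_pte(ptep, newval);		/* update the page table entry */
	flush_tlb_page(vma, addr);	/* drop the old translation */
}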