From: NIIBE Y. <gn...@ch...> - 2000-08-19 01:52:46
|
NIIBE Yutaka wrote: > flush_ram_to_page: > write back the kernel cache line to the page (D-cache) > as well as flush (purge) the cache line (both of user and kernel) Well this change seems to be needed eventually, as SH-4's cache is not DMA-coherent, AFAIK. Besides, it seems to me that flush_cache_page is the one for a virtually tagged cache system. I'd like to clarify, but now we don't have VGER... Here's the patch. It works fine for me. Will soon be committed. Index: arch/sh/mm/cache.c =================================================================== RCS file: /cvsroot/linuxsh/kernel/arch/sh/mm/cache.c,v retrieving revision 1.17 diff -u -p -r1.17 cache.c --- arch/sh/mm/cache.c 2000/08/16 08:34:04 1.17 +++ arch/sh/mm/cache.c 2000/08/19 01:40:11 @@ -358,6 +358,7 @@ void flush_cache_range(struct mm_struct */ void flush_cache_page(struct vm_area_struct *vma, unsigned long addr) { +#if 0 /* We don't need this... */ pgd_t *dir; pmd_t *pmd; pte_t *pte; @@ -378,11 +379,14 @@ void flush_cache_page(struct vm_area_str phys = pte_val(entry)&PAGE_MASK; pg = virt_to_page(__va(phys)); flush_dcache_page(pg); +#endif } /* + * Write-back & invalidate the cache. + * * After accessing the memory from kernel space (P1-area), we need to - * write back the cache line to maintain DMA coherency. + * write back the cache line. * * We search the D-cache to see if we have the entries corresponding to * the page, and if found, write back them. 
@@ -399,10 +403,8 @@ void flush_page_to_ram(struct page *pg) for (i=0; i<CACHE_OC_NUM_ENTRIES; i++) { addr = CACHE_OC_ADDRESS_ARRAY| (i<<CACHE_OC_ENTRY_SHIFT); data = ctrl_inl(addr); - if (((data & (CACHE_UPDATED|CACHE_VALID)) - == (CACHE_UPDATED|CACHE_VALID)) - && (data&PAGE_MASK) == phys) { - data &= ~CACHE_UPDATED; + if ((data & CACHE_VALID) && (data&PAGE_MASK) == phys) { + data &= ~(CACHE_UPDATED|CACHE_VALID); ctrl_outl(data, addr); } } Index: mm/memory.c =================================================================== RCS file: /cvsroot/linuxsh/kernel/mm/memory.c,v retrieving revision 1.7 diff -u -p -r1.7 memory.c --- mm/memory.c 2000/08/17 04:27:15 1.7 +++ mm/memory.c 2000/08/19 01:40:33 @@ -790,8 +790,8 @@ static inline void break_cow(struct vm_a pte_t *page_table) { copy_cow_page(old_page,new_page,address); - flush_dcache_page(new_page); - flush_icache_page(vma, new_page); + flush_page_to_ram(new_page); + flush_cache_page(vma, address); establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)))); } @@ -1081,7 +1081,7 @@ static int do_swap_page(struct mm_struct } else UnlockPage(page); - flush_dcache_page(page); + flush_page_to_ram(page); flush_icache_page(vma, page); set_pte(page_table, pte); /* No need to invalidate - it was non-present before */ @@ -1106,8 +1106,7 @@ static int do_anonymous_page(struct mm_s clear_user_highpage(page, addr); entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); mm->rss++; - flush_dcache_page(page); - flush_icache_page(vma, page); + flush_page_to_ram(page); } set_pte(page_table, entry); /* No need to invalidate - it was non-present before */ @@ -1156,7 +1155,7 @@ static int do_no_page(struct mm_struct * * so we can make it writable and dirty to avoid having to * handle that later. 
*/ - flush_dcache_page(new_page); + flush_page_to_ram(new_page); flush_icache_page(vma, new_page); entry = mk_pte(new_page, vma->vm_page_prot); if (write_access) { Index: mm/vmscan.c =================================================================== RCS file: /cvsroot/linuxsh/kernel/mm/vmscan.c,v retrieving revision 1.11 diff -u -p -r1.11 vmscan.c --- mm/vmscan.c 2000/08/09 12:51:01 1.11 +++ mm/vmscan.c 2000/08/19 01:40:34 @@ -138,6 +138,7 @@ drop_pte: * * That would get rid of a lot of problems. */ + flush_page_to_ram(page); flush_cache_page(vma, address); if (vma->vm_ops && (swapout = vma->vm_ops->swapout)) { int error; |