Stuart Menefy reported that shared pages need some special treatment.
Here's my solution to that problem.
When a page is shared, we flush the relevant TLB entries and cache
lines on TLB update. With this treatment, there's at most one TLB
entry for that page at any time, and no cache lines are shared
between mappings.
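
For background: the SH-4 operand cache is virtually indexed, so two
virtual mappings of the same physical page can land in different cache
lines whenever they disagree in the index bits above the page offset.
A rough illustration of the conflict check (the function name and the
constant are mine, assuming the 16KB direct-mapped operand cache of
the SH7750 with 4KB pages, so this is a sketch, not kernel code):

/* Assumed: index bits 13:12 lie above PAGE_SHIFT -> four alias bins */
#define CACHE_ALIAS_MASK	0x00003000

static inline int cache_alias_conflict(unsigned long va1, unsigned long va2)
{
	/* Non-zero when the two mappings index different cache lines */
	return (va1 ^ va2) & CACHE_ALIAS_MASK;
}

Rather than trying to keep such aliases coherent, the patch below
simply ensures that only one mapping of a shared page is live at a
time.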
Comments? It's not tested yet. (It's committed along with other
changes.)
Index: arch/sh/mm/fault.c
===================================================================
RCS file: /cvsroot/linuxsh/kernel/arch/sh/mm/fault.c,v
retrieving revision 1.12
diff -u -r1.12 fault.c
--- arch/sh/mm/fault.c 2000/07/04 06:24:41 1.12
+++ arch/sh/mm/fault.c 2000/07/18 05:33:50
@@ -29,6 +29,9 @@
extern void die(const char *,struct pt_regs *,long);
static void __flush_tlb_page(struct mm_struct *mm, unsigned long page);
+#if defined(__SH4__)
+static void __flush_tlb_phys(struct mm_struct *mm, unsigned long phys);
+#endif
/*
* Ugly, ugly, but the goto's result in better assembly..
@@ -277,6 +280,19 @@
save_and_cli(flags);
+#if defined(__SH4__)
+ if (vma->vm_flags & VM_SHARED) {
+ pteval = pte_val(pte);
+ pteval &= PAGE_MASK; /* Physical page address */
+
+ __flush_tlb_phys(vma->vm_mm, pteval);
+
+ /* It would be better if we had a routine which takes
+ a physical address as its argument */
+ flush_cache_page(vma, address&PAGE_MASK);
+ }
+#endif
+
/* Set PTEH register */
if (vma) {
pteaddr = (address & MMU_VPN_MASK) |
@@ -327,6 +343,33 @@
if (saved_asid != MMU_NO_ASID)
set_asid(saved_asid);
}
+
+#if defined(__SH4__)
+static void __flush_tlb_phys(struct mm_struct *mm, unsigned long phys)
+{
+ int i;
+ unsigned long addr, data;
+
+ jump_to_P2();
+ for (i = 0; i < MMU_UTLB_ENTRIES; i++) {
+ addr = MMU_UTLB_DATA_ARRAY | (i<<MMU_U_ENTRY_SHIFT);
+ data = ctrl_inl(addr);
+ if ((data & MMU_UTLB_VALID) && (data&PAGE_MASK) == phys) {
+ data &= ~MMU_UTLB_VALID;
+ ctrl_outl(data, addr);
+ }
+ }
+ for (i = 0; i < MMU_ITLB_ENTRIES; i++) {
+ addr = MMU_ITLB_DATA_ARRAY | (i<<MMU_I_ENTRY_SHIFT);
+ data = ctrl_inl(addr);
+ if ((data & MMU_ITLB_VALID) && (data&PAGE_MASK) == phys) {
+ data &= ~MMU_ITLB_VALID;
+ ctrl_outl(data, addr);
+ }
+ }
+ back_to_P1();
+}
+#endif
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
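
As for the routine wished for in the comment above, here's a sketch of
what it might look like on SH-4, scanning the memory-mapped operand
cache address array the same way __flush_tlb_phys scans the TLB
arrays. The name __flush_cache_phys and the constants are assumptions
on my part, and a real version would need to cover the instruction
cache as well:

/* Assumed SH-4 (SH7750) values, not the real linuxsh definitions */
#define CACHE_OC_ADDRESS_ARRAY	0xf4000000	/* OC address array base */
#define CACHE_OC_NUM_ENTRIES	512		/* 16KB / 32-byte lines */
#define CACHE_OC_ENTRY_SHIFT	5
#define CACHE_VALID		1		/* V bit */
#define CACHE_UPDATED		2		/* U (dirty) bit */

static void __flush_cache_phys(unsigned long phys)
{
	int i;
	unsigned long addr, data;

	jump_to_P2();
	for (i = 0; i < CACHE_OC_NUM_ENTRIES; i++) {
		addr = CACHE_OC_ADDRESS_ARRAY | (i << CACHE_OC_ENTRY_SHIFT);
		data = ctrl_inl(addr);
		if ((data & CACHE_VALID) && (data & PAGE_MASK) == phys) {
			/* Clearing V (and U) invalidates the line; the
			   hardware writes a dirty line back when U is
			   cleared by an address-array write like this. */
			data &= ~(CACHE_VALID | CACHE_UPDATED);
			ctrl_outl(data, addr);
		}
	}
	back_to_P1();
}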