[xtensa-cvscommit] linux/arch/xtensa/mm mmu_dump.c,NONE,1.1 Makefile,1.2,1.3 cache.c,1.8,1.9 fault.c
Brought to you by:
zankel
|
From: <ma...@us...> - 2003-04-01 22:28:39
|
Update of /cvsroot/xtensa/linux/arch/xtensa/mm
In directory sc8-pr-cvs1:/tmp/cvs-serv31532
Modified Files:
Makefile cache.c fault.c init.c mmu.c
Added Files:
mmu_dump.c
Log Message:
Fix multihit kernel bug -- essentially adjust pte_none() and related code.
Also separate out MMU dump code, and other sundry cleanup.
--- NEW FILE: mmu_dump.c ---
/*
* arch/xtensa/mm/mmu_dump.c
*
* Code to dump the contents of the MMU.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002 - 2003 Tensilica Inc.
* Authors: Joe Taylor <jo...@te..., jo...@ya...>
* Marc Gauthier <ma...@te...> <ma...@al...>
*/
#include <xtensa/config/core.h>
//#include <linux/sched.h> /* asm/pgtable needs this (FIXME!?) */
#include <asm/pgtable.h>
/* Master switch: set to 0 to compile out all of the dump code below. */
#define XTENSA_MMU_DUMP 1
#if XTENSA_MMU_DUMP == 1
/* NOTE(review): USE_ITLB / USE_DTLB are defined but never referenced in this
 * file -- probably leftovers from the move out of mmu.c; confirm and remove. */
#define USE_ITLB 0
#define USE_DTLB 1
/* Run-time description of one TLB way, captured from the compile-time
 * XCHAL_* configuration constants so the dump code can iterate over ways. */
struct way_config_t {
	int indicies;		/* number of entries (indices) in this way */
	int indicies_log2;	/* log2 of the entry count */
	int pgsz_log2;		/* log2 of the way's minimum page size */
	int arf;		/* non-zero if the way is auto-refill */
};
/* Per-way ITLB configuration table; initializers cover ways 0..6
 * (assumes XCHAL_ITLB_WAYS == 7 -- matches the initializer count). */
static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
{
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
	}
};
/* Per-way DTLB configuration table; initializers cover ways 0..9
 * (assumes XCHAL_DTLB_WAYS == 10 -- matches the initializer count). */
static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
{
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
	}
};
/* Total number of entries in each TLB (used to size the dump arrays below).
 * FIX: the replacement lists are now fully parenthesized so the macros
 * expand safely inside any surrounding expression (CERT PRE02-C); the
 * original bare sums would mis-bind next to *, /, etc. */
#define ITLB_TOTAL_ENTRIES \
	(XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES))
#define DTLB_TOTAL_ENTRIES \
	(XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES))
/* One decoded TLB entry, in a form convenient for sorting and printing. */
typedef struct {
	unsigned va;		/* effective virtual address (index folded in) */
	unsigned pa;		/* physical address */
	unsigned char asid;	/* address-space ID; 0 marks an invalid entry */
	unsigned char ca;	/* cache-attribute bits */
	unsigned char way;
	unsigned char index;
	unsigned char pgsz_log2;	/* 0 .. 32 */
	unsigned char type;	/* 0=ITLB 1=DTLB */
} tlb_dump_entry_t;

/*
 * Three-way comparison for sorting dump entries.
 * Key order: asid, va, pa, ca, way, index.
 * Return -1 if a precedes b, +1 if a follows b, 0 if same.
 */
int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
{
	/* The first differing key field decides the ordering. */
	if (a->asid != b->asid)
		return (a->asid < b->asid) ? -1 : 1;
	if (a->va != b->va)
		return (a->va < b->va) ? -1 : 1;
	if (a->pa != b->pa)
		return (a->pa < b->pa) ? -1 : 1;
	if (a->ca != b->ca)
		return (a->ca < b->ca) ? -1 : 1;
	if (a->way != b->way)
		return (a->way < b->way) ? -1 : 1;
	if (a->index != b->index)
		return (a->index < b->index) ? -1 : 1;
	return 0;
}
/*
 * Sort n dump entries in place, ordered by cmp_tlb_dump_info().
 * Simple O(n^2) exchange sort -- n is at most the total TLB entry
 * count, so this is plenty fast for a debug dump.
 */
void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
{
	int lo, hi;

	for (lo = 0; lo < n - 1; lo++) {
		for (hi = lo + 1; hi < n; hi++) {
			if (cmp_tlb_dump_info(&t[lo], &t[hi]) > 0) {
				tlb_dump_entry_t swap = t[lo];
				t[lo] = t[hi];
				t[hi] = swap;
			}
		}
	}
}
/* Scratch buffers for dump_tlb(), sized for every entry of every way. */
static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];
/* Human-readable name for a way's auto-refill (arf) flag. */
static inline char *way_type (int type)
{
	if (type)
		return "autorefill";
	return "non-autorefill";
}
/*
 * Decode one raw TLB entry and printk it.
 *
 * way_info:    configuration of the way this entry belongs to
 * way, index:  position of the entry within the TLB
 * virtual:     raw value read by read_[id]tlb_virtual()
 * translation: raw value read by read_[id]tlb_translation()
 */
void print_entry (struct way_config_t *way_info,
		  unsigned int way,
		  unsigned int index,
		  unsigned int virtual,
		  unsigned int translation)
{
	char valid_chr;
	unsigned int va, pa, asid, ca;

	/* vpn: mask off the ASID bits and the index-covered low bits.
	 * NOTE(review): if pgsz_log2 + indicies_log2 can reach 32, this
	 * shift is undefined behavior -- confirm configuration bounds. */
	va = virtual & ~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
	asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
	pa = translation & ~((1 << way_info->pgsz_log2) - 1);
	ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
	/* ASID 0 marks an invalid entry. */
	valid_chr = asid ? 'V' : 'I';

	/* Compute and incorporate the effect of the index bits on the
	 * va.  It's more useful for kernel debugging, since we always
	 * want to know the effective va anyway. */
	va += index << way_info->pgsz_log2;

	printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
		way, index, valid_chr, va, pa, asid, ca);
}
/* Read and print one ITLB entry identified by (way, index). */
void print_itlb_entry (struct way_config_t *way_info, int way, int index)
{
	/* The TLB-access instructions encode the entry as way + shifted index. */
	unsigned int entry = way + (index << way_info->pgsz_log2);

	print_entry (way_info, way, index,
		     read_itlb_virtual (entry),
		     read_itlb_translation (entry));
}
/* Read and print one DTLB entry identified by (way, index). */
void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
{
	/* The TLB-access instructions encode the entry as way + shifted index. */
	unsigned int entry = way + (index << way_info->pgsz_log2);

	print_entry (way_info, way, index,
		     read_dtlb_virtual (entry),
		     read_dtlb_translation (entry));
}
void dump_itlb (void)
{
int way, index;
printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);
for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
way, itlb[way].indicies,
itlb[way].pgsz_log2, way_type(itlb[way].arf));
for (index = 0; index < itlb[way].indicies; index++) {
print_itlb_entry(&itlb[way], way, index);
}
}
}
void dump_dtlb (void)
{
int way, index;
printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);
for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
way, dtlb[way].indicies,
dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
for (index = 0; index < dtlb[way].indicies; index++) {
print_dtlb_entry(&dtlb[way], way, index);
}
}
}
/*
 * Gather, sort (by ASID then VA), and printk all entries of one TLB.
 *
 * tinfo:        scratch array with room for 'entries' decoded entries
 * config:       per-way configuration table ('ways' elements)
 * entries:      total entry count across all ways (must match config)
 * ways:         number of ways in this TLB
 * type:         0 = ITLB, 1 = DTLB (selects which read_* accessors to use)
 * show_invalid: non-zero to also print invalid (ASID 0) entries
 */
void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
	       int entries, int ways, int type, int show_invalid)
{
	tlb_dump_entry_t *e = tinfo;
	int way, i;

	/* Gather all info: */
	for (way = 0; way < ways; way++) {
		struct way_config_t *cfg = config + way;
		for (i = 0; i < cfg->indicies; i++) {
			unsigned wayindex = way + (i << cfg->pgsz_log2);
			unsigned vv = (type ? read_dtlb_virtual (wayindex)
				       : read_itlb_virtual (wayindex));
			unsigned pp = (type ? read_dtlb_translation (wayindex)
				       : read_itlb_translation (wayindex));

			/* Compute and incorporate the effect of the index bits
			 * on the va.  It's more useful for kernel debugging,
			 * since we always want to know the effective va anyway.
			 * NOTE(review): if pgsz_log2 + indicies_log2 can reach
			 * 32 this shift is undefined behavior -- confirm the
			 * configuration bounds. */
			e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
			e->va += (i << cfg->pgsz_log2);
			e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
			e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
			e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
			e->way = way;
			e->index = i;
			e->pgsz_log2 = cfg->pgsz_log2;
			e->type = type;
			e++;
		}
	}

	/* Sort by ASID and VADDR: */
	sort_tlb_dump_info (tinfo, entries);

	/* Display all sorted info:
	 * BUGFIX: the header previously read e->type, but after the gather
	 * loop e points one past the end of the array (out-of-bounds read);
	 * use the 'type' parameter, which carries the same 0/1 value. */
	printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
	for (e = tinfo, i = 0; i < entries; i++, e++) {
		if (e->asid == 0 && !show_invalid)
			continue;	/* ASID 0 == invalid entry */
		printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
			(e->type ? 'D' : 'I'), e->way, e->index,
			e->asid, e->va, e->pa, e->ca,
			(1 << (e->pgsz_log2 % 10)),
			" kMG"[e->pgsz_log2 / 10]
			);
	}
}
/* Sorted dump of both TLBs; showinv non-zero also prints invalid entries. */
void dump_tlbs2 (int showinv)
{
	dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES,
		  XCHAL_ITLB_WAYS, 0, showinv);
	dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES,
		  XCHAL_DTLB_WAYS, 1, showinv);
}
/* Sorted dump of every TLB entry, including invalid (ASID 0) ones. */
void dump_all_tlbs (void)
{
	dump_tlbs2 (1);
}

/* Sorted dump of only the valid (non-zero ASID) TLB entries. */
void dump_valid_tlbs (void)
{
	dump_tlbs2 (0);
}

/* Unsorted, way-by-way dump of both TLBs. */
void dump_tlbs (void)
{
	dump_itlb();
	dump_dtlb();
}
#endif /* XTENSA_MMU_DUMP == 1 */
Index: Makefile
===================================================================
RCS file: /cvsroot/xtensa/linux/arch/xtensa/mm/Makefile,v
retrieving revision 1.2
retrieving revision 1.3
diff -C2 -d -r1.2 -r1.3
*** Makefile 17 Jan 2003 00:31:30 -0000 1.2
--- Makefile 1 Apr 2003 22:28:32 -0000 1.3
***************
*** 9,13 ****
O_TARGET := mm.o
! obj-y := extable.o init.o fault.o loadmmu.o mmu.o cache.o
include $(TOPDIR)/Rules.make
--- 9,13 ----
O_TARGET := mm.o
! obj-y := extable.o init.o fault.o loadmmu.o mmu.o cache.o mmu_dump.o
include $(TOPDIR)/Rules.make
Index: cache.c
===================================================================
RCS file: /cvsroot/xtensa/linux/arch/xtensa/mm/cache.c,v
retrieving revision 1.8
retrieving revision 1.9
diff -C2 -d -r1.8 -r1.9
*** cache.c 5 Mar 2003 17:57:44 -0000 1.8
--- cache.c 1 Apr 2003 22:28:33 -0000 1.9
***************
*** 8,12 ****
* this archive for more details.
*
! * Copyright (C) 2003 Tensilica Inc. (by Joe Taylor, jo...@te...)
*/
--- 8,12 ----
* this archive for more details.
*
! * Copyright (C) 2001 - 2003 Tensilica Inc. (by Joe Taylor, jo...@te...)
*/
***************
*** 137,141 ****
* in the cache.
*/
! if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
--- 137,141 ----
* in the cache.
*/
! if (!pte_valid(*ptep))
goto out;
Index: fault.c
===================================================================
RCS file: /cvsroot/xtensa/linux/arch/xtensa/mm/fault.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -C2 -d -r1.6 -r1.7
*** fault.c 5 Mar 2003 17:57:44 -0000 1.6
--- fault.c 1 Apr 2003 22:28:34 -0000 1.7
***************
*** 9,15 ****
*
* Copyright (C) 1995 Linus Torvalds
! * Copyright (C) 2001 Tensilica Inc.
* Authors: Joe Taylor <jo...@te..., jo...@ya...>
! * Marc Gauthier
* Kevin Chea
*/
--- 9,15 ----
*
* Copyright (C) 1995 Linus Torvalds
! * Copyright (C) 2001 - 2003 Tensilica Inc.
* Authors: Joe Taylor <jo...@te..., jo...@ya...>
! * Marc Gauthier <ma...@te...> <ma...@al...>
* Kevin Chea
*/
***************
*** 85,88 ****
--- 85,100 ----
+ /* Define CATCH_NO_TLB_CLEAR to have a place for a breakpoint
+ * (in xt_staletlb()) for catching cases where do_page_fault() doesn't
+ * clear invalid TLB entries. This is purely for debugging. */
+ #undef CATCH_NO_TLB_CLEAR
+ #ifdef CATCH_NO_TLB_CLEAR
+ void xt_staletlb(void) {
+ }
+ #else
+ # define xt_staletlb() do {} while(0)
+ #endif
+
+
/*
* do_page_fault() could handle:
***************
*** 99,103 ****
*/
-
/*
* Macro for exception fixup code to access integer registers.
--- 111,114 ----
***************
*** 151,157 ****
* pte_alloc() and get_pte_slow() for more details. */
! if (pte_none(*pte)) {
invalidate_itlb_mapping(address);
}
write = 2; /* i-fetch */
--- 162,169 ----
* pte_alloc() and get_pte_slow() for more details. */
! if (!pte_valid(*pte)) {
invalidate_itlb_mapping(address);
}
+ else xt_staletlb();
write = 2; /* i-fetch */
***************
*** 170,177 ****
/* XXX -- jn */
! flush_pte_from_cache(pte, regs->excvaddr);
! // flush_cache_all();
return;
}
write = 2; /* i-fetch */
break;
--- 182,194 ----
/* XXX -- jn */
! flush_pte_from_cache(pte, address);
return;
}
+ xt_staletlb();
+ #define ALWAYS_INVAL 0
+ #if ALWAYS_INVAL
+ if (!pte_valid(*pte))
+ invalidate_itlb_mapping(address);
+ #endif
write = 2; /* i-fetch */
break;
***************
*** 189,196 ****
/* xxx jn */
! flush_pte_from_cache(pte, regs->excvaddr);
! // flush_cache_all();
return;
}
write = 0;
break;
--- 206,217 ----
/* xxx jn */
! flush_pte_from_cache(pte, address);
return;
}
+ xt_staletlb();
+ #if ALWAYS_INVAL
+ if (!pte_valid(*pte))
+ invalidate_dtlb_mapping(address);
+ #endif
write = 0;
break;
***************
*** 207,214 ****
/* xxx jn */
! flush_pte_from_cache(pte, regs->excvaddr);
! // flush_cache_all();
return;
}
write = 1;
break;
--- 228,239 ----
/* xxx jn */
! flush_pte_from_cache(pte, address);
return;
}
+ xt_staletlb();
+ #if ALWAYS_INVAL
+ if (!pte_valid(*pte))
+ invalidate_dtlb_mapping(address);
+ #endif
write = 1;
break;
***************
*** 220,226 ****
* get_pte_slow() for more details. */
! if (pte_none(*pte)) {
invalidate_dtlb_mapping(address);
}
--- 245,252 ----
* get_pte_slow() for more details. */
! if (!pte_valid(*pte)) {
invalidate_dtlb_mapping(address);
}
+ else xt_staletlb();
***************
*** 396,400 ****
/* handle_2nd_level_miss may have mapped
! * exception_pte_table, so unmap it here. Also clean
* up any leftover mappings for this address that may
* be invalid.
--- 422,426 ----
/* handle_2nd_level_miss may have mapped
! * invalid_pte_table, so unmap it here. Also clean
* up any leftover mappings for this address that may
* be invalid.
***************
*** 419,422 ****
--- 445,451 ----
+
+ /* See comments for update_mmu_cache() in pgtable.h */
+
void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address,
***************
*** 443,444 ****
--- 472,474 ----
invalidate_page_table();
}
+
Index: init.c
===================================================================
RCS file: /cvsroot/xtensa/linux/arch/xtensa/mm/init.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -C2 -d -r1.5 -r1.6
*** init.c 13 Feb 2003 18:19:21 -0000 1.5
--- init.c 1 Apr 2003 22:28:34 -0000 1.6
***************
*** 8,12 ****
* for more details.
*
! * Copyright (C) 2001 Tensilica Inc.
* Authors: Joe Taylor <jo...@te..., jo...@ya...>
* Marc Gauthier
--- 8,12 ----
* for more details.
*
! * Copyright (C) 2001 - 2003 Tensilica Inc.
* Authors: Joe Taylor <jo...@te..., jo...@ya...>
* Marc Gauthier
***************
*** 57,61 ****
* the MMU hardware to generate an exception if loading a value into a
* TLB (i.e., auto-refill). In other words, the MMU considers all PTE
! * values as valid.
*
* This comment documents two solutions (so we don't have to redesign
--- 57,61 ----
* the MMU hardware to generate an exception if loading a value into a
* TLB (i.e., auto-refill). In other words, the MMU considers all PTE
! * values as "valid" in the sense of allowing them to be in the TLBs.
*
* This comment documents two solutions (so we don't have to redesign
***************
*** 65,71 ****
*
* This solution clears all PTE values when it allocates a new page of
! * PTEs. When the MMU reads a PTE into a TLB (i.e., auto-refill), vpn
! * = 0, ppn = 0, ring level = 0, and ca = 0. This TLB entry is
! * invalid, though the MMU hardware considers it valid.
*
* The most-likely result is a Privilege Exception in user code
--- 65,72 ----
*
* This solution clears all PTE values when it allocates a new page of
! * PTEs. When the MMU reads such a PTE into a TLB (i.e., auto-refill):
! * vpn = 0, ppn = 0, asid = RASID[ring level = 0] = 1, and ca = 0.
! * This TLB entry is invalid, though the MMU hardware loads it into
! * the TLB.
*
* The most-likely result is a Privilege Exception in user code
***************
*** 97,101 ****
* (i.e., auto-refill), vpn = 0, ppn = 0, ring level = USER_RING, and
* ca = 0. This TLB entry is invalid, though the MMU hardware
! * considers it valid.
*
* A TLB hit on this invalid entry will result in a Cache Attribute
--- 98,102 ----
* (i.e., auto-refill), vpn = 0, ppn = 0, ring level = USER_RING, and
* ca = 0. This TLB entry is invalid, though the MMU hardware
! * loads it into the TLB.
*
* A TLB hit on this invalid entry will result in a Cache Attribute
***************
*** 231,239 ****
extern char empty_bad_page_table[PAGE_SIZE];
int i;
unsigned long tmp = pte_val(BAD_PAGE);
unsigned long page = (unsigned long) empty_bad_page_table;
! for (i = 0; i < PTRS_PER_PGD; i++)
empty_bad_page_table[i] = tmp;
--- 232,243 ----
extern char empty_bad_page_table[PAGE_SIZE];
+ /* XTFIXME: other architectures just zero this page,
+ * why are we different? please comment */
+
int i;
unsigned long tmp = pte_val(BAD_PAGE);
unsigned long page = (unsigned long) empty_bad_page_table;
! for (i = 0; i < PTRS_PER_PTE; i++)
empty_bad_page_table[i] = tmp;
***************
*** 285,289 ****
/*
! * Initialize new page directory with pointers to invalid ptes
*/
void pgd_init(unsigned long page)
--- 289,295 ----
/*
! * Initialize new page directory with pointers to invalid PTE table ptrs
! * (really gets passed a pgd_t*, which is treated here as pmd_t* because
! * of collapsing; a bit too confusing, needs some clarification).
*/
void pgd_init(unsigned long page)
***************
*** 296,299 ****
--- 302,306 ----
* has virtually indexed caches, configurations are currently
* constrained to avoid aliasing issues.
+ * XTFIXME: cache aliasing is now allowed; does that affect this code?
*/
p = (unsigned long *) page;
***************
*** 301,312 ****
while (p < end) {
! p[0] = (unsigned long) invalid_pte_table;
! p[1] = (unsigned long) invalid_pte_table;
! p[2] = (unsigned long) invalid_pte_table;
! p[3] = (unsigned long) invalid_pte_table;
! p[4] = (unsigned long) invalid_pte_table;
! p[5] = (unsigned long) invalid_pte_table;
! p[6] = (unsigned long) invalid_pte_table;
! p[7] = (unsigned long) invalid_pte_table;
p += 8;
}
--- 308,319 ----
while (p < end) {
! pmd_clear ((pmd_t*)&p[0]);
! pmd_clear ((pmd_t*)&p[1]);
! pmd_clear ((pmd_t*)&p[2]);
! pmd_clear ((pmd_t*)&p[3]);
! pmd_clear ((pmd_t*)&p[4]);
! pmd_clear ((pmd_t*)&p[5]);
! pmd_clear ((pmd_t*)&p[6]);
! pmd_clear ((pmd_t*)&p[7]);
p += 8;
}
***************
*** 314,341 ****
! void exc_pte_init (unsigned long page)
{
! unsigned long *p, *end;
!
! p = (unsigned long *) page;
! end = p + PTRS_PER_PTE;
!
! /* Initialize the page so each pte contains cache attributes
! that prevents any and all accesses. */
! while (p < end) {
! p[0] = (unsigned long) pgprot_val(PAGE_NONE);
! p[1] = (unsigned long) pgprot_val(PAGE_NONE);
! p[2] = (unsigned long) pgprot_val(PAGE_NONE);
! p[3] = (unsigned long) pgprot_val(PAGE_NONE);
! p[4] = (unsigned long) pgprot_val(PAGE_NONE);
! p[5] = (unsigned long) pgprot_val(PAGE_NONE);
! p[6] = (unsigned long) pgprot_val(PAGE_NONE);
! p[7] = (unsigned long) pgprot_val(PAGE_NONE);
! p += 8;
! }
}
/* mem_reserve(start, end, must_exist)
*
--- 321,348 ----
! /*
! * Clear every entry of a page table (so that pte_none() is true for each entry).
! * This prevents accesses using an appropriate PTE value
! * (see pgtable.h for details).
! */
! void pte_table_clear (pte_t *ptable)
{
! int i;
! for (i = 0; i < PTRS_PER_PTE/8; i++) {
! pte_clear (&ptable[0]);
! pte_clear (&ptable[1]);
! pte_clear (&ptable[2]);
! pte_clear (&ptable[3]);
! pte_clear (&ptable[4]);
! pte_clear (&ptable[5]);
! pte_clear (&ptable[6]);
! pte_clear (&ptable[7]);
! ptable += 8;
! }
}
+
/* mem_reserve(start, end, must_exist)
*
***************
*** 450,467 ****
{
unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
! unsigned long max_dma, low;
int i;
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
- memset((void *)invalid_pte_table, 0, sizeof(pte_t) * PTRS_PER_PTE);
- /*MIPS did:*/
- /* pgd_init((unsigned long)swapper_pg_dir); */
- /* pgd_init((unsigned long)swapper_pg_dir + PAGE_SIZE / 2); */
! /* Initialize exception_pte_table so each pte contains cache
! attributes that prevents any and all accesses. */
!
! exc_pte_init ((unsigned long)exception_pte_table);
#if 0
--- 457,467 ----
{
unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
! /*unsigned long max_dma, low;*/
int i;
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
! pte_table_clear (invalid_pte_table);
#if 0
***************
*** 482,490 ****
#endif
/*
* All pages are DMA-able so we put them all in the DMA zone.
*/
! zones_size[ZONE_DMA] = max_low_pfn;
! for (i = 1; i < MAX_NR_ZONES; i++)
zones_size[i] = 0;
free_area_init(zones_size);
--- 482,502 ----
#endif
/*
+ * Look in <linux/mmzone.h> for details. There are 3 zones:
+ * ZONE_DMA (DMA capable, directly mapped by kernel)
+ * ZONE_NORMAL (directly mapped by kernel)
+ * ZONE_HIGHMEM (dynamically mapped only)
+ *
* All pages are DMA-able so we put them all in the DMA zone.
+ * XTFIXME: memory past first physical 128 MB must be placed in ZONE_HIGHMEM
+ * (does Linux kernel support DMA capable memory in HIGHMEM?
+ * doesn't appear so...).
+ *
+ * NOTE: max_low_pfn is the same as max_pfn passed to init_bootmem()
+ * in memory_init() in this file. This is in turn dependent on
+ * the sysmem[] array initialized in init_arch() in setup.c.
*/
! for (i = 0; i < MAX_NR_ZONES; i++)
zones_size[i] = 0;
+ zones_size[ZONE_DMA] = max_low_pfn;
free_area_init(zones_size);
***************
*** 492,496 ****
--- 504,511 ----
+ #ifdef DEBUG_UNALIGNMENT_TEST
static void unaligned_test (void);
+ #endif
+
void debug_hooks (void)
Index: mmu.c
===================================================================
RCS file: /cvsroot/xtensa/linux/arch/xtensa/mm/mmu.c,v
retrieving revision 1.10
retrieving revision 1.11
diff -C2 -d -r1.10 -r1.11
*** mmu.c 31 Mar 2003 23:30:00 -0000 1.10
--- mmu.c 1 Apr 2003 22:28:34 -0000 1.11
***************
*** 8,14 ****
* for more details.
*
! * Copyright (C) 2001 Tensilica Inc.
* Authors: Joe Taylor <jo...@te..., jo...@ya...>
! * Marc Gauthier
*/
--- 8,14 ----
* for more details.
*
! * Copyright (C) 2001 - 2003 Tensilica Inc.
* Authors: Joe Taylor <jo...@te..., jo...@ya...>
! * Marc Gauthier <ma...@te...> <ma...@al...>
*/
***************
*** 212,231 ****
pgd = pgd_offset (mm, regs->excvaddr);
pmd = pmd_offset (pgd, regs->excvaddr);
-
- /* We want to map the page of PTEs into the Page Table, but if
- * the task doesn't yet have a mapping for the region, just
- * map the exception_pte_table for the region. Note that we
- * do not modify the mappings for the task (tsk->mm->pgd) itself.
- *
- * exception_pte_table contains PTEs that will generate access
- * faults, and the exception will end up in do_page_fault()
- * for further handling. */
-
pmdval = *pmd;
- if (pmd_none(pmdval))
- pmdval = __pmd((unsigned long)exception_pte_table);
/* read ptevaddr and convert to top of page-table page */
! vpnval = read_ptevaddr_register() & PAGE_MASK;
vpnval += WIRED_WAY_FOR_PAGE_TABLE; /* add way number for 'wdtlb' insn */
pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
--- 212,219 ----
pgd = pgd_offset (mm, regs->excvaddr);
pmd = pmd_offset (pgd, regs->excvaddr);
pmdval = *pmd;
/* read ptevaddr and convert to top of page-table page */
! vpnval = (read_ptevaddr_register() & PAGE_MASK);
vpnval += WIRED_WAY_FOR_PAGE_TABLE; /* add way number for 'wdtlb' insn */
pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
***************
*** 234,426 ****
-
- #define XT2000_MMU_DUMP 1
-
- #if (XT2000_MMU_DUMP == 1)
-
- #define USE_ITLB 0
- #define USE_DTLB 1
-
- struct way_config_t {
- int indicies;
- int indicies_log2;
- int pgsz_log2;
- int arf;
- };
-
- static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
- {
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
- }
- };
-
- static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
- {
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
- }
- };
-
- static inline char *way_type (int type)
- {
- return type ? "autorefill" : "non-autorefill";
- }
-
- void print_entry (struct way_config_t *way_info,
- unsigned int way,
- unsigned int index,
- unsigned int virtual,
- unsigned int translation)
- {
- char valid_chr;
- unsigned int va, pa, asid, ca;
-
- va = virtual & ~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
- asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
- pa = translation & ~((1 << way_info->pgsz_log2) - 1);
- ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
- valid_chr = asid ? 'V' : 'I';
-
- /* Compute and incorporate the effect of the index bits on the
- * va. It's more useful for kernel debugging, since we always
- * want to know the effective va anyway. */
-
- va += index << way_info->pgsz_log2;
-
- printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
- way, index, valid_chr, va, pa, asid, ca);
- }
-
- void print_itlb_entry (struct way_config_t *way_info, int way, int index)
- {
- print_entry (way_info, way, index,
- read_itlb_virtual (way + (index << way_info->pgsz_log2)),
- read_itlb_translation (way + (index << way_info->pgsz_log2)));
- }
-
- void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
- {
- print_entry (way_info, way, index,
- read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
- read_dtlb_translation (way + (index << way_info->pgsz_log2)));
- }
-
- void dump_itlb (void)
- {
- int way, index;
-
- printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);
-
- for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
- printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
- way, itlb[way].indicies,
- itlb[way].pgsz_log2, way_type(itlb[way].arf));
- for (index = 0; index < itlb[way].indicies; index++) {
- print_itlb_entry(&itlb[way], way, index);
- }
- }
- }
-
- void dump_dtlb (void)
- {
- int way, index;
-
- printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);
-
- for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
- printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
- way, dtlb[way].indicies,
- dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
- for (index = 0; index < dtlb[way].indicies; index++) {
- print_dtlb_entry(&dtlb[way], way, index);
- }
- }
- }
-
- void dump_tlbs (void)
- {
- dump_itlb();
- dump_dtlb();
- }
-
- #endif /* XT2000_MMU_DUMP == 1 */
--- 222,223 ----
|