From: BitKeeper B. <ri...@su...> - 2004-08-27 17:00:59
|
ChangeSet 1.1281.1.1, 2004/08/27 17:01:06+01:00, ka...@fr... Allow shadow p.t. code to do unsafe things with shadow locks held. arch/x86/shadow.c | 14 +++++++------- common/kernel.c | 16 ++++++++++++---- include/asm-x86/shadow.h | 21 +++++++++++---------- include/xen/spinlock.h | 11 +++++++++++ 4 files changed, 41 insertions(+), 21 deletions(-) diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c --- a/xen/arch/x86/shadow.c 2004-08-27 13:00:47 -04:00 +++ b/xen/arch/x86/shadow.c 2004-08-27 13:00:47 -04:00 @@ -443,7 +443,7 @@ domain_pause(d); synchronise_pagetables(~0UL); - spin_lock(&d->mm.shadow_lock); + shadow_lock(&d->mm); if ( cmd == DOM0_SHADOW_CONTROL_OP_OFF ) { @@ -470,7 +470,7 @@ rc = -EINVAL; } - spin_unlock(&d->mm.shadow_lock); + shadow_unlock(&d->mm); domain_unpause(d); @@ -620,19 +620,19 @@ // take the lock and reread gpte - spin_lock(&current->mm.shadow_lock); + shadow_lock(m); if ( unlikely(__get_user(gpte, (unsigned long*)&linear_pg_table[va>>PAGE_SHIFT])) ) { SH_VVLOG("shadow_fault - EXIT: read gpte faulted" ); - spin_unlock(&m->shadow_lock); + shadow_unlock(m); return 0; // propagate to guest } if ( unlikely(!(gpte & _PAGE_PRESENT)) ) { SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte ); - spin_unlock(&m->shadow_lock); + shadow_unlock(m); return 0; // we're not going to be able to help } @@ -645,7 +645,7 @@ else { // write fault on RO page SH_VVLOG("shadow_fault - EXIT: write fault on RO page (%lx)",gpte ); - spin_unlock(&m->shadow_lock); + shadow_unlock(m); return 0; // propagate to guest // not clear whether we should set accessed bit here... } @@ -737,7 +737,7 @@ check_pagetable( current, current->mm.pagetable, "post-sf" ); - spin_unlock(&m->shadow_lock); + shadow_unlock(m); return 1; // let's try the faulting instruction again... 
diff -Nru a/xen/common/kernel.c b/xen/common/kernel.c --- a/xen/common/kernel.c 2004-08-27 13:00:47 -04:00 +++ b/xen/common/kernel.c 2004-08-27 13:00:47 -04:00 @@ -396,12 +396,20 @@ #ifndef NDEBUG -static int crit_count[NR_CPUS]; -static int crit_checking = 1; +static int crit_count[NR_CPUS], crit_checking_disabled[NR_CPUS]; void disable_criticalregion_checking(void) { - crit_checking = 0; + int cpu = smp_processor_id(); + ASSERT(crit_checking_disabled[cpu] >= 0); + crit_checking_disabled[cpu]++; +} + +void enable_criticalregion_checking(void) +{ + int cpu = smp_processor_id(); + crit_checking_disabled[cpu]--; + ASSERT(crit_checking_disabled[cpu] >= 0); } void criticalregion_enter(void) @@ -421,7 +429,7 @@ void ASSERT_no_criticalregion(void) { int cpu = smp_processor_id(); - if ( (crit_count[cpu] == 0) || !crit_checking ) + if ( (crit_count[cpu] == 0) || crit_checking_disabled[cpu] ) return; disable_criticalregion_checking(); ASSERT(crit_count[cpu] >= 0); /* -ve count is a special kind of bogus! 
*/ diff -Nru a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h --- a/xen/include/asm-x86/shadow.h 2004-08-27 13:00:47 -04:00 +++ b/xen/include/asm-x86/shadow.h 2004-08-27 13:00:47 -04:00 @@ -26,6 +26,8 @@ #define shadow_mode(_d) ((_d)->mm.shadow_mode) #define shadow_lock_init(_d) spin_lock_init(&(_d)->mm.shadow_lock) +#define shadow_lock(_m) spin_lock_nochecking(&(_m)->shadow_lock) +#define shadow_unlock(_m) spin_unlock_nochecking(&(_m)->shadow_lock) extern void shadow_mode_init(void); extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc); @@ -134,9 +136,9 @@ int rc; ASSERT(local_irq_is_enabled()); //if(spin_is_locked(&m->shadow_lock)) printk("+"); - spin_lock(&m->shadow_lock); + shadow_lock(m); rc = __mark_dirty( m, mfn ); - spin_unlock(&m->shadow_lock); + shadow_unlock(m); return rc; } @@ -388,20 +390,21 @@ ASSERT(local_irq_is_enabled()); //if(spin_is_locked(&m->shadow_lock)) printk("*"); - spin_lock(&m->shadow_lock); + shadow_lock(m); if( m->shadow_mode == SHM_logdirty ) __mark_dirty( m, gpfn ); res = __shadow_status( m, gpfn ); - if (!res) spin_unlock(&m->shadow_lock); + if (!res) + shadow_unlock(m); return res; } static inline void put_shadow_status( struct mm_struct *m ) { - spin_unlock(&m->shadow_lock); + shadow_unlock(m); } @@ -583,11 +586,9 @@ if ( unlikely(mm->shadow_mode) ) { ASSERT(local_irq_is_enabled()); - spin_lock(&mm->shadow_lock); - - __shadow_mk_pagetable( mm ); - - spin_unlock(&mm->shadow_lock); + shadow_lock(mm); + __shadow_mk_pagetable(mm); + shadow_unlock(mm); } SH_VVLOG("leaving shadow_mk_pagetable( gptbase=%08lx, mode=%d ) sh=%08lx", diff -Nru a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h --- a/xen/include/xen/spinlock.h 2004-08-27 13:00:47 -04:00 +++ b/xen/include/xen/spinlock.h 2004-08-27 13:00:47 -04:00 @@ -78,6 +78,7 @@ extern void criticalregion_exit(void); extern void ASSERT_no_criticalregion(void); extern void disable_criticalregion_checking(void); +extern void 
enable_criticalregion_checking(void); #define spin_lock(_lock) \ do { criticalregion_enter(); _raw_spin_lock(_lock); } while (0) @@ -111,6 +112,7 @@ #define ASSERT_no_criticalregion() ((void)0) #define disable_criticalregion_checking() ((void)0) +#define enable_criticalregion_checking() ((void)0) #define spin_lock(_lock) _raw_spin_lock(_lock) #define spin_trylock(_lock) _raw_spin_trylock(_lock) @@ -123,5 +125,14 @@ #define write_unlock(_lock) _raw_write_unlock(_lock) #endif + +/* + * Use these if you have taken special care to ensure that certain unsafe + * things can occur in your critical region (e.g., faults, user-space + * accesses). + */ +#define spin_lock_nochecking(_lock) _raw_spin_lock(_lock) +#define spin_trylock_nochecking(_lock) _raw_spin_trylock(_lock) +#define spin_unlock_nochecking(_lock) _raw_spin_unlock(_lock) #endif /* __SPINLOCK_H__ */ |