From: Yang, S. <she...@in...> - 2008-04-18 09:24:44
Attachments:
0003-KVM-MMU-Add-EPT-support.patch
From cb851671421832d37c7d90976b603b59a5c75c79 Mon Sep 17 00:00:00 2001
From: Sheng Yang <she...@in...>
Date: Fri, 18 Apr 2008 17:05:06 +0800
Subject: [PATCH 3/5] KVM: MMU: Add EPT support

Enable kvm_set_spte() to generate EPT entries.

Signed-off-by: Sheng Yang <she...@in...>
---
 arch/x86/kvm/mmu.c         |   44 ++++++++++++++++++++++++++++++++++----------
 arch/x86/kvm/x86.c         |    3 +++
 include/asm-x86/kvm_host.h |    3 +++
 3 files changed, 40 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 108886d..1828837 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -161,6 +161,12 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
+static u64 __read_mostly shadow_base_present_pte;
+static u64 __read_mostly shadow_nx_mask;
+static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
+static u64 __read_mostly shadow_user_mask;
+static u64 __read_mostly shadow_accessed_mask;
+static u64 __read_mostly shadow_dirty_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -169,6 +175,23 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
+void kvm_mmu_set_base_ptes(u64 base_pte)
+{
+	shadow_base_present_pte = base_pte;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
+
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+{
+	shadow_user_mask = user_mask;
+	shadow_accessed_mask = accessed_mask;
+	shadow_dirty_mask = dirty_mask;
+	shadow_nx_mask = nx_mask;
+	shadow_x_mask = x_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.cr0 & X86_CR0_WP;
@@ -207,7 +230,7 @@ static int is_writeble_pte(unsigned long pte)
 
 static int is_dirty_pte(unsigned long pte)
 {
-	return pte & PT_DIRTY_MASK;
+	return pte & shadow_dirty_mask;
 }
 
 static int is_rmap_pte(u64 pte)
@@ -522,7 +545,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		return;
 	sp = page_header(__pa(spte));
 	pfn = spte_to_pfn(*spte);
-	if (*spte & PT_ACCESSED_MASK)
+	if (*spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writeble_pte(*spte))
 		kvm_release_pfn_dirty(pfn);
@@ -1048,17 +1071,18 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+	spte = shadow_base_present_pte | shadow_dirty_mask;
 	if (!speculative)
 		pte_access |= PT_ACCESSED_MASK;
 	if (!dirty)
 		pte_access &= ~ACC_WRITE_MASK;
-	if (!(pte_access & ACC_EXEC_MASK))
-		spte |= PT64_NX_MASK;
-
-	spte |= PT_PRESENT_MASK;
+	if (pte_access & ACC_EXEC_MASK) {
+		if (shadow_x_mask)
+			spte |= shadow_x_mask;
+	} else if (shadow_nx_mask)
+		spte |= shadow_nx_mask;
 	if (pte_access & ACC_USER_MASK)
-		spte |= PT_USER_MASK;
+		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
 
@@ -1164,7 +1188,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 		}
 
 		table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
-			| PT_WRITABLE_MASK | PT_USER_MASK;
+			| PT_WRITABLE_MASK | shadow_user_mask;
 		}
 		table_addr = table[index] & PT64_BASE_ADDR_MASK;
 	}
@@ -1608,7 +1632,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
 	u64 *spte = vcpu->arch.last_pte_updated;
 
-	return !!(spte && (*spte & PT_ACCESSED_MASK));
+	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0ce5563..0735efb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2417,6 +2417,9 @@ int kvm_arch_init(void *opaque)
 
 	kvm_x86_ops = ops;
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
+	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
+	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
+			PT_DIRTY_MASK, PT64_NX_MASK, 0);
 	return 0;
 
 out:
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 31aa7d6..9f62773 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -432,6 +432,9 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
+void kvm_mmu_set_base_ptes(u64 base_pte);
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask);
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-- 
1.5.4.5
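A reading aid for the new exec/NX branch in mmu_set_spte(): with the default masks installed by the kvm_arch_init() hunk above (shadow_x_mask == 0, shadow_nx_mask == PT64_NX_MASK), the rewritten branch reduces to the old shadow-paging behaviour. This is an annotated sketch, not part of the patch:

	if (pte_access & ACC_EXEC_MASK) {
		if (shadow_x_mask)              /* 0 under legacy shadow paging: no bit set */
			spte |= shadow_x_mask;
	} else if (shadow_nx_mask)              /* PT64_NX_MASK under legacy shadow paging */
		spte |= shadow_nx_mask;         /* same effect as the old "spte |= PT64_NX_MASK" */

An EPT backend inverts the defaults (shadow_nx_mask == 0, shadow_x_mask set to the EPT execute-permission bit), so the same branch then marks executable mappings instead.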
From: Yang, S. <she...@in...> - 2008-04-25 08:20:53
Attachments:
0004-KVM-MMU-Add-EPT-support.patch
From 239f38236196c2321989c64d7c61ff28490b3f00 Mon Sep 17 00:00:00 2001
From: Sheng Yang <she...@in...>
Date: Fri, 25 Apr 2008 21:13:50 +0800
Subject: [PATCH 4/8] KVM: MMU: Add EPT support

Enable kvm_set_spte() to generate EPT entries.

Signed-off-by: Sheng Yang <she...@in...>
---
 arch/x86/kvm/mmu.c         |   47 ++++++++++++++++++++++++++++++++-----------
 arch/x86/kvm/x86.c         |    3 ++
 include/asm-x86/kvm_host.h |    3 ++
 3 files changed, 41 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bcfaf7e..c28a36b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -152,6 +152,12 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
+static u64 __read_mostly shadow_base_present_pte;
+static u64 __read_mostly shadow_nx_mask;
+static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
+static u64 __read_mostly shadow_user_mask;
+static u64 __read_mostly shadow_accessed_mask;
+static u64 __read_mostly shadow_dirty_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -160,6 +166,23 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
+void kvm_mmu_set_base_ptes(u64 base_pte)
+{
+	shadow_base_present_pte = base_pte;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
+
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+{
+	shadow_user_mask = user_mask;
+	shadow_accessed_mask = accessed_mask;
+	shadow_dirty_mask = dirty_mask;
+	shadow_nx_mask = nx_mask;
+	shadow_x_mask = x_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.cr0 & X86_CR0_WP;
@@ -198,7 +221,7 @@ static int is_writeble_pte(unsigned long pte)
 
 static int is_dirty_pte(unsigned long pte)
 {
-	return pte & PT_DIRTY_MASK;
+	return pte & shadow_dirty_mask;
 }
 
 static int is_rmap_pte(u64 pte)
@@ -513,7 +536,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		return;
 	sp = page_header(__pa(spte));
 	pfn = spte_to_pfn(*spte);
-	if (*spte & PT_ACCESSED_MASK)
+	if (*spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writeble_pte(*spte))
 		kvm_release_pfn_dirty(pfn);
@@ -1039,17 +1062,17 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+	spte = shadow_base_present_pte | shadow_dirty_mask;
 	if (!speculative)
 		pte_access |= PT_ACCESSED_MASK;
 	if (!dirty)
 		pte_access &= ~ACC_WRITE_MASK;
-	if (!(pte_access & ACC_EXEC_MASK))
-		spte |= PT64_NX_MASK;
-
-	spte |= PT_PRESENT_MASK;
+	if (pte_access & ACC_EXEC_MASK)
+		spte |= shadow_x_mask;
+	else
+		spte |= shadow_nx_mask;
 	if (pte_access & ACC_USER_MASK)
-		spte |= PT_USER_MASK;
+		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
 
@@ -1155,7 +1178,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 		}
 
 		table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
-			| PT_WRITABLE_MASK | PT_USER_MASK;
+			| PT_WRITABLE_MASK | shadow_user_mask;
 		}
 		table_addr = table[index] & PT64_BASE_ADDR_MASK;
 	}
@@ -1343,7 +1366,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
-			 largepage, gfn, pfn, TDP_ROOT_LEVEL);
+			 largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;
@@ -1450,7 +1473,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->page_fault = tdp_page_fault;
 	context->free = nonpaging_free;
 	context->prefetch_page = nonpaging_prefetch_page;
-	context->shadow_root_level = TDP_ROOT_LEVEL;
+	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 	context->root_hpa = INVALID_PAGE;
 
 	if (!is_paging(vcpu)) {
@@ -1599,7 +1622,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
 	u64 *spte = vcpu->arch.last_pte_updated;
 
-	return !!(spte && (*spte & PT_ACCESSED_MASK));
+	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0ce5563..0735efb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2417,6 +2417,9 @@ int kvm_arch_init(void *opaque)
 
 	kvm_x86_ops = ops;
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
+	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
+	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
+			PT_DIRTY_MASK, PT64_NX_MASK, 0);
 	return 0;
 
 out:
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 65b27c9..715f7b9 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -433,6 +433,9 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
+void kvm_mmu_set_base_ptes(u64 base_pte);
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-- 
1.5.4.5
From: Avi K. <av...@qu...> - 2008-04-27 09:38:20
Yang, Sheng wrote:
> From 239f38236196c2321989c64d7c61ff28490b3f00 Mon Sep 17 00:00:00 2001
> From: Sheng Yang <she...@in...>
> Date: Fri, 25 Apr 2008 21:13:50 +0800
> Subject: [PATCH 4/8] KVM: MMU: Add EPT support
>
> Enable kvm_set_spte() to generate EPT entries.
>
>
> @@ -1155,7 +1178,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v,
> int write,
> 		}
>
> 		table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
> -			| PT_WRITABLE_MASK | PT_USER_MASK;
> +			| PT_WRITABLE_MASK | shadow_user_mask;
> 	}
>

Shouldn't we have shadow_x_mask here as well?

[and for non-EPT, shadow_accessed_mask and shadow_dirty_mask, but that's a
different story].

-- 
error compiling committee.c: too many arguments to function
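For concreteness, the change Avi appears to be suggesting would make the intermediate-level entries carry the execute permission too, roughly as below (an untested sketch of the quoted hunk, not a patch from this thread; the non-EPT accessed/dirty question he raises in passing is not addressed here):

		table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
			| PT_WRITABLE_MASK | shadow_user_mask
			| shadow_x_mask;

Under the legacy shadow-paging setup shadow_x_mask is 0, so this would be a no-op there and only matter for an EPT-style backend.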
From: Anthony L. <an...@co...> - 2008-04-18 13:30:10
Yang, Sheng wrote:
> @@ -1048,17 +1071,18 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64
> *shadow_pte,
>  	 * whether the guest actually used the pte (in order to detect
>  	 * demand paging).
>  	 */
> -	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
> +	spte = shadow_base_present_pte | shadow_dirty_mask;
>  	if (!speculative)
>  		pte_access |= PT_ACCESSED_MASK;
>  	if (!dirty)
>  		pte_access &= ~ACC_WRITE_MASK;
> -	if (!(pte_access & ACC_EXEC_MASK))
> -		spte |= PT64_NX_MASK;
> -
> -	spte |= PT_PRESENT_MASK;
> +	if (pte_access & ACC_EXEC_MASK) {
> +		if (shadow_x_mask)
> +			spte |= shadow_x_mask;
> +	} else if (shadow_nx_mask)
> +		spte |= shadow_nx_mask;

This looks like it may be a bug.  The old behavior sets NX if (pte_access &
ACC_EXEC_MASK).  The new behavior unconditionally sets NX and never sets
PRESENT.  Also, the if (shadow_x_mask) checks are unnecessary; spte |= 0 is
a nop.

>  	if (pte_access & ACC_USER_MASK)
> -		spte |= PT_USER_MASK;
> +		spte |= shadow_user_mask;
>  	if (largepage)
>  		spte |= PT_PAGE_SIZE_MASK;

Regards,

Anthony Liguori
From: Yang, S. <she...@in...> - 2008-04-18 15:12:03
On Friday 18 April 2008 21:30:14 Anthony Liguori wrote:
> Yang, Sheng wrote:
> > @@ -1048,17 +1071,18 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu,
> > u64 *shadow_pte,
> >  	 * whether the guest actually used the pte (in order to detect
> >  	 * demand paging).
> >  	 */
> > -	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
> > +	spte = shadow_base_present_pte | shadow_dirty_mask;
> >  	if (!speculative)
> >  		pte_access |= PT_ACCESSED_MASK;
> >  	if (!dirty)
> >  		pte_access &= ~ACC_WRITE_MASK;
> > -	if (!(pte_access & ACC_EXEC_MASK))
> > -		spte |= PT64_NX_MASK;
> > -
> > -	spte |= PT_PRESENT_MASK;
> > +	if (pte_access & ACC_EXEC_MASK) {
> > +		if (shadow_x_mask)
> > +			spte |= shadow_x_mask;
> > +	} else if (shadow_nx_mask)
> > +		spte |= shadow_nx_mask;
>
> This looks like it may be a bug.  The old behavior sets NX if (pte_access
> & ACC_EXEC_MASK).  The new behavior unconditionally sets NX and never
> sets PRESENT.  Also, the if (shadow_x_mask) checks are unnecessary;
> spte |= 0 is a nop.

Thanks for the comment! I realized the two tests of shadow_nx/x_mask are
unnecessary... In fact, the correct behavior is to set either shadow_x_mask
or shadow_nx_mask; maybe there is a better approach for this. Logic that is
enforced by the program itself is always safer. But I will remove the
redundant code first.

But I don't think it's a bug. The old behavior set NX if (!(pte_access &
ACC_EXEC_MASK)), the same as the new one. I am also curious about the
PRESENT bit. You see, the PRESENT bit is already set at the beginning of the
code, and I really don't know why the duplicate one exists there...

> >  	if (pte_access & ACC_USER_MASK)
> > -		spte |= PT_USER_MASK;
> > +		spte |= shadow_user_mask;
> >  	if (largepage)
> >  		spte |= PT_PAGE_SIZE_MASK;

-- 
Thanks
Yang, Sheng

> Regards,
>
> Anthony Liguori
From: Anthony L. <an...@co...> - 2008-04-18 15:54:03
Yang, Sheng wrote:
> On Friday 18 April 2008 21:30:14 Anthony Liguori wrote:
>> Yang, Sheng wrote:
>>> @@ -1048,17 +1071,18 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu,
>>> u64 *shadow_pte,
>>>  	 * whether the guest actually used the pte (in order to detect
>>>  	 * demand paging).
>>>  	 */
>>> -	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
>>> +	spte = shadow_base_present_pte | shadow_dirty_mask;
>>>  	if (!speculative)
>>>  		pte_access |= PT_ACCESSED_MASK;
>>>  	if (!dirty)
>>>  		pte_access &= ~ACC_WRITE_MASK;
>>> -	if (!(pte_access & ACC_EXEC_MASK))
>>> -		spte |= PT64_NX_MASK;
>>> -
>>> -	spte |= PT_PRESENT_MASK;
>>> +	if (pte_access & ACC_EXEC_MASK) {
>>> +		if (shadow_x_mask)
>>> +			spte |= shadow_x_mask;
>>> +	} else if (shadow_nx_mask)
>>> +		spte |= shadow_nx_mask;
>>
>> This looks like it may be a bug.  The old behavior sets NX if
>> (pte_access & ACC_EXEC_MASK).  The new behavior unconditionally sets NX
>> and never sets PRESENT.  Also, the if (shadow_x_mask) checks are
>> unnecessary; spte |= 0 is a nop.
>
> Thanks for the comment! I realized the two tests of shadow_nx/x_mask are
> unnecessary... In fact, the correct behavior is to set either
> shadow_x_mask or shadow_nx_mask; maybe there is a better approach for
> this. Logic that is enforced by the program itself is always safer. But I
> will remove the redundant code first.
>
> But I don't think it's a bug. The old behavior set NX if (!(pte_access &
> ACC_EXEC_MASK)), the same as the new one.

The new behavior sets NX regardless of whether (pte_access &
ACC_EXEC_MASK).  Is the desired change to unconditionally set NX?

> I am also curious about the PRESENT bit. You see, the PRESENT bit is
> already set at the beginning of the code, and I really don't know why the
> duplicate one exists there...

Looking at the code, you appear to be right.

In the future, I think you should separate any cleanups (like removing the
redundant setting of PRESENT) into a separate patch and stick to just
programmatic changes of PT_USER_MASK => shadow_user_mask, etc. in this
patch.  That makes it a lot easier to review correctness.

Regards,

Anthony Liguori

>>>  	if (pte_access & ACC_USER_MASK)
>>> -		spte |= PT_USER_MASK;
>>> +		spte |= shadow_user_mask;
>>>  	if (largepage)
>>>  		spte |= PT_PAGE_SIZE_MASK;
From: Yang, S. <she...@in...> - 2008-04-20 13:46:35
On Friday 18 April 2008 23:54:04 Anthony Liguori wrote:
> Yang, Sheng wrote:
> > On Friday 18 April 2008 21:30:14 Anthony Liguori wrote:
> >> Yang, Sheng wrote:
> >>> @@ -1048,17 +1071,18 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu,
> >>> u64 *shadow_pte,
> >>>  	 * whether the guest actually used the pte (in order to detect
> >>>  	 * demand paging).
> >>>  	 */
> >>> -	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
> >>> +	spte = shadow_base_present_pte | shadow_dirty_mask;
> >>>  	if (!speculative)
> >>>  		pte_access |= PT_ACCESSED_MASK;
> >>>  	if (!dirty)
> >>>  		pte_access &= ~ACC_WRITE_MASK;
> >>> -	if (!(pte_access & ACC_EXEC_MASK))
> >>> -		spte |= PT64_NX_MASK;
> >>> -
> >>> -	spte |= PT_PRESENT_MASK;
> >>> +	if (pte_access & ACC_EXEC_MASK) {
> >>> +		if (shadow_x_mask)
> >>> +			spte |= shadow_x_mask;
> >>> +	} else if (shadow_nx_mask)
> >>> +		spte |= shadow_nx_mask;
> >>
> >> This looks like it may be a bug.  The old behavior sets NX if
> >> (pte_access & ACC_EXEC_MASK).  The new behavior unconditionally sets NX
> >> and never sets PRESENT.  Also, the if (shadow_x_mask) checks are
> >> unnecessary; spte |= 0 is a nop.
> >
> > Thanks for the comment! I realized the two tests of shadow_nx/x_mask are
> > unnecessary... In fact, the correct behavior is to set either
> > shadow_x_mask or shadow_nx_mask; maybe there is a better approach for
> > this. Logic that is enforced by the program itself is always safer. But
> > I will remove the redundant code first.
> >
> > But I don't think it's a bug. The old behavior set NX if (!(pte_access &
> > ACC_EXEC_MASK)), the same as the new one.
>
> The new behavior sets NX regardless of whether (pte_access &
> ACC_EXEC_MASK).  Is the desired change to unconditionally set NX?

Oh, I think I see the point now... shadow_x_mask != shadow_nx_mask.

The old behavior was:

	if (!(pte_access & ACC_EXEC_MASK))
		spte |= PT64_NX_MASK;

The new behavior is:

	if (pte_access & ACC_EXEC_MASK) {
		spte |= shadow_x_mask;
	} else
		spte |= shadow_nx_mask;

For the current (shadow paging) behavior, kvm_arch_init() has:

	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0);

which means shadow_nx_mask = PT64_NX_MASK and shadow_x_mask = 0 (NX means
not executable, and X means executable).

In patch 5/6, EPT has:

	kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
			VMX_EPT_FAKE_DIRTY_MASK, 0ull, VMX_EPT_EXECUTABLE_MASK);

which means shadow_nx_mask = 0 and shadow_x_mask = VMX_EPT_EXECUTABLE_MASK.

So, with shadow paging enabled and (!(pte_access & ACC_EXEC_MASK)), we get
spte |= shadow_nx_mask = PT64_NX_MASK (and no change when the condition is
not satisfied). With EPT enabled and (pte_access & ACC_EXEC_MASK), we get
spte |= shadow_x_mask = VMX_EPT_EXECUTABLE_MASK (and no change when the
condition is not satisfied). They are two different bits and mutually
exclusive. Maybe there is a better way to make their meaning clearer...

> > I am also curious about the PRESENT bit. You see, the PRESENT bit is
> > already set at the beginning of the code, and I really don't know why
> > the duplicate one exists there...
>
> Looking at the code, you appear to be right.
>
> In the future, I think you should separate any cleanups (like removing the
> redundant setting of PRESENT) into a separate patch and stick to just
> programmatic changes of PT_USER_MASK => shadow_user_mask, etc. in this
> patch.  That makes it a lot easier to review correctness.

Thanks for the advice; it is important to separate the cleanups. I will
handle this more carefully next time.

-- 
Thanks
Yang, Sheng

> Regards,
>
> Anthony Liguori
>
> >>>  	if (pte_access & ACC_USER_MASK)
> >>> -		spte |= PT_USER_MASK;
> >>> +		spte |= shadow_user_mask;
> >>>  	if (largepage)
> >>> 		spte |= PT_PAGE_SIZE_MASK;
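To summarize the mask interplay described above in one place, the two configurations quoted in the thread look like this side by side (values copied from the hunks above; the EPT call is patch 5 as quoted by Sheng and may differ in the final series):

	/* Legacy shadow paging (kvm_arch_init): NX is a "not executable" bit. */
	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0);

	/* EPT (patch 5 as quoted): X is a positive "executable" permission. */
	kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
			VMX_EPT_FAKE_DIRTY_MASK, 0ull, VMX_EPT_EXECUTABLE_MASK);

	/* Because exactly one of shadow_x_mask / shadow_nx_mask is non-zero,
	 * mmu_set_spte() can use a single branch for both backends:
	 */
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;		/* no-op under shadow paging */
	else
		spte |= shadow_nx_mask;		/* no-op under EPT */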