From: Zhang, X. <xia...@in...> - 2008-01-31 10:34:09
From: Zhang Xiantao <xia...@in...>
Date: Tue, 29 Jan 2008 14:30:36 +0800
Subject: [PATCH] kvm/ia64: Add interruption vector table for vmm.

vmm_ivt.S contains the interruption vector table used by the vmm.

Signed-off-by: Anthony Xu <ant...@in...>
Signed-off-by: Xiantao Zhang <xia...@in...>
---
 arch/ia64/kvm/vmm_ivt.S | 1423 +++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 1423 insertions(+), 0 deletions(-)
 create mode 100644 arch/ia64/kvm/vmm_ivt.S

diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
new file mode 100644
index 0000000..5ca8651
--- /dev/null
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -0,0 +1,1423 @@
+/*
+ * arch/ia64/kvm/vmm_ivt.S
+ *
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ *	Stephane Eranian <er...@hp...>
+ *	David Mosberger <da...@hp...>
+ * Copyright (C) 2000, 2002-2003 Intel Co
+ *	Asit Mallick <asi...@in...>
+ *	Suresh Siddha <sur...@in...>
+ *	Kenneth Chen <ken...@in...>
+ *	Fenghua Yu <fen...@in...>
+ *
+ * 00/08/23 Asit Mallick <asi...@in...> TLB handling for SMP
+ * 00/12/20 David Mosberger-Tang <da...@hp...> DTLB/ITLB handler now
+ *	    uses virtual PT.
+ *
+ * 07/6/20 Xuefei Xu (Anthony Xu) (ant...@in...)
+ *	    Supporting Intel virtualization architecture
+ */
+
+/*
+ * This file defines the interruption vector table used by the CPU.
+ * It does not include one entry per possible cause of interruption.
+ *
+ * The first 20 entries of the table contain 64 bundles each while the
+ * remaining 48 entries contain only 16 bundles each.
+ *
+ * The 64 bundles are used to allow inlining the whole handler for
+ * critical interruptions like TLB misses.
+ *
+ * For each entry, the comment is as follows:
+ *
+ *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ *	entry offset ----/     /         /                  /         /
+ *	entry number ---------/         /                  /         /
+ *	size of the entry -------------/                  /         /
+ *	vector name -------------------------------------/         /
+ *	interruptions triggering this vector ----------------------/
+ *
+ * The table is 32KB in size and must be aligned on a 32KB boundary.
+ * (The CPU ignores the 15 lower bits of the address)
+ *
+ * Table is based upon EAS2.6 (Oct 1999)
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/cache.h>
+#include <asm/pgtable.h>
+
+#include "asm-offsets.h"
+#include "vcpu.h"
+#include "kvm_minstate.h"
+#include "vti.h"
+
+#if 1
+# define PSR_DEFAULT_BITS	psr.ac
+#else
+# define PSR_DEFAULT_BITS	0
+#endif
+
+#define KVM_FAULT(n)				\
+	kvm_fault_##n:;				\
+	mov r19=n;;				\
+	br.sptk.many kvm_fault_##n;		\
+	;;					\
+
+#define KVM_REFLECT(n)				\
+	mov r31=pr;				\
+	mov r19=n;	/* prepare to save predicates */ \
+	mov r29=cr.ipsr;			\
+	;;					\
+	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;	\
+(p7)	br.sptk.many kvm_dispatch_reflection;	\
+	br.sptk.many kvm_panic;			\
+
+GLOBAL_ENTRY(kvm_panic)
+	br.sptk.many kvm_panic
+	;;
+END(kvm_panic)
+
+	.section .text.ivt,"ax"
+
+	.align 32768	// align on 32KB boundary
+	.global kvm_ia64_ivt
+kvm_ia64_ivt:
+///////////////////////////////////////////////////////////////
+// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
+ENTRY(kvm_vhpt_miss)
+	KVM_FAULT(0)
+END(kvm_vhpt_miss)
+
+	.org kvm_ia64_ivt+0x400
+////////////////////////////////////////////////////////////////
+// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
+ENTRY(kvm_itlb_miss)
+	mov r31=pr
+	mov r29=cr.ipsr
+	;;
+	tbit.z p6,p7=r29,IA64_PSR_VM_BIT
+(p6)	br.sptk kvm_alt_itlb_miss
+	mov r19=1
+	br.sptk kvm_itlb_miss_dispatch
+	KVM_FAULT(1)
+END(kvm_itlb_miss)
+
+	.org kvm_ia64_ivt+0x0800
+//////////////////////////////////////////////////////////////////
+// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
+ENTRY(kvm_dtlb_miss)
+	mov r31=pr
+	mov r29=cr.ipsr
+	;;
+	tbit.z p6,p7=r29,IA64_PSR_VM_BIT
+(p6)	br.sptk kvm_alt_dtlb_miss
+	br.sptk kvm_dtlb_miss_dispatch
+END(kvm_dtlb_miss)
+
+	.org kvm_ia64_ivt+0x0c00
+////////////////////////////////////////////////////////////////////
+// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
+ENTRY(kvm_alt_itlb_miss)
+	mov r16=cr.ifa		// get address that caused the TLB miss
+	;;
+	movl r17=PAGE_KERNEL
+	mov r24=cr.ipsr
+	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+	;;
+	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
+	;;
+	or r19=r17,r19		// insert PTE control bits into r19
+	;;
+	movl r20=IA64_GRANULE_SHIFT<<2
+	;;
+	mov cr.itir=r20
+	;;
+	itc.i r19		// insert the TLB entry
+	mov pr=r31,-1
+	rfi
+END(kvm_alt_itlb_miss)
+
+	.org kvm_ia64_ivt+0x1000
+/////////////////////////////////////////////////////////////////////
+// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
+ENTRY(kvm_alt_dtlb_miss)
+	mov r16=cr.ifa		// get address that caused the TLB miss
+	;;
+	movl r17=PAGE_KERNEL
+	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+	mov r24=cr.ipsr
+	;;
+	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
+	;;
+	or r19=r19,r17		// insert PTE control bits into r19
+	;;
+	movl r20=IA64_GRANULE_SHIFT<<2
+	;;
+	mov cr.itir=r20
+	;;
+	itc.d r19		// insert the TLB entry
+	mov pr=r31,-1
+	rfi
+END(kvm_alt_dtlb_miss)
+
+	.org kvm_ia64_ivt+0x1400
+//////////////////////////////////////////////////////////////////////
+// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
+ENTRY(kvm_nested_dtlb_miss)
+	KVM_FAULT(5)
+END(kvm_nested_dtlb_miss)
+
+	.org kvm_ia64_ivt+0x1800
+/////////////////////////////////////////////////////////////////////
+// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
+ENTRY(kvm_ikey_miss)
+	KVM_REFLECT(6)
+END(kvm_ikey_miss)
+
+	.org kvm_ia64_ivt+0x1c00
+/////////////////////////////////////////////////////////////////////
+// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ENTRY(kvm_dkey_miss)
+	KVM_REFLECT(7)
+END(kvm_dkey_miss)
+
+	.org kvm_ia64_ivt+0x2000
+////////////////////////////////////////////////////////////////////
+// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
+ENTRY(kvm_dirty_bit)
+	KVM_REFLECT(8)
+END(kvm_dirty_bit)
+
+	.org kvm_ia64_ivt+0x2400
+////////////////////////////////////////////////////////////////////
+// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
+ENTRY(kvm_iaccess_bit)
+	KVM_REFLECT(9)
+END(kvm_iaccess_bit)
+
+	.org kvm_ia64_ivt+0x2800
+///////////////////////////////////////////////////////////////////
+// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
+ENTRY(kvm_daccess_bit)
+	KVM_REFLECT(10)
+END(kvm_daccess_bit)
+
+	.org kvm_ia64_ivt+0x2c00
+/////////////////////////////////////////////////////////////////
+// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
+ENTRY(kvm_break_fault)
+	mov r31=pr
+	mov r19=11
+	mov r29=cr.ipsr
+	;;
+	KVM_SAVE_MIN_WITH_COVER_R19
+	;;
+	alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
+	mov out0=cr.ifa
+	mov out2=cr.isr		// FIXME: pity to make this slow access twice
+	mov out3=cr.iim		// FIXME: pity to make this slow access twice
+	adds r3=8,r2		// set up second base pointer
+	;;
+	ssm psr.ic
+	;;
+	srlz.i			// guarantee that interruption collection is on
+	;;
+	//(p15)ssm psr.i	// restore psr.i
+	addl r14=@gprel(ia64_leave_hypervisor),gp
+	;;
+	KVM_SAVE_REST
+	mov rp=r14
+	;;
+	adds out1=16,sp
+	br.call.sptk.many b6=kvm_ia64_handle_break
+	;;
+END(kvm_break_fault)
+
+	.org kvm_ia64_ivt+0x3000
+/////////////////////////////////////////////////////////////////
+// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
+ENTRY(kvm_interrupt)
+	mov r31=pr		// prepare to save predicates
+	mov r19=12
+	mov r29=cr.ipsr
+	;;
+	tbit.z p6,p7=r29,IA64_PSR_VM_BIT
+	tbit.z p0,p15=r29,IA64_PSR_I_BIT
+	;;
+(p7)	br.sptk kvm_dispatch_interrupt
+	;;
+	mov r27=ar.rsc		/* M */
+	mov r20=r1		/* A */
+	mov r25=ar.unat		/* M */
+	mov r26=ar.pfs		/* I */
+	mov r28=cr.iip		/* M */
+	cover			/* B (or nothing) */
+	;;
+	mov r1=sp
+	;;
+	invala			/* M */
+	mov r30=cr.ifs
+	;;
+	addl r1=-VMM_PT_REGS_SIZE,r1
+	;;
+	adds r17=2*L1_CACHE_BYTES,r1	/* really: biggest cache-line size */
+	adds r16=PT(CR_IPSR),r1
+	;;
+	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
+	st8 [r16]=r29		/* save cr.ipsr */
+	;;
+	lfetch.fault.excl.nt1 [r17]
+	mov r29=b0
+	;;
+	adds r16=PT(R8),r1	/* initialize first base pointer */
+	adds r17=PT(R9),r1	/* initialize second base pointer */
+	mov r18=r0		/* make sure r18 isn't NaT */
+	;;
+.mem.offset 0,0; st8.spill [r16]=r8,16
+.mem.offset 8,0; st8.spill [r17]=r9,16
+	;;
+.mem.offset 0,0; st8.spill [r16]=r10,24
+.mem.offset 8,0; st8.spill [r17]=r11,24
+	;;
+	st8 [r16]=r28,16	/* save cr.iip */
+	st8 [r17]=r30,16	/* save cr.ifs */
+	mov r8=ar.fpsr		/* M */
+	mov r9=ar.csd
+	mov r10=ar.ssd
+	movl r11=FPSR_DEFAULT	/* L-unit */
+	;;
+	st8 [r16]=r25,16	/* save ar.unat */
+	st8 [r17]=r26,16	/* save ar.pfs */
+	shl r18=r18,16		/* compute ar.rsc to be used for "loadrs" */
+	;;
+	st8 [r16]=r27,16	/* save ar.rsc */
+	adds r17=16,r17		/* skip over ar_rnat field */
+	;;
+	st8 [r17]=r31,16	/* save predicates */
+	adds r16=16,r16		/* skip over ar_bspstore field */
+	;;
+	st8 [r16]=r29,16	/* save b0 */
+	st8 [r17]=r18,16	/* save ar.rsc value for "loadrs" */
+	;;
+.mem.offset 0,0; st8.spill [r16]=r20,16	/* save original r1 */
+.mem.offset 8,0; st8.spill [r17]=r12,16
+	adds r12=-16,r1
+	/* switch to kernel memory stack (with 16 bytes of scratch) */
+	;;
+.mem.offset 0,0; st8.spill [r16]=r13,16
+.mem.offset 8,0; st8.spill [r17]=r8,16	/* save ar.fpsr */
+	;;
+.mem.offset 0,0; st8.spill [r16]=r15,16
+.mem.offset 8,0; st8.spill [r17]=r14,16
+	dep r14=-1,r0,60,4
+	;;
+.mem.offset 0,0; st8.spill [r16]=r2,16
+.mem.offset 8,0; st8.spill [r17]=r3,16
+	adds r2=VMM_PT_REGS_R16_OFFSET,r1
+	adds r14=VMM_VCPU_GP_OFFSET,r13
+	;;
+	mov r8=ar.ccv
+	ld8 r14=[r14]
+	;;
+	mov r1=r14	/* establish kernel global pointer */
+	;;
+	bsw.1
+	;;
+	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
+	mov out0=r13
+	;;
+	ssm psr.ic
+	;;
+	srlz.i
+	;;
+	//(p15) ssm psr.i
+	adds r3=8,r2		// set up second base pointer for SAVE_REST
+	srlz.i			// ensure everybody knows psr.ic is back on
+	;;
+.mem.offset 0,0; st8.spill [r2]=r16,16
+.mem.offset 8,0; st8.spill [r3]=r17,16
+	;;
+.mem.offset 0,0; st8.spill [r2]=r18,16
+.mem.offset 8,0; st8.spill [r3]=r19,16
+	;;
+.mem.offset 0,0; st8.spill [r2]=r20,16
+.mem.offset 8,0; st8.spill [r3]=r21,16
+	mov r18=b6
+	;;
+.mem.offset 0,0; st8.spill [r2]=r22,16
+.mem.offset 8,0; st8.spill [r3]=r23,16
+	mov r19=b7
+	;;
+.mem.offset 0,0; st8.spill [r2]=r24,16
+.mem.offset 8,0; st8.spill [r3]=r25,16
+	;;
+.mem.offset 0,0; st8.spill [r2]=r26,16
+.mem.offset 8,0; st8.spill [r3]=r27,16
+	;;
+.mem.offset 0,0; st8.spill [r2]=r28,16
+.mem.offset 8,0; st8.spill [r3]=r29,16
+	;;
+.mem.offset 0,0; st8.spill [r2]=r30,16
+.mem.offset 8,0; st8.spill [r3]=r31,32
+	;;
+	mov ar.fpsr=r11		/* M-unit */
+	st8 [r2]=r8,8		/* ar.ccv */
+	adds r24=PT(B6)-PT(F7),r3
+	;;
+	stf.spill [r2]=f6,32
+	stf.spill [r3]=f7,32
+	;;
+	stf.spill [r2]=f8,32
+	stf.spill [r3]=f9,32
+	;;
+	stf.spill [r2]=f10
+	stf.spill [r3]=f11
+	adds r25=PT(B7)-PT(F11),r3
+	;;
+	st8 [r24]=r18,16	/* b6 */
+	st8 [r25]=r19,16	/* b7 */
+	;;
+	st8 [r24]=r9		/* ar.csd */
+	st8 [r25]=r10		/* ar.ssd */
+	;;
+	srlz.d			// make sure we see the effect of cr.ivr
+	addl r14=@gprel(ia64_leave_nested),gp
+	;;
+	mov rp=r14
+	br.call.sptk.many b6=kvm_ia64_handle_irq
+	;;
+END(kvm_interrupt)
+
+	.global kvm_dispatch_vexirq
+	.org kvm_ia64_ivt+0x3400
+//////////////////////////////////////////////////////////////////////
+// 0x3400 Entry 13 (size 64 bundles) Reserved
+ENTRY(kvm_virtual_exirq)
+	mov r31=pr
+	mov r19=13
+	mov r30=r0
+	;;
+kvm_dispatch_vexirq:
+	cmp.eq p6,p0=1,r30
+	;;
+(p6)	add r29=VMM_VCPU_SAVED_GP_OFFSET,r21
+	;;
+(p6)	ld8 r1=[r29]
+	;;
+	KVM_SAVE_MIN_WITH_COVER_R19
+	alloc r14=ar.pfs,0,0,1,0
+	mov out0=r13
+
+	ssm psr.ic
+	;;
+	srlz.i			// guarantee that interruption collection is on
+	;;
+	//(p15) ssm psr.i	// restore psr.i
+	adds r3=8,r2		// set up second base pointer
+	;;
+	KVM_SAVE_REST
+	addl r14=@gprel(ia64_leave_hypervisor),gp
+	;;
+	mov rp=r14
+	br.call.sptk.many b6=kvm_vexirq
+END(kvm_virtual_exirq)
+
+	.org kvm_ia64_ivt+0x3800
+/////////////////////////////////////////////////////////////////////
+// 0x3800 Entry 14 (size 64 bundles) Reserved
+	KVM_FAULT(14)
+	// this code segment is from 2.6.16.13
+
+	.org kvm_ia64_ivt+0x3c00
+///////////////////////////////////////////////////////////////////////
+// 0x3c00 Entry 15 (size 64 bundles) Reserved
+	KVM_FAULT(15)
+
+	.org kvm_ia64_ivt+0x4000
+///////////////////////////////////////////////////////////////////////
+// 0x4000 Entry 16 (size 64 bundles) Reserved
+	KVM_FAULT(16)
+
+	.org kvm_ia64_ivt+0x4400
+//////////////////////////////////////////////////////////////////////
+// 0x4400 Entry 17 (size 64 bundles) Reserved
+	KVM_FAULT(17)
+
+	.org kvm_ia64_ivt+0x4800
+//////////////////////////////////////////////////////////////////////
+// 0x4800 Entry 18 (size 64 bundles) Reserved
+	KVM_FAULT(18)
+
+	.org kvm_ia64_ivt+0x4c00
+//////////////////////////////////////////////////////////////////////
+// 0x4c00 Entry 19 (size 64 bundles) Reserved
+	KVM_FAULT(19)
+
+	.org kvm_ia64_ivt+0x5000
+//////////////////////////////////////////////////////////////////////
+// 0x5000 Entry 20 (size 16 bundles) Page Not Present
+ENTRY(kvm_page_not_present)
+	KVM_REFLECT(20)
+END(kvm_page_not_present)
+
+	.org kvm_ia64_ivt+0x5100
+///////////////////////////////////////////////////////////////////////
+// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
+ENTRY(kvm_key_permission)
+	KVM_REFLECT(21)
+END(kvm_key_permission)
+
+	.org kvm_ia64_ivt+0x5200
+//////////////////////////////////////////////////////////////////////
+// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
+ENTRY(kvm_iaccess_rights)
+	KVM_REFLECT(22)
+END(kvm_iaccess_rights)
+
+	.org kvm_ia64_ivt+0x5300
+//////////////////////////////////////////////////////////////////////
+// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
+ENTRY(kvm_daccess_rights)
+	KVM_REFLECT(23)
+END(kvm_daccess_rights)
+
+	.org kvm_ia64_ivt+0x5400
+/////////////////////////////////////////////////////////////////////
+// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
+ENTRY(kvm_general_exception)
+	KVM_REFLECT(24)
+	KVM_FAULT(24)
+END(kvm_general_exception)
+
+	.org kvm_ia64_ivt+0x5500
+//////////////////////////////////////////////////////////////////////
+// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
+ENTRY(kvm_disabled_fp_reg)
+	KVM_REFLECT(25)
+END(kvm_disabled_fp_reg)
+
+	.org kvm_ia64_ivt+0x5600
+////////////////////////////////////////////////////////////////////
+// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
+ENTRY(kvm_nat_consumption)
+	KVM_REFLECT(26)
+END(kvm_nat_consumption)
+
+	.org kvm_ia64_ivt+0x5700
+/////////////////////////////////////////////////////////////////////
+// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
+ENTRY(kvm_speculation_vector)
+	KVM_REFLECT(27)
+END(kvm_speculation_vector)
+
+	.org kvm_ia64_ivt+0x5800
+/////////////////////////////////////////////////////////////////////
+// 0x5800 Entry 28 (size 16 bundles) Reserved
+	KVM_FAULT(28)
+
+	.org kvm_ia64_ivt+0x5900
+///////////////////////////////////////////////////////////////////
+// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
+ENTRY(kvm_debug_vector)
+	KVM_FAULT(29)
+END(kvm_debug_vector)
+
+	.org kvm_ia64_ivt+0x5a00
+///////////////////////////////////////////////////////////////
+// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
+ENTRY(kvm_unaligned_access)
+	KVM_REFLECT(30)
+END(kvm_unaligned_access)
+
+	.org kvm_ia64_ivt+0x5b00
+//////////////////////////////////////////////////////////////////////
+// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
+ENTRY(kvm_unsupported_data_reference)
+	KVM_REFLECT(31)
+END(kvm_unsupported_data_reference)
+
+	.org kvm_ia64_ivt+0x5c00
+////////////////////////////////////////////////////////////////////
+// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
+ENTRY(kvm_floating_point_fault)
+	KVM_REFLECT(32)
+END(kvm_floating_point_fault)
+
+	.org kvm_ia64_ivt+0x5d00
+/////////////////////////////////////////////////////////////////////
+// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
+ENTRY(kvm_floating_point_trap)
+	KVM_REFLECT(33)
+END(kvm_floating_point_trap)
+
+	.org kvm_ia64_ivt+0x5e00
+//////////////////////////////////////////////////////////////////////
+// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
+ENTRY(kvm_lower_privilege_trap)
+	KVM_REFLECT(34)
+END(kvm_lower_privilege_trap)
+
+	.org kvm_ia64_ivt+0x5f00
+//////////////////////////////////////////////////////////////////////
+// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
+ENTRY(kvm_taken_branch_trap)
+	KVM_REFLECT(35)
+END(kvm_taken_branch_trap)
+
+	.org kvm_ia64_ivt+0x6000
+////////////////////////////////////////////////////////////////////
+// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
+ENTRY(kvm_single_step_trap)
+	KVM_REFLECT(36)
+END(kvm_single_step_trap)
+
+	.global kvm_virtualization_fault_back
+	.org kvm_ia64_ivt+0x6100
+/////////////////////////////////////////////////////////////////////
+// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
+ENTRY(kvm_virtualization_fault)
+	mov r31=pr
+	adds r16=VMM_VCPU_SAVED_GP_OFFSET,r21
+	;;
+	st8 [r16]=r1
+	adds r17=VMM_VCPU_GP_OFFSET,r21
+	;;
+	ld8 r1=[r17]
+	cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
+	cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
+	cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
+	cmp.eq p9,p0=EVENT_RSM,r24
+	cmp.eq p10,p0=EVENT_SSM,r24
+	cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
+	cmp.eq p12,p0=EVENT_THASH,r24
+(p6)	br.dptk.many kvm_asm_mov_from_ar
+(p7)	br.dptk.many kvm_asm_mov_from_rr
+(p8)	br.dptk.many kvm_asm_mov_to_rr
+(p9)	br.dptk.many kvm_asm_rsm
+(p10)	br.dptk.many kvm_asm_ssm
+(p11)	br.dptk.many kvm_asm_mov_to_psr
+(p12)	br.dptk.many kvm_asm_thash
+	;;
+kvm_virtualization_fault_back:
+	adds r16=VMM_VCPU_SAVED_GP_OFFSET,r21
+	;;
+	ld8 r1=[r16]
+	;;
+	mov r19=37
+	adds r16=VMM_VCPU_CAUSE_OFFSET,r21
+	adds r17=VMM_VCPU_OPCODE_OFFSET,r21
+	;;
+	st8 [r16]=r24
+	st8 [r17]=r25
+	;;
+	cmp.ne p6,p0=EVENT_RFI,r24
+(p6)	br.sptk kvm_dispatch_virtualization_fault
+	;;
+	adds r18=VMM_VPD_BASE_OFFSET,r21
+	;;
+	ld8 r18=[r18]
+	;;
+	adds r18=VMM_VPD_VIFS_OFFSET,r18
+	;;
+	ld8 r18=[r18]
+	;;
+	tbit.z p6,p0=r18,63
+(p6)	br.sptk kvm_dispatch_virtualization_fault
+	;;
+	// if vifs.v=1, discard the current register frame
+	alloc r18=ar.pfs,0,0,0,0
+	br.sptk kvm_dispatch_virtualization_fault
+END(kvm_virtualization_fault)
+
+	.org kvm_ia64_ivt+0x6200
+//////////////////////////////////////////////////////////////
+// 0x6200 Entry 38 (size 16 bundles) Reserved
+	KVM_FAULT(38)
+
+	.org kvm_ia64_ivt+0x6300
+/////////////////////////////////////////////////////////////////
+// 0x6300 Entry 39 (size 16 bundles) Reserved
+	KVM_FAULT(39)
+
+	.org kvm_ia64_ivt+0x6400
+/////////////////////////////////////////////////////////////////
+// 0x6400 Entry 40 (size 16 bundles) Reserved
+	KVM_FAULT(40)
+
+	.org kvm_ia64_ivt+0x6500
+//////////////////////////////////////////////////////////////////
+// 0x6500 Entry 41 (size 16 bundles) Reserved
+	KVM_FAULT(41)
+
+	.org kvm_ia64_ivt+0x6600
+//////////////////////////////////////////////////////////////////
+// 0x6600 Entry 42 (size 16 bundles) Reserved
+	KVM_FAULT(42)
+
+	.org kvm_ia64_ivt+0x6700
+//////////////////////////////////////////////////////////////////
+// 0x6700 Entry 43 (size 16 bundles) Reserved
+	KVM_FAULT(43)
+
+	.org kvm_ia64_ivt+0x6800
+//////////////////////////////////////////////////////////////////
+// 0x6800 Entry 44 (size 16 bundles) Reserved
+	KVM_FAULT(44)
+
+	.org kvm_ia64_ivt+0x6900
+///////////////////////////////////////////////////////////////////
+// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
+// (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
+ENTRY(kvm_ia32_exception)
+	KVM_FAULT(45)
+END(kvm_ia32_exception)
+
+	.org kvm_ia64_ivt+0x6a00
+////////////////////////////////////////////////////////////////////
+// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
+ENTRY(kvm_ia32_intercept)
+	KVM_FAULT(46)
+END(kvm_ia32_intercept)
+
+	.org kvm_ia64_ivt+0x6c00
+/////////////////////////////////////////////////////////////////////
+// 0x6c00 Entry 48 (size 16 bundles) Reserved
+	KVM_FAULT(48)
+
+	.org kvm_ia64_ivt+0x6d00
+//////////////////////////////////////////////////////////////////////
+// 0x6d00 Entry 49 (size 16 bundles) Reserved
+	KVM_FAULT(49)
+
+	.org kvm_ia64_ivt+0x6e00
+//////////////////////////////////////////////////////////////////////
+// 0x6e00 Entry 50 (size 16 bundles) Reserved
+	KVM_FAULT(50)
+
+	.org kvm_ia64_ivt+0x6f00
+/////////////////////////////////////////////////////////////////////
+// 0x6f00 Entry 51 (size 16 bundles) Reserved
+	KVM_FAULT(51)
+
+	.org kvm_ia64_ivt+0x7000
+////////////////////////////////////////////////////////////////////
+// 0x7000 Entry 52 (size 16 bundles) Reserved
+	KVM_FAULT(52)
+
+	.org kvm_ia64_ivt+0x7100
+////////////////////////////////////////////////////////////////////
+// 0x7100 Entry 53 (size 16 bundles) Reserved
+	KVM_FAULT(53)
+
+	.org kvm_ia64_ivt+0x7200
+/////////////////////////////////////////////////////////////////////
+// 0x7200 Entry 54 (size 16 bundles) Reserved
+	KVM_FAULT(54)
+
+	.org kvm_ia64_ivt+0x7300
+////////////////////////////////////////////////////////////////////
+// 0x7300 Entry 55 (size 16 bundles) Reserved
+	KVM_FAULT(55)
+
+	.org kvm_ia64_ivt+0x7400
+////////////////////////////////////////////////////////////////////
+// 0x7400 Entry 56 (size 16 bundles) Reserved
+	KVM_FAULT(56)
+
+	.org kvm_ia64_ivt+0x7500
+/////////////////////////////////////////////////////////////////////
+// 0x7500 Entry 57 (size 16 bundles) Reserved
+	KVM_FAULT(57)
+
+	.org kvm_ia64_ivt+0x7600
+/////////////////////////////////////////////////////////////////////
+// 0x7600 Entry 58 (size 16 bundles) Reserved
+	KVM_FAULT(58)
+
+	.org kvm_ia64_ivt+0x7700
+////////////////////////////////////////////////////////////////////
+// 0x7700 Entry 59 (size 16 bundles) Reserved
+	KVM_FAULT(59)
+
+	.org kvm_ia64_ivt+0x7800
+////////////////////////////////////////////////////////////////////
+// 0x7800 Entry 60 (size 16 bundles) Reserved
+	KVM_FAULT(60)
+
+	.org kvm_ia64_ivt+0x7900
+/////////////////////////////////////////////////////////////////////
+// 0x7900 Entry 61 (size 16 bundles) Reserved
+	KVM_FAULT(61)
+
+	.org kvm_ia64_ivt+0x7a00
+/////////////////////////////////////////////////////////////////////
+// 0x7a00 Entry 62 (size 16 bundles) Reserved
+	KVM_FAULT(62)
+
+	.org kvm_ia64_ivt+0x7b00
+/////////////////////////////////////////////////////////////////////
+// 0x7b00 Entry 63 (size 16 bundles) Reserved
+	KVM_FAULT(63)
+
+	.org kvm_ia64_ivt+0x7c00
+////////////////////////////////////////////////////////////////////
+// 0x7c00 Entry 64 (size 16 bundles) Reserved
+	KVM_FAULT(64)
+
+	.org kvm_ia64_ivt+0x7d00
+/////////////////////////////////////////////////////////////////////
+// 0x7d00 Entry 65 (size 16 bundles) Reserved
+	KVM_FAULT(65)
+
+	.org kvm_ia64_ivt+0x7e00
+/////////////////////////////////////////////////////////////////////
+// 0x7e00 Entry 66 (size 16 bundles) Reserved
+	KVM_FAULT(66)
+
+	.org kvm_ia64_ivt+0x7f00
+////////////////////////////////////////////////////////////////////
+// 0x7f00 Entry 67 (size 16 bundles) Reserved
+	KVM_FAULT(67)
+
+	.org kvm_ia64_ivt+0x8000
+// There is no particular reason for this code to be here, other than that
+// there happens to be space here that would go unused otherwise.  If this
+// fault ever gets "unreserved", simply move the following code to a more
+// suitable spot...
+
+ENTRY(kvm_dtlb_miss_dispatch)
+	mov r19=2
+	KVM_SAVE_MIN_WITH_COVER_R19
+	alloc r14=ar.pfs,0,0,3,0
+	mov out0=cr.ifa
+	mov out1=r15
+	adds r3=8,r2		// set up second base pointer
+	;;
+	ssm psr.ic
+	;;
+	srlz.i			// guarantee that interruption collection is on
+	;;
+	//(p15) ssm psr.i	// restore psr.i
+	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
+	;;
+	KVM_SAVE_REST
+	KVM_SAVE_EXTRA
+	mov rp=r14
+	;;
+	adds out2=16,r12
+	br.call.sptk.many b6=kvm_page_fault
+END(kvm_dtlb_miss_dispatch)
+
+ENTRY(kvm_itlb_miss_dispatch)
+	KVM_SAVE_MIN_WITH_COVER_R19
+	alloc r14=ar.pfs,0,0,3,0
+	mov out0=cr.ifa
+	mov out1=r15
+	adds r3=8,r2		// set up second base pointer
+	;;
+	ssm psr.ic
+	;;
+	srlz.i			// guarantee that interruption collection is on
+	;;
+	//(p15) ssm psr.i	// restore psr.i
+	addl r14=@gprel(ia64_leave_hypervisor),gp
+	;;
+	KVM_SAVE_REST
+	mov rp=r14
+	;;
+	adds out2=16,r12
+	br.call.sptk.many b6=kvm_page_fault
+END(kvm_itlb_miss_dispatch)
+
+ENTRY(kvm_dispatch_reflection)
+	/*
+	 * Input:
+	 *	psr.ic:	off
+	 *	r19:	intr type (offset into ivt, see ia64_int.h)
+	 *	r31:	contains saved predicates (pr)
+	 */
+	KVM_SAVE_MIN_WITH_COVER_R19
+	alloc r14=ar.pfs,0,0,5,0
+	mov out0=cr.ifa
+	mov out1=cr.isr
+	mov out2=cr.iim
+	mov out3=r15
+	adds r3=8,r2		// set up second base pointer
+	;;
+	ssm psr.ic
+	;;
+	srlz.i			// guarantee that interruption collection is on
+	;;
+	//(p15) ssm psr.i	// restore psr.i
+	addl r14=@gprel(ia64_leave_hypervisor),gp
+	;;
+	KVM_SAVE_REST
+	mov rp=r14
+	;;
+	adds out4=16,r12
+	br.call.sptk.many b6=reflect_interruption
+END(kvm_dispatch_reflection)
+
+ENTRY(kvm_dispatch_virtualization_fault)
+	adds r16=VMM_VCPU_CAUSE_OFFSET,r21
+	adds r17=VMM_VCPU_OPCODE_OFFSET,r21
+	;;
+	st8 [r16]=r24
+	st8 [r17]=r25
+	;;
+	KVM_SAVE_MIN_WITH_COVER_R19
+	;;
+	alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
+	mov out0=r13		//vcpu
+	adds r3=8,r2		// set up second base pointer
+	;;
+	ssm psr.ic
+	;;
+	srlz.i			// guarantee that interruption collection is on
+	;;
+	//(p15) ssm psr.i	// restore psr.i
+	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
+	;;
+	KVM_SAVE_REST
+	KVM_SAVE_EXTRA
+	mov rp=r14
+	;;
+	adds out1=16,sp		//regs
+	br.call.sptk.many b6=kvm_emulate
+END(kvm_dispatch_virtualization_fault)
+
+ENTRY(kvm_dispatch_interrupt)
+	KVM_SAVE_MIN_WITH_COVER_R19	// uses r31; defines r2 and r3
+	;;
+	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
+	//mov out0=cr.ivr		// pass cr.ivr as first arg
+	adds r3=8,r2		// set up second base pointer for SAVE_REST
+	;;
+	ssm psr.ic
+	;;
+	srlz.i
+	;;
+	//(p15) ssm psr.i
+	addl r14=@gprel(ia64_leave_hypervisor),gp
+	;;
+	KVM_SAVE_REST
+	mov rp=r14
+	;;
+	mov out0=r13		// pass pointer to pt_regs as second arg
+	br.call.sptk.many b6=kvm_ia64_handle_irq
+END(kvm_dispatch_interrupt)
+
+GLOBAL_ENTRY(ia64_leave_nested)
+	rsm psr.i
+	;;
+	adds r21=PT(PR)+16,r12
+	;;
+	lfetch [r21],PT(CR_IPSR)-PT(PR)
+	adds r2=PT(B6)+16,r12
+	adds r3=PT(R16)+16,r12
+	;;
+	lfetch [r21]
+	ld8 r28=[r2],8		// load b6
+	adds r29=PT(R24)+16,r12
+
+	ld8.fill r16=[r3]
+	adds r3=PT(AR_CSD)-PT(R16),r3
+	adds r30=PT(AR_CCV)+16,r12
+	;;
+	ld8.fill r24=[r29]
+	ld8 r15=[r30]		// load ar.ccv
+	;;
+	ld8 r29=[r2],16		// load b7
+	ld8 r30=[r3],16		// load ar.csd
+	;;
+	ld8 r31=[r2],16		// load ar.ssd
+	ld8.fill r8=[r3],16
+	;;
+	ld8.fill r9=[r2],16
+	ld8.fill r10=[r3],PT(R17)-PT(R10)
+	;;
+	ld8.fill r11=[r2],PT(R18)-PT(R11)
+	ld8.fill r17=[r3],16
+	;;
+	ld8.fill r18=[r2],16
+	ld8.fill r19=[r3],16
+	;;
+	ld8.fill r20=[r2],16
+	ld8.fill r21=[r3],16
+	mov ar.csd=r30
+	mov ar.ssd=r31
+	;;
+	rsm psr.i | psr.ic
+	// initiate turning off of interrupt and interruption collection
+	invala			// invalidate ALAT
+	;;
+	ld8.fill r22=[r2],24
+	ld8.fill r23=[r3],24
+	mov b6=r28
+	;;
+	ld8.fill r25=[r2],16
+	ld8.fill r26=[r3],16
+	mov b7=r29
+	;;
+	ld8.fill r27=[r2],16
+	ld8.fill r28=[r3],16
+	;;
+	ld8.fill r29=[r2],16
+	ld8.fill r30=[r3],24
+	;;
+	ld8.fill r31=[r2],PT(F9)-PT(R31)
+	adds r3=PT(F10)-PT(F6),r3
+	;;
+	ldf.fill f9=[r2],PT(F6)-PT(F9)
+	ldf.fill f10=[r3],PT(F8)-PT(F10)
+	;;
+	ldf.fill f6=[r2],PT(F7)-PT(F6)
+	;;
+	ldf.fill f7=[r2],PT(F11)-PT(F7)
+	ldf.fill f8=[r3],32
+	;;
+	srlz.i			// ensure interruption collection is off
+	mov ar.ccv=r15
+	;;
+	bsw.0	// switch back to bank 0 (no stop bit required beforehand...)
+	;;
+	ldf.fill f11=[r2]
+//	mov r18=r13
+//	mov r21=r13
+	adds r16=PT(CR_IPSR)+16,r12
+	adds r17=PT(CR_IIP)+16,r12
+	;;
+	ld8 r29=[r16],16	// load cr.ipsr
+	ld8 r28=[r17],16	// load cr.iip
+	;;
+	ld8 r30=[r16],16	// load cr.ifs
+	ld8 r25=[r17],16	// load ar.unat
+	;;
+	ld8 r26=[r16],16	// load ar.pfs
+	ld8 r27=[r17],16	// load ar.rsc
+	cmp.eq p9,p0=r0,r0
+	// set p9 to indicate that we should restore cr.ifs
+	;;
+	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
+	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
+	;;
+	ld8 r31=[r16],16	// load predicates
+	ld8 r22=[r17],16	// load b0
+	;;
+	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
+	ld8.fill r1=[r17],16	// load r1
+	;;
+	ld8.fill r12=[r16],16
+	ld8.fill r13=[r17],16
+	;;
+	ld8 r20=[r16],16	// ar.fpsr
+	ld8.fill r15=[r17],16
+	;;
+	ld8.fill r14=[r16],16
+	ld8.fill r2=[r17]
+	;;
+	ld8.fill r3=[r16]
+	;;
+	mov r16=ar.bsp		// get existing backing store pointer
+	;;
+	mov b0=r22
+	mov ar.pfs=r26
+	mov cr.ifs=r30
+	mov cr.ipsr=r29
+	mov ar.fpsr=r20
+	mov cr.iip=r28
+	;;
+	mov ar.rsc=r27
+	mov ar.unat=r25
+	mov pr=r31,-1
+	rfi
+END(ia64_leave_nested)
+
+GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
+	/*
+	 * work.need_resched etc. mustn't get changed
+	 * by this CPU before it returns to
+	 * user- or fsys-mode, hence we disable interrupts early on:
+	 */
+	adds r2=PT(R4)+16,r12
+	adds r3=PT(R5)+16,r12
+	adds r8=PT(EML_UNAT)+16,r12
+	;;
+	ld8 r8=[r8]
+	;;
+	mov ar.unat=r8
+	;;
+	ld8.fill r4=[r2],16	//load r4
+	ld8.fill r5=[r3],16	//load r5
+	;;
+	ld8.fill r6=[r2]	//load r6
+	ld8.fill r7=[r3]	//load r7
+	;;
+END(ia64_leave_hypervisor_prepare)
+//fall through
+GLOBAL_ENTRY(ia64_leave_hypervisor)
+	rsm psr.i
+	;;
+	br.call.sptk.many b0=leave_hypervisor_tail
+	;;
+	adds r20=PT(PR)+16,r12
+	adds r8=PT(EML_UNAT)+16,r12
+	;;
+	ld8 r8=[r8]
+	;;
+	mov ar.unat=r8
+	;;
+	lfetch [r20],PT(CR_IPSR)-PT(PR)
+	adds r2=PT(B6)+16,r12
+	adds r3=PT(B7)+16,r12
+	;;
+	lfetch [r20]
+	;;
+	ld8 r24=[r2],16		/* B6 */
+	ld8 r25=[r3],16		/* B7 */
+	;;
+	ld8 r26=[r2],16		/* ar_csd */
+	ld8 r27=[r3],16		/* ar_ssd */
+	mov b6=r24
+	;;
+	ld8.fill r8=[r2],16
+	ld8.fill r9=[r3],16
+	mov b7=r25
+	;;
+	mov ar.csd=r26
+	mov ar.ssd=r27
+	;;
+	ld8.fill r10=[r2],PT(R15)-PT(R10)
+	ld8.fill r11=[r3],PT(R14)-PT(R11)
+	;;
+	ld8.fill r15=[r2],PT(R16)-PT(R15)
+	ld8.fill r14=[r3],PT(R17)-PT(R14)
+	;;
+	ld8.fill r16=[r2],16
+	ld8.fill r17=[r3],16
+	;;
+	ld8.fill r18=[r2],16
+	ld8.fill r19=[r3],16
+	;;
+	ld8.fill r20=[r2],16
+	ld8.fill r21=[r3],16
+	;;
+	ld8.fill r22=[r2],16
+	ld8.fill r23=[r3],16
+	;;
+	ld8.fill r24=[r2],16
+	ld8.fill r25=[r3],16
+	;;
+	ld8.fill r26=[r2],16
+	ld8.fill r27=[r3],16
+	;;
+	ld8.fill r28=[r2],16
+	ld8.fill r29=[r3],16
+	;;
+	ld8.fill r30=[r2],PT(F6)-PT(R30)
+	ld8.fill r31=[r3],PT(F7)-PT(R31)
+	;;
+	rsm psr.i | psr.ic
+	// initiate turning off of interrupt and interruption collection
+	invala			// invalidate ALAT
+	;;
+	srlz.i			// ensure interruption collection is off
+	;;
+	bsw.0
+	;;
+	adds r16=PT(CR_IPSR)+16,r12
+	adds r17=PT(CR_IIP)+16,r12
+	mov r21=r13		// get current
+	;;
+	ld8 r31=[r16],16	// load cr.ipsr
+	ld8 r30=[r17],16	// load cr.iip
+	;;
+	ld8 r29=[r16],16	// load cr.ifs
+	ld8 r28=[r17],16	// load ar.unat
+	;;
+	ld8 r27=[r16],16	// load ar.pfs
+	ld8 r26=[r17],16	// load ar.rsc
+	;;
+	ld8 r25=[r16],16	// load ar.rnat
+	ld8 r24=[r17],16	// load ar.bspstore
+	;;
+	ld8 r23=[r16],16	// load predicates
+	ld8 r22=[r17],16	// load b0
+	;;
+	ld8 r20=[r16],16	// load ar.rsc value for "loadrs"
+	ld8.fill r1=[r17],16	//load r1
+	;;
+	ld8.fill r12=[r16],16	//load r12
+	ld8.fill r13=[r17],PT(R2)-PT(R13)	//load r13
+	;;
+	ld8 r19=[r16],PT(R3)-PT(AR_FPSR)	//load ar_fpsr
+	ld8.fill r2=[r17],PT(AR_CCV)-PT(R2)	//load r2
+	;;
+	ld8.fill r3=[r16]	//load r3
+	ld8 r18=[r17]		//load ar_ccv
+	;;
+	mov ar.fpsr=r19
+	mov ar.ccv=r18
+	shr.u r18=r20,16
+	;;
+kvm_rbs_switch:
+	mov r19=96
+
+kvm_dont_preserve_current_frame:
+/*
+ * To prevent leaking bits between the hypervisor and guest domain,
+ * we must clear the stacked registers in the "invalid" partition here.
+ * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
+ * 5 registers/cycle on McKinley).
+ */
+#	define pRecurse	p6
+#	define pReturn	p7
+#	define Nregs	14
+
+	alloc loc0=ar.pfs,2,Nregs-2,2,0
+	shr.u loc1=r18,9	// RNaTslots <= floor(dirtySize / (64*8))
+	sub r19=r19,r18		// r19 = (physStackedSize + 8) - dirtySize
+	;;
+	mov ar.rsc=r20		// load ar.rsc to be used for "loadrs"
+	shladd in0=loc1,3,r19
+	mov in1=0
+	;;
+	TEXT_ALIGN(32)
+kvm_rse_clear_invalid:
+	alloc loc0=ar.pfs,2,Nregs-2,2,0
+	cmp.lt pRecurse,p0=Nregs*8,in0
+	// if more than Nregs regs left to clear, (re)curse
+	add out0=-Nregs*8,in0
+	add out1=1,in1		// increment recursion count
+	mov loc1=0
+	mov loc2=0
+	;;
+	mov loc3=0
+	mov loc4=0
+	mov loc5=0
+	mov loc6=0
+	mov loc7=0
+(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
+	;;
+	mov loc8=0
+	mov loc9=0
+	cmp.ne pReturn,p0=r0,in1
+	// if recursion count != 0, we need to do a br.ret
+	mov loc10=0
+	mov loc11=0
+(pReturn) br.ret.dptk.many b0
+
+#	undef pRecurse
+#	undef pReturn
+
+// loadrs has already been shifted
+	alloc r16=ar.pfs,0,0,0,0	// drop current register frame
+	;;
+	loadrs
+	;;
+	mov ar.bspstore=r24
+	;;
+	mov ar.unat=r28
+	mov ar.rnat=r25
+	mov ar.rsc=r26
+	;;
+	mov cr.ipsr=r31
+	mov cr.iip=r30
+	mov cr.ifs=r29
+	mov ar.pfs=r27
+	adds r18=VMM_VPD_BASE_OFFSET,r21
+	;;
+	ld8 r18=[r18]		//vpd
+	adds r17=VMM_VCPU_ISR_OFFSET,r21
+	;;
+	ld8 r17=[r17]
+	adds r19=VMM_VPD_VPSR_OFFSET,r18
+	;;
+	ld8 r19=[r19]		//vpsr
+	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
+	;;
+	ld8 r20=[r20]
+	;;
+//vsa_sync_write_start
+	mov r25=r18
+	adds r16=VMM_VCPU_GP_OFFSET,r21
+	;;
+	ld8 r16=[r16]		// load guest gp, used to compute return address
+	movl r24=@gprel(ia64_vmm_entry)	// calculate return address
+	;;
+	add r24=r24,r16
+	;;
+	add r16=PAL_VPS_SYNC_WRITE,r20
+	;;
+	mov b0=r16
+	br.cond.sptk b0		// call the service
+	;;
+END(ia64_leave_hypervisor)
+// fall through
+GLOBAL_ENTRY(ia64_vmm_entry)
+/*
+ * must be at bank 0
+ * parameter:
+ *	r17: cr.isr
+ *	r18: vpd
+ *	r19: vpsr
+ *	r20: __vsa_base
+ *	r22: b0
+ *	r23: predicate
+ */
+	mov r24=r22
+	mov r25=r18
+	tbit.nz p1,p2=r19,IA64_PSR_IC_BIT	// p1=vpsr.ic
+	;;
+(p1)	add r29=PAL_VPS_RESUME_NORMAL,r20
+(p1)	br.sptk.many ia64_vmm_entry_out
+	;;
+	tbit.nz p1,p2=r17,IA64_ISR_IR_BIT	// p1=cr.isr.ir
+	;;
+(p1)	add r29=PAL_VPS_RESUME_NORMAL,r20
+(p2)	add r29=PAL_VPS_RESUME_HANDLER,r20
+(p2)	ld8 r26=[r25]
+	;;
+ia64_vmm_entry_out:
+	mov pr=r23,-2
+	mov b0=r29
+	;;
+	br.cond.sptk b0		// call pal service
+END(ia64_vmm_entry)
+
+/*
+ * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
+ *			u64 arg3, u64 arg4, u64 arg5,
+ *			u64 arg6, u64 arg7);
+ *
+ * XXX: The currently defined services use only 4 args at the max. The
+ * rest are not consumed.
+ */
+GLOBAL_ENTRY(ia64_call_vsa)
+	.regstk 4,4,0,0
+
+rpsave	=	loc0
+pfssave	=	loc1
+psrsave	=	loc2
+entry	=	loc3
+hostret	=	r24
+
+	alloc pfssave=ar.pfs,4,4,0,0
+	mov rpsave=rp
+	adds entry=VMM_VCPU_VSA_BASE_OFFSET,r13
+	;;
+	ld8 entry=[entry]
+1:	mov hostret=ip
+	mov r25=in1		// copy arguments
+	mov r26=in2
+	mov r27=in3
+	mov psrsave=psr
+	;;
+	tbit.nz p6,p0=psrsave,14	// IA64_PSR_I
+	tbit.nz p7,p0=psrsave,13	// IA64_PSR_IC
+	;;
+	add hostret=2f-1b,hostret	// calculate return address
+	add entry=entry,in0
+	;;
+	rsm psr.i | psr.ic
+	;;
+	srlz.d
+	mov b6=entry
+	br.cond.sptk b6		// call the service
+2:
+	// Architectural sequence for enabling interrupts if necessary
+(p7)	ssm psr.ic
+	;;
+(p7)	srlz.d
+	;;
+//(p6)	ssm psr.i
+	;;
+	mov rp=rpsave
+	mov ar.pfs=pfssave
+	mov r8=r31
+	;;
+	srlz.d
+	br.ret.sptk rp
+END(ia64_call_vsa)
+
+#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)
+
+GLOBAL_ENTRY(vmm_reset_entry)
+	//set up ipsr, iip, vpd.vpsr, dcr
+	// For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
+	// For DCR: all bits 0
+	adds r14=-VMM_PT_REGS_SIZE,r12
+	;;
+	movl r6=0x501008826000	// IPSR dt/rt/it:1; i/ic:1, si:1, vm/bn:1
+	movl r10=0x8000000000000000
+	adds r16=PT(CR_IIP),r14
+	adds r20=PT(R1),r14
+	;;
+	rsm psr.ic | psr.i
+	;;
+	srlz.d
+	;;
+	bsw.0
+	;;
+	mov r21=r13
+	;;
+	bsw.1
+	;;
+	mov ar.rsc=0
+	;;
+	flushrs
+	;;
+	mov ar.bspstore=0	// clear BSPSTORE
+	;;
+	mov cr.ipsr=r6
+	mov cr.ifs=r10
+	ld8 r4=[r16]		// Set init iip for first run.
+	ld8 r1=[r20]
+	;;
+	mov cr.iip=r4
+	;;
+	adds r16=VMM_VPD_BASE_OFFSET,r13
+	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13
+	;;
+	ld8 r18=[r16]
+	ld8 r20=[r20]
+	;;
+	adds r19=VMM_VPD_VPSR_OFFSET,r18
+	;;
+	ld8 r19=[r19]
+	mov r17=r0
+	mov r22=r0
+	mov r23=r0
+	br.cond.sptk ia64_vmm_entry
+	br.ret.sptk b0
+END(vmm_reset_entry)
-- 
1.5.1
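
P.S. For anyone checking the .org arithmetic above against the layout described
in the file's header comment: the first 20 entries are 64 bundles apart (64
bundles * 16 bytes = 0x400), the remaining 48 entries are 16 bundles apart
(0x100), giving a 32KB table that ends at kvm_ia64_ivt+0x8000 -- which is why
the dispatch helpers are parked at .org kvm_ia64_ivt+0x8000 and the table is
.align 32768. A minimal user-space sketch of that arithmetic (illustrative
only; the helper name ivt_entry_offset is made up and nothing here is part of
the patch):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Byte offset of IVT entry n, per the header comment:
     * entries 0..19 are 64 bundles (0x400 bytes) each,
     * entries 20..67 are 16 bundles (0x100 bytes) each.
     */
    static uint64_t ivt_entry_offset(unsigned int n)
    {
            assert(n < 68);
            if (n < 20)
                    return (uint64_t)n * 0x400;
            return 20 * 0x400 + (uint64_t)(n - 20) * 0x100;
    }

    int main(void)
    {
            /* Spot-check against the .org directives in the patch. */
            assert(ivt_entry_offset(7)  == 0x1c00); /* Data Key Miss */
            assert(ivt_entry_offset(12) == 0x3000); /* External Interrupt */
            assert(ivt_entry_offset(20) == 0x5000); /* Page Not Present */
            assert(ivt_entry_offset(37) == 0x6100); /* Virtualization Fault */
            assert(ivt_entry_offset(67) == 0x7f00); /* last entry */

            /* 20*0x400 + 48*0x100 = 0x8000 = 32KB; the CPU ignores the
             * low 15 bits of the IVA, hence the 32KB alignment. */
            printf("table size = %#lx\n",
                   (unsigned long)(20 * 0x400 + 48 * 0x100));
            return 0;
    }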