From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:30
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/xen/xen_pv_ops.c | 194 ++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 194 insertions(+), 0 deletions(-) diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c index 18aa2f6..a2a7493 100644 --- a/arch/ia64/xen/xen_pv_ops.c +++ b/arch/ia64/xen/xen_pv_ops.c @@ -58,6 +58,199 @@ xen_info_init(void) } /*************************************************************************** + * pv_init_ops + * initialization hooks. + */ + +static void +xen_panic_hypercall(struct unw_frame_info *info, void *arg) +{ + current->thread.ksp = (__u64)info->sw - 16; + HYPERVISOR_shutdown(SHUTDOWN_crash); + /* we're never actually going to get here... */ +} + +static int +xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + unw_init_running(xen_panic_hypercall, NULL); + /* we're never actually going to get here... */ + return NOTIFY_DONE; +} + +static struct notifier_block xen_panic_block = { + xen_panic_event, NULL, 0 /* try to go last */ +}; + +static void xen_pm_power_off(void) +{ + local_irq_disable(); + HYPERVISOR_shutdown(SHUTDOWN_poweroff); +} + +static void __init +xen_banner(void) +{ + printk(KERN_INFO + "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld " + "flags=0x%x\n", + xen_info.kernel_rpl, + HYPERVISOR_shared_info->arch.start_info_pfn, + xen_start_info->nr_pages, xen_start_info->flags); +} + +static int __init +xen_reserve_memory(struct rsvd_region *region) +{ + region->start = (unsigned long)__va((HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT)); + region->end = region->start + PAGE_SIZE; + return 1; +} + +static void __init +xen_arch_setup_early(void) +{ + struct shared_info *s; + BUG_ON(!is_running_on_xen()); + + s = HYPERVISOR_shared_info; + xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT); + + /* Must be done before any hypercall. */ + xencomm_initialize(); + + xen_setup_features(); + /* Register a call for panic conditions. */ + atomic_notifier_chain_register(&panic_notifier_list, + &xen_panic_block); + pm_power_off = xen_pm_power_off; + + xen_ia64_enable_opt_feature(); +} + +static void __init +xen_arch_setup_console(char **cmdline_p) +{ + /* + * If a console= is NOT specified, we assume using the + * xencons console is desired. By default, this is xvc0 + * for both dom0 and domU. + */ + if (!strstr(*cmdline_p, "console=")) { + char *p, *q, name[5] = "xvc"; + int offset = 0; + +#if defined(CONFIG_VGA_CONSOLE) + /* + * conswitchp might be set intelligently from the + * PCDP code. If set to VGA console, use it. 
+ */ + if (is_initial_xendomain() && conswitchp == &vga_con) + strncpy(name, "tty", 3); +#endif + + p = strstr(*cmdline_p, "xencons="); + + if (p) { + p += 8; + if (!strncmp(p, "ttyS", 4)) { + strncpy(name, p, 4); + p += 4; + offset = simple_strtol(p, &q, 10); + if (p == q) + offset = 0; + } else if (!strncmp(p, "tty", 3) || + !strncmp(p, "xvc", 3)) { + strncpy(name, p, 3); + p += 3; + offset = simple_strtol(p, &q, 10); + if (p == q) + offset = 0; + } else if (!strncmp(p, "off", 3)) + offset = -1; + } + + if (offset >= 0) + add_preferred_console(name, offset, NULL); + } else if (!is_initial_xendomain()) { + /* use hvc_xen */ + add_preferred_console("hvc", 0, NULL); + } + +#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE) + if (!is_initial_xendomain()) { + conswitchp = NULL; + } +#endif +} + +static int __init +xen_arch_setup_nomca(void) +{ + if (!is_initial_xendomain()) + return 1; + return 0; +} + +static void __init +xen_post_platform_setup(void) +{ +#ifdef CONFIG_XEN_PRIVILEGED_GUEST + if (is_running_on_xen() && !ia64_platform_is("xen")) { + extern ia64_mv_setup_t xen_setup; + xen_setup(cmdline_p); + } +#endif +} + +static void __init +xen_post_paging_init(void) +{ +#ifdef notyet /* XXX: notyet dma api paravirtualization*/ +#ifdef CONFIG_XEN + xen_contiguous_bitmap_init(max_pfn); +#endif +#endif +} + +static void __init +__xen_cpu_init(void) +{ +#ifdef CONFIG_XEN_PRIVILEGED_GUEST + if (is_running_on_xen() && !ia64_platform_is("xen")) { + extern ia64_mv_cpu_init_t xen_cpu_init; + xen_cpu_init(); + } +#endif +} + +static void __init +xen_post_smp_prepare_boot_cpu(void) +{ + xen_setup_vcpu_info_placement(); +} + +static const struct pv_init_ops xen_init_ops __initdata = { + .banner = xen_banner, + + .reserve_memory = xen_reserve_memory, + + .arch_setup_early = xen_arch_setup_early, + .arch_setup_console = xen_arch_setup_console, + .arch_setup_nomca = xen_arch_setup_nomca, + .post_platform_setup = xen_post_platform_setup, + .post_paging_init = xen_post_paging_init, + + .cpu_init = __xen_cpu_init, + + .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, + + .bundle_patch_module = &xen_alt_bundle_patch_module, + .inst_patch_module = &xen_alt_inst_patch_module, +}; + + +/*************************************************************************** * pv_ops initialization */ @@ -66,4 +259,5 @@ xen_setup_pv_ops(void) { xen_info_init(); pv_info = xen_info; + pv_init_ops = xen_init_ops; } -- 1.5.3 |
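A note on how the pv_init_ops table above is meant to be consumed: the patch only fills in hooks; generic ia64 setup code dispatches through the table at the corresponding boot steps. A minimal caller-side sketch in C, assuming a global pv_init_ops and using the hook names from the patch (the wrapper function and the NULL-check convention are illustrative, not part of this patch):

    /* Sketch only: how generic setup code might invoke a hook
     * registered by xen_setup_pv_ops().  On Xen, .banner is
     * xen_banner(); a NULL hook means "nothing to do here". */
    extern struct pv_init_ops pv_init_ops;

    static void __init paravirt_banner(void)
    {
            if (pv_init_ops.banner)
                    pv_init_ops.banner();
    }

The same pattern presumably applies to .arch_setup_early, .post_paging_init and the rest of the hooks assigned above.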
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:30
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/xen/inst_xen.h | 503 ++++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 503 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/xen/inst_xen.h diff --git a/arch/ia64/xen/inst_xen.h b/arch/ia64/xen/inst_xen.h new file mode 100644 index 0000000..51b4f82 --- /dev/null +++ b/arch/ia64/xen/inst_xen.h @@ -0,0 +1,503 @@ +/****************************************************************************** + * inst_xen.h + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#define IA64_ASM_PARAVIRTUALIZED_XEN + +#define ia64_ivt xen_ivt + +#define __paravirt_switch_to xen_switch_to +#define __paravirt_leave_syscall xen_leave_syscall +#define __paravirt_work_processed_syscall xen_work_processed_syscall +#define __paravirt_leave_kernel xen_leave_kernel +#define __paravirt_pending_syscall_end xen_work_pending_syscall_end +#define __paravirt_work_processed_syscall_target \ + xen_work_processed_syscall + +#define MOV_FROM_IFA(reg) \ + movl reg = XSI_IFA; \ + ;; \ + ld8 reg = [reg] + +#define MOV_FROM_ITIR(reg) \ + movl reg = XSI_ITIR; \ + ;; \ + ld8 reg = [reg] + +#define MOV_FROM_ISR(reg) \ + movl reg = XSI_ISR; \ + ;; \ + ld8 reg = [reg] + +#define MOV_FROM_IHA(reg) \ + movl reg = XSI_IHA; \ + ;; \ + ld8 reg = [reg] + +#define MOV_FROM_IPSR(reg) \ + movl reg = XSI_IPSR; \ + ;; \ + ld8 reg = [reg] + +#define MOV_FROM_IIM(reg) \ + movl reg = XSI_IIM; \ + ;; \ + ld8 reg = [reg] + +#define MOV_FROM_IIP(reg) \ + movl reg = XSI_IIP; \ + ;; \ + ld8 reg = [reg] + +.macro __MOV_FROM_IVR reg, clob + .ifc "\reg", "r8" + XEN_HYPER_GET_IVR + .exitm + .endif + .ifc "\clob", "r8" + XEN_HYPER_GET_IVR + ;; + mov \reg = r8 + .exitm + .endif + .ifc "\reg", "\clob" + .error "it should be reg \reg != clob \clob" + .endif + + mov \clob = r8 + ;; + XEN_HYPER_GET_IVR + ;; + mov \reg = r8 + ;; + mov r8 = \clob +.endm +#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob + +.macro __MOV_FROM_PSR pred, reg, clob + .ifc "\reg", "r8" + (\pred) XEN_HYPER_GET_PSR; + .exitm + .endif + .ifc "\clob", "r8" + (\pred) XEN_HYPER_GET_PSR + ;; + (\pred) mov \reg = r8 + .exitm + .endif + + (\pred) mov \clob = r8 + (\pred) XEN_HYPER_GET_PSR + ;; + (\pred) mov \reg = r8 + (\pred) mov r8 = \clob +.endm +#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob + + +#define MOV_TO_IFA(reg, clob) \ + movl clob = XSI_IFA; \ + ;; \ + st8 [clob] = reg \ + +#define MOV_TO_ITIR(pred, reg, clob) \ +(pred) movl clob = XSI_ITIR; \ + ;; \ +(pred) st8 [clob] = reg + +#define MOV_TO_IHA(pred, reg, clob) \ +(pred) movl clob = XSI_IHA; \ + ;; \ +(pred) st8 [clob] = reg + +#define MOV_TO_IPSR(reg, clob) \ + movl clob = XSI_IPSR; \ + ;; \ + st8 [clob] = reg; \ + ;; + +#define MOV_TO_IFS(pred, reg, 
clob) \
+(pred) movl clob = XSI_IFS; \
+ ;; \
+(pred) st8 [clob] = reg; \
+ ;;
+
+#define MOV_TO_IIP(reg, clob) \
+ movl clob = XSI_IIP; \
+ ;; \
+ st8 [clob] = reg
+
+.macro ____MOV_TO_KR kr, reg, clob0, clob1
+ .ifc "\clob0", "r9"
+ .error "clob0 \clob0 must not be r9"
+ .endif
+ .ifc "\clob1", "r8"
+ .error "clob1 \clob1 must not be r8"
+ .endif
+
+ .ifnc "\reg", "r9"
+ .ifnc "\clob1", "r9"
+ mov \clob1 = r9
+ .endif
+ mov r9 = \reg
+ .endif
+ .ifnc "\clob0", "r8"
+ mov \clob0 = r8
+ .endif
+ mov r8 = \kr
+ ;;
+ XEN_HYPER_SET_KR
+
+ .ifnc "\reg", "r9"
+ .ifnc "\clob1", "r9"
+ mov r9 = \clob1
+ .endif
+ .endif
+ .ifnc "\clob0", "r8"
+ mov r8 = \clob0
+ .endif
+.endm
+
+.macro __MOV_TO_KR kr, reg, clob0, clob1
+ .ifc "\clob0", "r9"
+ ____MOV_TO_KR \kr, \reg, \clob1, \clob0
+ .exitm
+ .endif
+ .ifc "\clob1", "r8"
+ ____MOV_TO_KR \kr, \reg, \clob1, \clob0
+ .exitm
+ .endif
+
+ ____MOV_TO_KR \kr, \reg, \clob0, \clob1
+.endm
+
+#define MOV_TO_KR(kr, reg, clob0, clob1) \
+ __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
+
+
+.macro __ITC_I pred, reg, clob
+ .ifc "\reg", "r8"
+ (\pred) XEN_HYPER_ITC_I
+ .exitm
+ .endif
+ .ifc "\clob", "r8"
+ (\pred) mov r8 = \reg
+ ;;
+ (\pred) XEN_HYPER_ITC_I
+ .exitm
+ .endif
+
+ (\pred) mov \clob = r8
+ (\pred) mov r8 = \reg
+ ;;
+ (\pred) XEN_HYPER_ITC_I
+ ;;
+ (\pred) mov r8 = \clob
+ ;;
+.endm
+#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob
+
+.macro __ITC_D pred, reg, clob
+ .ifc "\reg", "r8"
+ (\pred) XEN_HYPER_ITC_D
+ ;;
+ .exitm
+ .endif
+ .ifc "\clob", "r8"
+ (\pred) mov r8 = \reg
+ ;;
+ (\pred) XEN_HYPER_ITC_D
+ ;;
+ .exitm
+ .endif
+
+ (\pred) mov \clob = r8
+ (\pred) mov r8 = \reg
+ ;;
+ (\pred) XEN_HYPER_ITC_D
+ ;;
+ (\pred) mov r8 = \clob
+ ;;
+.endm
+#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob
+
+.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
+ .ifc "\reg", "r8"
+ (\pred_i)XEN_HYPER_ITC_I
+ ;;
+ (\pred_d)XEN_HYPER_ITC_D
+ ;;
+ .exitm
+ .endif
+ .ifc "\clob", "r8"
+ mov r8 = \reg
+ ;;
+ (\pred_i)XEN_HYPER_ITC_I
+ ;;
+ (\pred_d)XEN_HYPER_ITC_D
+ ;;
+ .exitm
+ .endif
+
+ mov \clob = r8
+ mov r8 = \reg
+ ;;
+ (\pred_i)XEN_HYPER_ITC_I
+ ;;
+ (\pred_d)XEN_HYPER_ITC_D
+ ;;
+ mov r8 = \clob
+ ;;
+.endm
+#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
+ __ITC_I_AND_D pred_i, pred_d, reg, clob
+
+.macro __THASH pred, reg0, reg1, clob
+ .ifc "\reg0", "r8"
+ (\pred) mov r8 = \reg1
+ (\pred) XEN_HYPER_THASH
+ .exitm
+ .endif
+ .ifc "\reg1", "r8"
+ (\pred) XEN_HYPER_THASH
+ ;;
+ (\pred) mov \reg0 = r8
+ ;;
+ .exitm
+ .endif
+ .ifc "\clob", "r8"
+ (\pred) mov r8 = \reg1
+ (\pred) XEN_HYPER_THASH
+ ;;
+ (\pred) mov \reg0 = r8
+ ;;
+ .exitm
+ .endif
+
+ (\pred) mov \clob = r8
+ (\pred) mov r8 = \reg1
+ (\pred) XEN_HYPER_THASH
+ ;;
+ (\pred) mov \reg0 = r8
+ (\pred) mov r8 = \clob
+ ;;
+.endm
+#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
+
+#define SSM_PSR_IC_AND_DEFAULT_BITS(clob0, clob1) \
+ mov clob0 = 1; \
+ movl clob1 = XSI_PSR_IC; \
+ ;; \
+ st4 [clob1] = clob0 \
+ ;;
+
+#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
+ ;; \
+ srlz.d; \
+ mov clob1 = 1; \
+ movl clob0=XSI_PSR_IC; \
+ ;; \
+ st4 [clob0] = clob1
+
+#define RSM_PSR_IC(clob) \
+ movl clob = XSI_PSR_IC; \
+ ;; \
+ st4 [clob] = r0; \
+ ;;
+
+/* pred will be clobbered */
+#define MASK_TO_PEND_OFS (-1)
+#define SSM_PSR_I(pred, clob) \
+(pred) movl clob = XSI_PSR_I_ADDR \
+ ;; \
+(pred) ld8 clob = [clob] \
+ ;; \
+ /* if (pred) vpsr.i = 1 */ \
+ /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \
+(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \
+ ;; \
+ /* if
(vcpu->vcpu_info->evtchn_upcall_pending) */ \ +(pred) ld1 clob = [clob] \ + ;; \ +(pred) cmp.ne pred, p0 = clob, r0 \ + ;; \ +(pred) XEN_HYPER_SSM_I /* do areal ssm psr.i */ + +#define RSM_PSR_I(pred, clob0, clob1) \ + movl clob0 = XSI_PSR_I_ADDR; \ + mov clob1 = 1; \ + ;; \ + ld8 clob0 = [clob0]; \ + ;; \ +(pred) st1 [clob0] = clob1 + +#define RSM_PSR_I_IC(clob0, clob1, clob2) \ + movl clob0 = XSI_PSR_I_ADDR; \ + movl clob1 = XSI_PSR_IC; \ + ;; \ + ld8 clob0 = [clob0]; \ + mov clob2 = 1; \ + ;; \ + /* note: clears both vpsr.i and vpsr.ic! */ \ + st1 [clob0] = clob2; \ + st4 [clob1] = r0; \ + ;; + +#define RSM_PSR_DT \ + XEN_HYPER_RSM_PSR_DT + +#define RSM_PSR_DT_AND_SRLZ_I \ + XEN_HYPER_RSM_PSR_DT + +#define SSM_PSR_DT_AND_SRLZ_I \ + XEN_HYPER_SSM_PSR_DT + +#define BSW_0(clob0, clob1, clob2) \ + ;; \ + /* r16-r31 all now hold bank1 values */ \ + mov clob2 = ar.unat; \ + movl clob0 = XSI_BANK1_R16; \ + movl clob1 = XSI_BANK1_R16 + 8; \ + ;; \ +.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \ +.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \ + ;; \ +.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \ +.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \ + ;; \ +.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \ +.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \ + ;; \ +.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \ +.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \ + ;; \ +.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \ +.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \ + ;; \ +.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \ +.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \ + ;; \ +.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \ +.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \ + ;; \ +.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \ +.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \ + ;; \ + mov clob1 = ar.unat; \ + movl clob0 = XSI_B1NAT; \ + ;; \ + st8 [clob0] = clob1; \ + mov ar.unat = clob2; \ + movl clob0 = XSI_BANKNUM; \ + ;; \ + st4 [clob0] = r0 + + + /* FIXME: THIS CODE IS NOT NaT SAFE! 
*/ +#define XEN_BSW_1(clob) \ + mov clob = ar.unat; \ + movl r30 = XSI_B1NAT; \ + ;; \ + ld8 r30 = [r30]; \ + ;; \ + mov ar.unat = r30; \ + movl r30 = XSI_BANKNUM; \ + mov r31 = 1; \ + ;; \ + st4 [r30] = r31; \ + movl r30 = XSI_BANK1_R16; \ + movl r31 = XSI_BANK1_R16+8; \ + ;; \ + ld8.fill r16 = [r30], 16; \ + ld8.fill r17 = [r31], 16; \ + ;; \ + ld8.fill r18 = [r30], 16; \ + ld8.fill r19 = [r31], 16; \ + ;; \ + ld8.fill r20 = [r30], 16; \ + ld8.fill r21 = [r31], 16; \ + ;; \ + ld8.fill r22 = [r30], 16; \ + ld8.fill r23 = [r31], 16; \ + ;; \ + ld8.fill r24 = [r30], 16; \ + ld8.fill r25 = [r31], 16; \ + ;; \ + ld8.fill r26 = [r30], 16; \ + ld8.fill r27 = [r31], 16; \ + ;; \ + ld8.fill r28 = [r30], 16; \ + ld8.fill r29 = [r31], 16; \ + ;; \ + ld8.fill r30 = [r30]; \ + ld8.fill r31 = [r31]; \ + ;; \ + mov ar.unat = clob + +/* xen_bsw1 clobbers clob1 = r14 */ +.macro ____BSW_1 clob0, clob1 + .ifc "\clob0", "r14" + .error "clob0 \clob0 must not be r14" + .endif + .ifnc "\clob1", "r14" + .error "clob1 \clob1 must be r14" + .endif + .ifc "\clob0", "\clob1" + .error "it must be clob0 \clob0 != clob1 \clob1" + .endif + + mov \clob0 = b0 + br.call.sptk b0 = xen_bsw1 + ;; + mov b0 = \clob0 + ;; +.endm + +.macro __BSW_1 clob0, clob1 + .ifc "\clob0", "r14" + ____BSW_1 \clob1, \clob0 + .exitm + .endif + .ifc "\clob1", "r14" + ____BSW_1 \clob0, \clob1 + .exitm + .endif + .ifc "\clob0", "\clob1" + .error "it must be clob0 \clob0 != clob1 \clob1" + .endif + + .warning "use r14 as second argument \clob0 \clob1" + mov \clob1 = r14 + ____BSW_1 \clob0, r14 + mov r14 = \clob1 +.endm + +/* in place code generating causes lack of space */ +/* #define BSW_1(clob0, clob1) XEN_BSW_1(clob1) */ +#define BSW_1(clob0, clob1) __BSW_1 clob0, clob1 + + +#define COVER \ + XEN_HYPER_COVER + +#define RFI \ + XEN_HYPER_RFI; \ + dv_serialize_data -- 1.5.3 |
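The clobber juggling in macros like __MOV_FROM_IVR above follows a single rule: every hyperprivop takes its operands and returns its result in fixed registers (r8, plus r9 for two-operand ops), so the macro only emits save/restore moves when the caller's registers do not already line up. A C-level sketch of that case analysis, purely as an illustration of what the macro decides at assembly time (the function and flags are hypothetical):

    /* Mirrors the three cases of __MOV_FROM_IVR reg, clob. */
    static int moves_for_get_ivr(int reg_is_r8, int clob_is_r8)
    {
            if (reg_is_r8)
                    return 0;   /* result is already produced in r8 */
            if (clob_is_r8)
                    return 1;   /* one mov: r8 -> reg */
            return 3;           /* save r8 to clob, r8 -> reg, restore r8 */
    }

The remaining invalid combination (reg == clob) is caught at assembly time by the .error directives.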
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:30
Signed-off-by: Isaku Yamahata <yam...@va...> --- include/asm-ia64/page.h | 8 ++++++++ include/asm-ia64/xen/page.h | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 0 deletions(-) create mode 100644 include/asm-ia64/xen/page.h diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 4999a6c..5508dc2 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -227,4 +227,12 @@ get_order (unsigned long size) (((current->personality & READ_IMPLIES_EXEC) != 0) \ ? VM_EXEC : 0)) +/* + * XXX: to compile + * after pv_ops'fication of xen paravirtualization, this should be removed. + */ +#if !defined(__ASSEMBLY__) && defined(CONFIG_XEN) +#include <asm/xen/page.h> +#endif /* !__ASSEMBLY__ && CONFIG_XEN */ + #endif /* _ASM_IA64_PAGE_H */ diff --git a/include/asm-ia64/xen/page.h b/include/asm-ia64/xen/page.h new file mode 100644 index 0000000..c562036 --- /dev/null +++ b/include/asm-ia64/xen/page.h @@ -0,0 +1,41 @@ +#ifndef _ASM_IA64_XEN_PAGE_H +#define _ASM_IA64_XEN_PAGE_H + +#include <linux/kernel.h> +#include <asm/xen/hypervisor.h> +#include <asm/xen/hypercall.h> +#include <xen/features.h> +#include <xen/interface/xen.h> + +static inline unsigned long mfn_to_pfn(unsigned long mfn) +{ + return mfn; +} + +static inline unsigned long pfn_to_mfn(unsigned long pfn) +{ + return pfn; +} + +static inline void *mfn_to_virt(unsigned long mfn) +{ + return __va(mfn << PAGE_SHIFT); +} + +static inline unsigned long virt_to_mfn(void *virt) +{ + return __pa(virt) >> PAGE_SHIFT; +} + +/* for tpmfront.c */ +static inline unsigned long virt_to_machine(void *virt) +{ + return __pa(virt); +} + +static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) +{ + /* nothing */ +} + +#endif /* _ASM_IA64_XEN_PAGE_H */ -- 1.5.3 |
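Because ia64 Xen guests run auto-translated, the helpers in this patch are pure identity/arithmetic operations; there is no p2m table to consult. A usage sketch under that assumption (the function is hypothetical; virt_to_mfn() and mfn_to_virt() are the helpers from the patch, and the address is assumed to be in the kernel identity-mapped region):

    /* On an auto-translated guest, round-tripping through the
     * machine-frame helpers is pure arithmetic. */
    static unsigned long page_to_gnt_frame(void *page)
    {
            unsigned long mfn = virt_to_mfn(page); /* __pa(page) >> PAGE_SHIFT */
            BUG_ON(mfn_to_virt(mfn) !=
                   (void *)((unsigned long)page & PAGE_MASK));
            return mfn;
    }

set_phys_to_machine() is correspondingly a no-op.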
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:30
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/xen/hypercall.S | 124 ++++++++++ include/asm-ia64/xen/privop.h | 512 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 636 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/xen/hypercall.S diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S new file mode 100644 index 0000000..a96f278 --- /dev/null +++ b/arch/ia64/xen/hypercall.S @@ -0,0 +1,124 @@ +/* + * Support routines for Xen hypercalls + * + * Copyright (C) 2005 Dan Magenheimer <dan...@hp...> + */ + +#include <asm/asmmacro.h> +#include <asm/intrinsics.h> + +#ifdef __INTEL_COMPILER +# undef ASM_SUPPORTED +#else +# define ASM_SUPPORTED +#endif + +#ifndef ASM_SUPPORTED +GLOBAL_ENTRY(xen_get_psr) + XEN_HYPER_GET_PSR + br.ret.sptk.many rp + ;; +END(xen_get_psr) + +GLOBAL_ENTRY(xen_get_ivr) + XEN_HYPER_GET_IVR + br.ret.sptk.many rp + ;; +END(xen_get_ivr) + +GLOBAL_ENTRY(xen_get_tpr) + XEN_HYPER_GET_TPR + br.ret.sptk.many rp + ;; +END(xen_get_tpr) + +GLOBAL_ENTRY(xen_set_tpr) + mov r8=r32 + XEN_HYPER_SET_TPR + br.ret.sptk.many rp + ;; +END(xen_set_tpr) + +GLOBAL_ENTRY(xen_eoi) + mov r8=r32 + XEN_HYPER_EOI + br.ret.sptk.many rp + ;; +END(xen_eoi) + +GLOBAL_ENTRY(xen_thash) + mov r8=r32 + XEN_HYPER_THASH + br.ret.sptk.many rp + ;; +END(xen_thash) + +GLOBAL_ENTRY(xen_set_itm) + mov r8=r32 + XEN_HYPER_SET_ITM + br.ret.sptk.many rp + ;; +END(xen_set_itm) + +GLOBAL_ENTRY(xen_ptcga) + mov r8=r32 + mov r9=r33 + XEN_HYPER_PTC_GA + br.ret.sptk.many rp + ;; +END(xen_ptcga) + +GLOBAL_ENTRY(xen_get_rr) + mov r8=r32 + XEN_HYPER_GET_RR + br.ret.sptk.many rp + ;; +END(xen_get_rr) + +GLOBAL_ENTRY(xen_set_rr) + mov r8=r32 + mov r9=r33 + XEN_HYPER_SET_RR + br.ret.sptk.many rp + ;; +END(xen_set_rr) + +GLOBAL_ENTRY(xen_set_kr) + mov r8=r32 + mov r9=r33 + XEN_HYPER_SET_KR + br.ret.sptk.many rp +END(xen_set_kr) + +GLOBAL_ENTRY(xen_fc) + mov r8=r32 + XEN_HYPER_FC + br.ret.sptk.many rp +END(xen_fc) + +GLOBAL_ENTRY(xen_get_cpuid) + mov r8=r32 + XEN_HYPER_GET_CPUID + br.ret.sptk.many rp +END(xen_get_cpuid) + +GLOBAL_ENTRY(xen_get_pmd) + mov r8=r32 + XEN_HYPER_GET_PMD + br.ret.sptk.many rp +END(xen_get_pmd) + +#ifdef CONFIG_IA32_SUPPORT +GLOBAL_ENTRY(xen_get_eflag) + XEN_HYPER_GET_EFLAG + br.ret.sptk.many rp +END(xen_get_eflag) + +// some bits aren't set if pl!=0, see SDM vol1 3.1.8 +GLOBAL_ENTRY(xen_set_eflag) + mov r8=r32 + XEN_HYPER_SET_EFLAG + br.ret.sptk.many rp +END(xen_set_eflag) +#endif /* CONFIG_IA32_SUPPORT */ +#endif /* ASM_SUPPORTED */ diff --git a/include/asm-ia64/xen/privop.h b/include/asm-ia64/xen/privop.h index dd3e5ec..95e8e8a 100644 --- a/include/asm-ia64/xen/privop.h +++ b/include/asm-ia64/xen/privop.h @@ -70,4 +70,516 @@ #define XSI_IHA (XSI_BASE + XSI_IHA_OFS) #endif +#ifndef __ASSEMBLY__ +#define XEN_HYPER_SSM_I asm("break %0" : : "i" (HYPERPRIVOP_SSM_I)) +#define XEN_HYPER_GET_IVR asm("break %0" : : "i" (HYPERPRIVOP_GET_IVR)) + +/************************************************/ +/* Instructions paravirtualized for correctness */ +/************************************************/ + +/* "fc" and "thash" are privilege-sensitive instructions, meaning they + * may have different semantics depending on whether they are executed + * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't + * be allowed to execute directly, lest incorrect semantics result. 
*/
+#ifdef ASM_SUPPORTED
+static inline void
+xen_fc(unsigned long addr)
+{
+ register __u64 __addr asm ("r8") = addr;
+ asm volatile ("break %0":: "i"(HYPERPRIVOP_FC), "r"(__addr));
+}
+
+static inline unsigned long
+xen_thash(unsigned long addr)
+{
+ register __u64 ia64_intri_res asm ("r8");
+ register __u64 __addr asm ("r8") = addr;
+ asm volatile ("break %1":
+ "=r"(ia64_intri_res):
+ "i"(HYPERPRIVOP_THASH), "0"(__addr));
+ return ia64_intri_res;
+}
+#else
+extern void xen_fc(unsigned long addr);
+extern unsigned long xen_thash(unsigned long addr);
+#endif
+
+/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
+ * is not currently used (though it may be in a long-format VHPT system!)
+ * and the semantics of cover only change if psr.ic is off which is very
+ * rare (and currently non-existent outside of assembly code) */
+
+/* There are also privilege-sensitive registers. These registers are
+ * readable at any privilege level but only writable at PL0. */
+#ifdef ASM_SUPPORTED
+static inline unsigned long
+xen_get_cpuid(int index)
+{
+ register __u64 ia64_intri_res asm ("r8");
+ register __u64 __index asm ("r8") = index;
+ asm volatile ("break %1":
+ "=r"(ia64_intri_res):
+ "i"(HYPERPRIVOP_GET_CPUID), "0"(__index));
+ return ia64_intri_res;
+}
+
+static inline unsigned long
+xen_get_pmd(int index)
+{
+ register __u64 ia64_intri_res asm ("r8");
+ register __u64 __index asm ("r8") = index;
+ asm volatile ("break %1":
+ "=r"(ia64_intri_res):
+ "i"(HYPERPRIVOP_GET_PMD), "0"(__index));
+ return ia64_intri_res;
+}
+#else
+extern unsigned long xen_get_cpuid(int index);
+extern unsigned long xen_get_pmd(int index);
+#endif
+
+#ifdef ASM_SUPPORTED
+static inline unsigned long
+xen_get_eflag(void)
+{
+ register __u64 ia64_intri_res asm ("r8");
+ asm volatile ("break %1":
+ "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_EFLAG));
+ return ia64_intri_res;
+}
+
+static inline void
+xen_set_eflag(unsigned long val)
+{
+ register __u64 __val asm ("r8") = val;
+ asm volatile ("break %0":: "i"(HYPERPRIVOP_SET_EFLAG), "r"(__val));
+}
+#else
+extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
+extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
+#endif
+
+/************************************************/
+/* Instructions paravirtualized for performance */
+/************************************************/
+
+/* Xen uses memory-mapped virtual privileged registers for access to many
+ * performance-sensitive privileged registers. Some, like the processor
+ * status register (psr), are broken up into multiple memory locations.
+ * Others, like "pend", are abstractions based on privileged registers.
+ * "Pend" is guaranteed to be set if reading cr.ivr would return a
+ * (non-spurious) interrupt. */
+#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
+
+#define XSI_PSR_I \
+ (*XEN_MAPPEDREGS->interrupt_mask_addr)
+#define xen_get_virtual_psr_i() \
+ (!XSI_PSR_I)
+#define xen_set_virtual_psr_i(_val) \
+ ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
+#define xen_set_virtual_psr_ic(_val) \
+ ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
+#define xen_get_virtual_pend() \
+ (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
+
+/* Hyperprivops are "break" instructions with a well-defined API.
+ * In particular, the virtual psr.ic bit must be off; in this way
+ * it is guaranteed to never conflict with a linux break instruction.
+ * Normally, this is done in a xen stub but this one is frequent enough
+ * that we inline it */
+#define xen_hyper_ssm_i() \
+({ \
+ XEN_HYPER_SSM_I; \
+})
+
+/* turning off interrupts can be paravirtualized simply by writing
+ * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
+#define xen_rsm_i() \
+do { \
+ xen_set_virtual_psr_i(0); \
+ barrier(); \
+} while (0)
+
+/* turning on interrupts is a bit more complicated: write to the
+ * memory-mapped virtual psr.i bit first (to avoid a race condition),
+ * then, if any interrupts were pending, execute a hyperprivop
+ * to ensure the pending interrupt gets delivered; else we're done! */
+#define xen_ssm_i() \
+do { \
+ int old = xen_get_virtual_psr_i(); \
+ xen_set_virtual_psr_i(1); \
+ barrier(); \
+ if (!old && xen_get_virtual_pend()) \
+ xen_hyper_ssm_i(); \
+} while (0)
+
+#define xen_ia64_intrin_local_irq_restore(x) \
+do { \
+ if (is_running_on_xen()) { \
+ if ((x) & IA64_PSR_I) \
+ xen_ssm_i(); \
+ else \
+ xen_rsm_i(); \
+ } else { \
+ native_intrin_local_irq_restore((x)); \
+ } \
+} while (0)
+
+#define xen_get_psr_i() \
+({ \
+ (is_running_on_xen()) ? \
+ (xen_get_virtual_psr_i() ? IA64_PSR_I : 0) \
+ : native_get_psr_i(); \
+})
+
+#define xen_ia64_ssm(mask) \
+do { \
+ if ((mask) == IA64_PSR_I) { \
+ if (is_running_on_xen()) \
+ xen_ssm_i(); \
+ else \
+ native_ssm(mask); \
+ } else { \
+ native_ssm(mask); \
+ } \
+} while (0)
+
+#define xen_ia64_rsm(mask) \
+do { \
+ if ((mask) == IA64_PSR_I) { \
+ if (is_running_on_xen()) \
+ xen_rsm_i(); \
+ else \
+ native_rsm(mask); \
+ } else { \
+ native_rsm(mask); \
+ } \
+} while (0)
+
+/* Although all privileged operations can be left to trap and will
+ * be properly handled by Xen, some are frequent enough that we use
+ * hyperprivops for performance.
*/
+
+#ifndef ASM_SUPPORTED
+extern unsigned long xen_get_psr(void);
+extern unsigned long xen_get_ivr(void);
+extern unsigned long xen_get_tpr(void);
+extern void xen_set_itm(unsigned long);
+extern void xen_set_tpr(unsigned long);
+extern void xen_eoi(unsigned long);
+extern void xen_set_rr(unsigned long index, unsigned long val);
+extern unsigned long xen_get_rr(unsigned long index);
+extern void xen_set_kr(unsigned long index, unsigned long val);
+extern void xen_ptcga(unsigned long addr, unsigned long size);
+#else
+static inline unsigned long
+xen_get_psr(void)
+{
+ register __u64 ia64_intri_res asm ("r8");
+ asm volatile ("break %1":
+ "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_PSR));
+ return ia64_intri_res;
+}
+
+static inline unsigned long
+xen_get_ivr(void)
+{
+ register __u64 ia64_intri_res asm ("r8");
+ asm volatile ("break %1":
+ "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_IVR));
+ return ia64_intri_res;
+}
+
+static inline unsigned long
+xen_get_tpr(void)
+{
+ register __u64 ia64_intri_res asm ("r8");
+ asm volatile ("break %1":
+ "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_TPR));
+ return ia64_intri_res;
+}
+
+static inline void
+xen_set_tpr(unsigned long val)
+{
+ register __u64 __val asm ("r8") = val;
+ asm volatile ("break %0"::
+ "i"(HYPERPRIVOP_SET_TPR), "r"(__val));
+}
+
+static inline void
+xen_eoi(unsigned long val)
+{
+ register __u64 __val asm ("r8") = val;
+ asm volatile ("break %0"::
+ "i"(HYPERPRIVOP_EOI), "r"(__val));
+}
+
+static inline void
+xen_set_itm(unsigned long val)
+{
+ register __u64 __val asm ("r8") = val;
+ asm volatile ("break %0":: "i"(HYPERPRIVOP_SET_ITM), "r"(__val));
+}
+
+static inline void
+xen_ptcga(unsigned long addr, unsigned long size)
+{
+ register __u64 __addr asm ("r8") = addr;
+ register __u64 __size asm ("r9") = size;
+ asm volatile ("break %0"::
+ "i"(HYPERPRIVOP_PTC_GA), "r"(__addr), "r"(__size));
+}
+
+static inline unsigned long
+xen_get_rr(unsigned long index)
+{
+ register __u64 ia64_intri_res asm ("r8");
+ register __u64 __index asm ("r8") = index;
+ asm volatile ("break %1":
+ "=r"(ia64_intri_res):
+ "i"(HYPERPRIVOP_GET_RR), "0"(__index));
+ return ia64_intri_res;
+}
+
+static inline void
+xen_set_rr(unsigned long index, unsigned long val)
+{
+ register __u64 __index asm ("r8") = index;
+ register __u64 __val asm ("r9") = val;
+ asm volatile ("break %0"::
+ "i"(HYPERPRIVOP_SET_RR), "r"(__index), "r"(__val));
+}
+
+static inline void
+xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
+ unsigned long val2, unsigned long val3, unsigned long val4)
+{
+ register __u64 __val0 asm ("r8") = val0;
+ register __u64 __val1 asm ("r9") = val1;
+ register __u64 __val2 asm ("r10") = val2;
+ register __u64 __val3 asm ("r11") = val3;
+ register __u64 __val4 asm ("r14") = val4;
+ asm volatile ("break %0" ::
+ "i"(HYPERPRIVOP_SET_RR0_TO_RR4),
+ "r"(__val0), "r"(__val1),
+ "r"(__val2), "r"(__val3), "r"(__val4));
+}
+
+static inline void
+xen_set_kr(unsigned long index, unsigned long val)
+{
+ register __u64 __index asm ("r8") = index;
+ register __u64 __val asm ("r9") = val;
+ asm volatile ("break %0"::
+ "i"(HYPERPRIVOP_SET_KR), "r"(__index), "r"(__val));
+}
+#endif
+
+/* Note: It may look wrong to test for is_running_on_xen() in each case.
+ * However regnum is always a constant so, as written, the compiler
+ * eliminates the switch statement, whereas is_running_on_xen() must be
+ * tested dynamically.
*/ +#define xen_ia64_getreg(regnum) \ +({ \ + __u64 ia64_intri_res; \ + \ + switch (regnum) { \ + case _IA64_REG_PSR: \ + ia64_intri_res = (is_running_on_xen()) ? \ + xen_get_psr() : \ + native_getreg(regnum); \ + break; \ + case _IA64_REG_CR_IVR: \ + ia64_intri_res = (is_running_on_xen()) ? \ + xen_get_ivr() : \ + native_getreg(regnum); \ + break; \ + case _IA64_REG_CR_TPR: \ + ia64_intri_res = (is_running_on_xen()) ? \ + xen_get_tpr() : \ + native_getreg(regnum); \ + break; \ + case _IA64_REG_AR_EFLAG: \ + ia64_intri_res = (is_running_on_xen()) ? \ + xen_get_eflag() : \ + native_getreg(regnum); \ + break; \ + default: \ + ia64_intri_res = native_getreg(regnum); \ + break; \ + } \ + ia64_intri_res; \ +}) + +#define xen_ia64_setreg(regnum, val) \ +({ \ + switch (regnum) { \ + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: \ + (is_running_on_xen()) ? \ + xen_set_kr(((regnum)-_IA64_REG_AR_KR0), (val)) :\ + native_setreg((regnum), (val)); \ + break; \ + case _IA64_REG_CR_ITM: \ + (is_running_on_xen()) ? \ + xen_set_itm(val) : \ + native_setreg((regnum), (val)); \ + break; \ + case _IA64_REG_CR_TPR: \ + (is_running_on_xen()) ? \ + xen_set_tpr(val) : \ + native_setreg((regnum), (val)); \ + break; \ + case _IA64_REG_CR_EOI: \ + (is_running_on_xen()) ? \ + xen_eoi(val) : \ + native_setreg((regnum), (val)); \ + break; \ + case _IA64_REG_AR_EFLAG: \ + (is_running_on_xen()) ? \ + xen_set_eflag(val) : \ + native_setreg((regnum), (val)); \ + break; \ + default: \ + native_setreg((regnum), (val)); \ + break; \ + } \ +}) + +#if defined(ASM_SUPPORTED) && !defined(CONFIG_PARAVIRT_ALT) + +#define IA64_PARAVIRTUALIZED_PRIVOP + +#define ia64_fc(addr) \ +do { \ + if (is_running_on_xen()) \ + xen_fc((unsigned long)(addr)); \ + else \ + native_fc(addr); \ +} while (0) + +#define ia64_thash(addr) \ +({ \ + unsigned long ia64_intri_res; \ + if (is_running_on_xen()) \ + ia64_intri_res = \ + xen_thash((unsigned long)(addr)); \ + else \ + ia64_intri_res = native_thash(addr); \ + ia64_intri_res; \ +}) + +#define ia64_get_cpuid(i) \ +({ \ + unsigned long ia64_intri_res; \ + if (is_running_on_xen()) \ + ia64_intri_res = xen_get_cpuid(i); \ + else \ + ia64_intri_res = native_get_cpuid(i); \ + ia64_intri_res; \ +}) + +#define ia64_get_pmd(i) \ +({ \ + unsigned long ia64_intri_res; \ + if (is_running_on_xen()) \ + ia64_intri_res = xen_get_pmd(i); \ + else \ + ia64_intri_res = native_get_pmd(i); \ + ia64_intri_res; \ +}) + + +#define ia64_ptcga(addr, size) \ +do { \ + if (is_running_on_xen()) \ + xen_ptcga((addr), (size)); \ + else \ + native_ptcga((addr), (size)); \ +} while (0) + +#define ia64_set_rr(index, val) \ +do { \ + if (is_running_on_xen()) \ + xen_set_rr((index), (val)); \ + else \ + native_set_rr((index), (val)); \ +} while (0) + +#define ia64_get_rr(index) \ +({ \ + __u64 ia64_intri_res; \ + if (is_running_on_xen()) \ + ia64_intri_res = xen_get_rr((index)); \ + else \ + ia64_intri_res = native_get_rr((index)); \ + ia64_intri_res; \ +}) + +#define ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4) \ +do { \ + if (is_running_on_xen()) \ + xen_set_rr0_to_rr4((val0), (val1), (val2), \ + (val3), (val4)); \ + else \ + native_set_rr0_to_rr4((val0), (val1), (val2), \ + (val3), (val4)); \ +} while (0) + +#define ia64_getreg xen_ia64_getreg +#define ia64_setreg xen_ia64_setreg +#define ia64_ssm xen_ia64_ssm +#define ia64_rsm xen_ia64_rsm +#define ia64_intrin_local_irq_restore xen_ia64_intrin_local_irq_restore +#define ia64_get_psr_i xen_get_psr_i + +/* the remainder of these are not performance-sensitive so its + * OK to 
not paravirtualize and just take a privop trap and emulate */ +#define ia64_hint native_hint +#define ia64_set_pmd native_set_pmd +#define ia64_itci native_itci +#define ia64_itcd native_itcd +#define ia64_itri native_itri +#define ia64_itrd native_itrd +#define ia64_tpa native_tpa +#define ia64_set_ibr native_set_ibr +#define ia64_set_pkr native_set_pkr +#define ia64_set_pmc native_set_pmc +#define ia64_get_ibr native_get_ibr +#define ia64_get_pkr native_get_pkr +#define ia64_get_pmc native_get_pmc +#define ia64_ptce native_ptce +#define ia64_ptcl native_ptcl +#define ia64_ptri native_ptri +#define ia64_ptrd native_ptrd + +#endif /* ASM_SUPPORTED && !CONFIG_PARAVIRT_ALT */ + +#endif /* !__ASSEMBLY__ */ + +/* these routines utilize privilege-sensitive or performance-sensitive + * privileged instructions so the code must be replaced with + * paravirtualized versions */ +#ifndef CONFIG_PARAVIRT_ENTRY +#define IA64_PARAVIRTUALIZED_ENTRY +#define ia64_switch_to xen_switch_to +#define ia64_leave_syscall xen_leave_syscall +#define ia64_work_processed_syscall xen_work_processed_syscall_with_check +#define ia64_leave_kernel xen_leave_kernel +#define ia64_pal_call_static xen_pal_call_static +#endif /* !CONFIG_PARAVIRT_ENTRY */ + +#ifdef CONFIG_XEN +#ifdef __ASSEMBLY__ +#define BR_IF_NATIVE(target, reg, pred) \ + .body ; \ + movl reg=running_on_xen;; ; \ + ld4 reg=[reg];; ; \ + cmp.eq pred,p0=reg,r0 ; \ + (pred) br.cond.sptk.many target;; +#endif /* __ASSEMBLY__ */ +#endif + #endif /* _ASM_IA64_XEN_PRIVOP_H */ -- 1.5.3 |
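The note about regnum being a compile-time constant is worth spelling out. Every in-tree caller of ia64_getreg() passes a constant, so after constant propagation the switch in xen_ia64_getreg disappears and only the dynamic is_running_on_xen() test remains. A sketch of the intended result (read_tpr() is a hypothetical caller, not from the patch):

    static unsigned long read_tpr(void)
    {
            /* ia64_getreg(_IA64_REG_CR_TPR) effectively compiles to: */
            return is_running_on_xen() ? xen_get_tpr()
                                       : native_getreg(_IA64_REG_CR_TPR);
    }

The same reasoning applies to xen_ia64_setreg and to the ssm/rsm wrappers above.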
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:30
With binary patching, make intrinsics paravirtualization hypervisor neutral. So far xen intrinsics doesn't allow another hypervisor. Binary patch marked privileged operations which needs paravirtualization if running on xen at early boot time. Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/xen/Makefile | 7 + arch/ia64/xen/paravirt_xen.c | 242 +++++++++++++++++++++++++++++++++++ arch/ia64/xen/privops_asm.S | 221 ++++++++++++++++++++++++++++++++ arch/ia64/xen/privops_c.c | 279 +++++++++++++++++++++++++++++++++++++++++ arch/ia64/xen/xensetup.S | 10 ++ include/asm-ia64/xen/privop.h | 24 ++++ 6 files changed, 783 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/xen/Makefile create mode 100644 arch/ia64/xen/paravirt_xen.c create mode 100644 arch/ia64/xen/privops_asm.S create mode 100644 arch/ia64/xen/privops_c.c diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile new file mode 100644 index 0000000..c219358 --- /dev/null +++ b/arch/ia64/xen/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for Xen components +# + +obj-$(CONFIG_PARAVIRT_ALT) += paravirt_xen.o privops_asm.o privops_c.o +obj-$(CONFIG_PARAVIRT_NOP_B_PATCH) += paravirt_xen.o +obj-$(CONFIG_PARAVIRT_ENTRY) += paravirt_xen.o diff --git a/arch/ia64/xen/paravirt_xen.c b/arch/ia64/xen/paravirt_xen.c new file mode 100644 index 0000000..57b9dfd --- /dev/null +++ b/arch/ia64/xen/paravirt_xen.c @@ -0,0 +1,242 @@ +/****************************************************************************** + * linux/arch/ia64/xen/paravirt_xen.c + * + * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <asm/intrinsics.h>
+#include <asm/bugs.h>
+#include <asm/kprobes.h> /* for bundle_t */
+#include <asm/paravirt_core.h>
+
+#ifdef CONFIG_PARAVIRT_ALT
+struct xen_alt_bundle_patch_elem {
+ const void *sbundle;
+ const void *ebundle;
+ unsigned long type;
+};
+
+static unsigned long __init_or_module
+__xen_alt_bundle_patch(void *sbundle, void *ebundle, unsigned long type)
+{
+ extern const struct xen_alt_bundle_patch_elem xen_alt_bundle_array[];
+ extern const unsigned long xen_alt_bundle_array_size;
+
+ unsigned long used = 0;
+ unsigned long i;
+
+ BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0);
+ BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0);
+
+ for (i = 0;
+ i < xen_alt_bundle_array_size / sizeof(xen_alt_bundle_array[0]);
+ i++) {
+ const struct xen_alt_bundle_patch_elem *p =
+ &xen_alt_bundle_array[i];
+ if (p->type == type) {
+ used = p->ebundle - p->sbundle;
+ BUG_ON(used > ebundle - sbundle);
+ memcpy(sbundle, p->sbundle, used);
+ break;
+ }
+ }
+
+ return used;
+}
+
+static void __init
+xen_alt_bundle_patch(void)
+{
+ extern struct paravirt_alt_bundle_patch __start_paravirt_bundles[];
+ extern struct paravirt_alt_bundle_patch __stop_paravirt_bundles[];
+
+ paravirt_alt_bundle_patch_apply(__start_paravirt_bundles,
+ __stop_paravirt_bundles,
+ &__xen_alt_bundle_patch);
+}
+
+#ifdef CONFIG_MODULES
+void
+xen_alt_bundle_patch_module(struct paravirt_alt_bundle_patch *start,
+ struct paravirt_alt_bundle_patch *end)
+{
+ if (is_running_on_xen())
+ paravirt_alt_bundle_patch_apply(start, end,
+ &__xen_alt_bundle_patch);
+}
+#endif /* CONFIG_MODULES */
+
+
+/*
+ * all the native instructions of hyperprivops are M-form or I-form
+ * mov ar.<imm>=r1 I26, M29
+ * mov r1=ar.<imm> I28, M31
+ * mov r1=cr.<imm> M32
+ * mov cr.<imm>=r1 M33
+ * mov r1=psr M36
+ * mov indirect<r1>=r2 M42
+ * mov r1=indirect<r2> M43
+ * ptc.ga M45
+ * thash r1=r2 M46
+ *
+ * the break.{m, i} instruction formats are the same.
+ * So we can safely replace each single instruction that is a target of
+ * a hyperprivop with a break.{m, i} imm21 hyperprivop.
+ */ + +struct xen_alt_inst_patch_elem { + unsigned long stag; + unsigned long etag; + unsigned long type; +}; + +unsigned long +__xen_alt_inst_patch(unsigned long stag, unsigned long etag, + unsigned long type) +{ + extern const struct xen_alt_inst_patch_elem xen_alt_inst_array[]; + extern const unsigned long xen_alt_inst_array_size; + + unsigned long dest_tag = stag; + unsigned long i; + + for (i = 0; + i < xen_alt_inst_array_size / sizeof(xen_alt_inst_array[0]); + i++) { + const struct xen_alt_inst_patch_elem *p = + &xen_alt_inst_array[i]; + if (p->type == type) { + unsigned long src_tag; + + for (src_tag = p->stag; + src_tag < p->etag; + src_tag = paravirt_get_next_tag(src_tag)) { + const cmp_inst_t inst = + paravirt_read_inst(src_tag); + paravirt_write_inst(dest_tag, inst); + + BUG_ON(dest_tag >= etag); + dest_tag = paravirt_get_next_tag(dest_tag); + } + break; + } + } + + return dest_tag; +} + +void +xen_alt_inst_patch(void) +{ + extern struct paravirt_alt_inst_patch __start_paravirt_insts[]; + extern struct paravirt_alt_inst_patch __stop_paravirt_insts[]; + + paravirt_alt_inst_patch_apply(__start_paravirt_insts, + __stop_paravirt_insts, + &__xen_alt_inst_patch); +} + +#ifdef CONFIG_MODULES +void +xen_alt_inst_patch_module(struct paravirt_alt_inst_patch *start, + struct paravirt_alt_inst_patch *end) +{ + if (is_running_on_xen()) + paravirt_alt_inst_patch_apply(start, end, + &__xen_alt_inst_patch); +} +#endif + +#else +#define xen_alt_bundle_patch() do { } while (0) +#define xen_alt_inst_patch() do { } while (0) +#endif /* CONFIG_PARAVIRT_ALT */ + + +#ifdef CONFIG_PARAVIRT_NOP_B_PATCH +#include <asm/paravirt_nop.h> +static void __init +xen_nop_b_patch(void) +{ + extern const struct paravirt_nop_patch __start_paravirt_nop_b[]; + extern const struct paravirt_nop_patch __stop_paravirt_nop_b[]; + + paravirt_nop_b_patch_apply(__start_paravirt_nop_b, + __stop_paravirt_nop_b); +} +#else +#define xen_nop_b_patch() do { } while (0) +#endif + + +#ifdef CONFIG_PARAVIRT_ENTRY + +#include <asm/paravirt_entry.h> + +extern void *xen_switch_to; +extern void *xen_leave_syscall; +extern void *xen_leave_kernel; +extern void *xen_pal_call_static; +extern void *xen_work_processed_syscall; + +const static struct paravirt_entry xen_entries[] __initdata = { + {&xen_switch_to, PARAVIRT_ENTRY_SWITCH_TO}, + {&xen_leave_syscall, PARAVIRT_ENTRY_LEAVE_SYSCALL}, + {&xen_leave_kernel, PARAVIRT_ENTRY_LEAVE_KERNEL}, + {&xen_pal_call_static, PARAVIRT_ENTRY_PAL_CALL_STATIC}, + {&xen_work_processed_syscall, PARAVIRT_ENTRY_WORK_PROCESSED_SYSCALL}, +}; + +void __init +xen_entry_patch(void) +{ + extern const struct paravirt_entry_patch __start_paravirt_entry[]; + extern const struct paravirt_entry_patch __stop_paravirt_entry[]; + + paravirt_entry_patch_apply(__start_paravirt_entry, + __stop_paravirt_entry, + xen_entries, + sizeof(xen_entries)/sizeof(xen_entries[0])); +} +#else +#define xen_entry_patch() do { } while (0) +#endif + + +void __init +xen_paravirt_patch(void) +{ + xen_alt_bundle_patch(); + xen_alt_inst_patch(); + xen_nop_b_patch(); + xen_entry_patch(); +} + +/* + * Local variables: + * mode: C + * c-set-style: "linux" + * c-basic-offset: 8 + * tab-width: 8 + * indent-tabs-mode: t + * End: + */ diff --git a/arch/ia64/xen/privops_asm.S b/arch/ia64/xen/privops_asm.S new file mode 100644 index 0000000..40e400e --- /dev/null +++ b/arch/ia64/xen/privops_asm.S @@ -0,0 +1,221 @@ +/****************************************************************************** + * linux/arch/ia64/xen/privop_s.S + * + * Copyright (c) 2007 
Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <asm/intrinsics.h> +#include <linux/init.h> +#include <asm/paravirt_alt.h> + +#ifdef CONFIG_MODULES +#define __INIT_OR_MODULE .text +#define __INITDATA_OR_MODULE .data +#else +#define __INIT_OR_MODULE __INIT +#define __INITDATA_OR_MODULE __INITDATA +#endif /* CONFIG_MODULES */ + + __INIT_OR_MODULE + .align 32 + .proc nop_b_inst_bundle + .global nop_b_inst_bundle +nop_b_inst_bundle: + { + nop.b 0 + nop.b 0 + nop.b 0 + } + .endp nop_b_inst_bundle + __FINIT + + /* NOTE: nop.[mfi] has same format */ + __INIT_OR_MODULE + .align 32 + .proc nop_mfi_inst_bundle + .global nop_mfi_inst_bundle +nop_mfi_inst_bundle: + { + nop.m 0 + nop.f 0 + nop.i 0 + } + .endp nop_mfi_inst_bundle + __FINIT + + __INIT_OR_MODULE + .align 32 + .proc nop_bundle + .global nop_bundle +nop_bundle: +nop_bundle_start: + { + nop 0 + nop 0 + nop 0 + } +nop_bundle_end: + .endp nop_bundle + __FINIT + + __INITDATA_OR_MODULE + .align 8 + .global nop_bundle_size +nop_bundle_size: + data8 nop_bundle_end - nop_bundle_start + +#define DEFINE_PRIVOP(name, instr) \ + .align 32; \ + .proc xen_ ## name ## _instr; \ + xen_ ## name ## _instr:; \ + xen_ ## name ## _instr_start:; \ + {; \ + [xen_ ## name ## _stag:] \ + instr; \ + [xen_ ## name ## _etag:] \ + nop 0; \ + nop 0; \ + }; \ + xen_ ## name ## _instr_end:; \ + .endp xen_ ## name ## _instr; + + __INIT_OR_MODULE + DEFINE_PRIVOP(rfi, XEN_HYPER_RFI) + DEFINE_PRIVOP(rsm_psr_dt, XEN_HYPER_RSM_PSR_DT) + DEFINE_PRIVOP(ssm_psr_dt, XEN_HYPER_SSM_PSR_DT) + DEFINE_PRIVOP(cover, XEN_HYPER_COVER) + DEFINE_PRIVOP(itc_d, XEN_HYPER_ITC_D) + DEFINE_PRIVOP(itc_i, XEN_HYPER_ITC_I) + DEFINE_PRIVOP(ssm_i, XEN_HYPER_SSM_I) + DEFINE_PRIVOP(get_ivr, XEN_HYPER_GET_IVR) + DEFINE_PRIVOP(get_tpr, XEN_HYPER_GET_TPR) + DEFINE_PRIVOP(set_tpr, XEN_HYPER_SET_TPR) + DEFINE_PRIVOP(eoi, XEN_HYPER_EOI) + DEFINE_PRIVOP(set_itm, XEN_HYPER_SET_ITM) + DEFINE_PRIVOP(thash, XEN_HYPER_THASH) + DEFINE_PRIVOP(ptc_ga, XEN_HYPER_PTC_GA) + DEFINE_PRIVOP(itr_d, XEN_HYPER_ITR_D) + DEFINE_PRIVOP(get_rr, XEN_HYPER_GET_RR) + DEFINE_PRIVOP(set_rr, XEN_HYPER_SET_RR) + DEFINE_PRIVOP(set_kr, XEN_HYPER_SET_KR) + DEFINE_PRIVOP(fc, XEN_HYPER_FC) + DEFINE_PRIVOP(get_cpuid, XEN_HYPER_GET_CPUID) + DEFINE_PRIVOP(get_pmd, XEN_HYPER_GET_PMD) + DEFINE_PRIVOP(get_eflag, XEN_HYPER_GET_EFLAG) + DEFINE_PRIVOP(set_eflag, XEN_HYPER_SET_EFLAG) + DEFINE_PRIVOP(get_psr, XEN_HYPER_GET_PSR) + DEFINE_PRIVOP(set_rr0_to_rr4, XEN_HYPER_SET_RR0_TO_RR4) + __FINIT + + +#define PARAVIRT_ALT_BUNDLE_ELEM(name, type) \ + data8 xen_ ## name ## _instr_start; \ + data8 xen_ ## name ## _instr_end; \ + data8 type; + + __INITDATA_OR_MODULE + .align 8 + .global xen_alt_bundle_array +xen_alt_bundle_array: +xen_alt_bundle_array_start: + PARAVIRT_ALT_BUNDLE_ELEM(rfi, 
PARAVIRT_INST_RFI) + PARAVIRT_ALT_BUNDLE_ELEM(rsm_psr_dt, PARAVIRT_INST_RSM_DT) + PARAVIRT_ALT_BUNDLE_ELEM(ssm_psr_dt, PARAVIRT_INST_SSM_DT) + PARAVIRT_ALT_BUNDLE_ELEM(cover, PARAVIRT_INST_COVER) + PARAVIRT_ALT_BUNDLE_ELEM(itc_d, PARAVIRT_INST_ITC_D) + PARAVIRT_ALT_BUNDLE_ELEM(itc_i, PARAVIRT_INST_ITC_I) + PARAVIRT_ALT_BUNDLE_ELEM(ssm_i, PARAVIRT_INST_SSM_I) + PARAVIRT_ALT_BUNDLE_ELEM(get_ivr, PARAVIRT_INST_GET_IVR) + PARAVIRT_ALT_BUNDLE_ELEM(get_tpr, PARAVIRT_INST_GET_TPR) + PARAVIRT_ALT_BUNDLE_ELEM(set_tpr, PARAVIRT_INST_SET_TPR) + PARAVIRT_ALT_BUNDLE_ELEM(eoi, PARAVIRT_INST_EOI) + PARAVIRT_ALT_BUNDLE_ELEM(set_itm, PARAVIRT_INST_SET_ITM) + PARAVIRT_ALT_BUNDLE_ELEM(thash, PARAVIRT_INST_THASH) + PARAVIRT_ALT_BUNDLE_ELEM(ptc_ga, PARAVIRT_INST_PTC_GA) + PARAVIRT_ALT_BUNDLE_ELEM(itr_d, PARAVIRT_INST_ITR_D) + PARAVIRT_ALT_BUNDLE_ELEM(get_rr, PARAVIRT_INST_GET_RR) + PARAVIRT_ALT_BUNDLE_ELEM(set_rr, PARAVIRT_INST_SET_RR) + PARAVIRT_ALT_BUNDLE_ELEM(set_kr, PARAVIRT_INST_SET_KR) + PARAVIRT_ALT_BUNDLE_ELEM(fc, PARAVIRT_INST_FC) + PARAVIRT_ALT_BUNDLE_ELEM(get_cpuid, PARAVIRT_INST_GET_CPUID) + PARAVIRT_ALT_BUNDLE_ELEM(get_pmd, PARAVIRT_INST_GET_PMD) + PARAVIRT_ALT_BUNDLE_ELEM(get_eflag, PARAVIRT_INST_GET_EFLAG) + PARAVIRT_ALT_BUNDLE_ELEM(set_eflag, PARAVIRT_INST_SET_EFLAG) + PARAVIRT_ALT_BUNDLE_ELEM(get_psr, PARAVIRT_INST_GET_PSR) + + PARAVIRT_ALT_BUNDLE_ELEM(ssm_i, PARAVIRT_BNDL_SSM_I) + PARAVIRT_ALT_BUNDLE_ELEM(rsm_i, PARAVIRT_BNDL_RSM_I) + PARAVIRT_ALT_BUNDLE_ELEM(get_psr_i, PARAVIRT_BNDL_GET_PSR_I) + PARAVIRT_ALT_BUNDLE_ELEM(intrin_local_irq_restore, + PARAVIRT_BNDL_INTRIN_LOCAL_IRQ_RESTORE) +xen_alt_bundle_array_end: + + .align 8 + .global xen_alt_bundle_array_size +xen_alt_bundle_array_size: + .long xen_alt_bundle_array_end - xen_alt_bundle_array_start + + +#define PARAVIRT_ALT_INST_ELEM(name, type) \ + data8 xen_ ## name ## _stag ; \ + data8 xen_ ## name ## _etag ; \ + data8 type + + __INITDATA_OR_MODULE + .align 8 + .global xen_alt_inst_array +xen_alt_inst_array: +xen_alt_inst_array_start: + PARAVIRT_ALT_INST_ELEM(rfi, PARAVIRT_INST_RFI) + PARAVIRT_ALT_INST_ELEM(rsm_psr_dt, PARAVIRT_INST_RSM_DT) + PARAVIRT_ALT_INST_ELEM(ssm_psr_dt, PARAVIRT_INST_SSM_DT) + PARAVIRT_ALT_INST_ELEM(cover, PARAVIRT_INST_COVER) + PARAVIRT_ALT_INST_ELEM(itc_d, PARAVIRT_INST_ITC_D) + PARAVIRT_ALT_INST_ELEM(itc_i, PARAVIRT_INST_ITC_I) + PARAVIRT_ALT_INST_ELEM(ssm_i, PARAVIRT_INST_SSM_I) + PARAVIRT_ALT_INST_ELEM(get_ivr, PARAVIRT_INST_GET_IVR) + PARAVIRT_ALT_INST_ELEM(get_tpr, PARAVIRT_INST_GET_TPR) + PARAVIRT_ALT_INST_ELEM(set_tpr, PARAVIRT_INST_SET_TPR) + PARAVIRT_ALT_INST_ELEM(eoi, PARAVIRT_INST_EOI) + PARAVIRT_ALT_INST_ELEM(set_itm, PARAVIRT_INST_SET_ITM) + PARAVIRT_ALT_INST_ELEM(thash, PARAVIRT_INST_THASH) + PARAVIRT_ALT_INST_ELEM(ptc_ga, PARAVIRT_INST_PTC_GA) + PARAVIRT_ALT_INST_ELEM(itr_d, PARAVIRT_INST_ITR_D) + PARAVIRT_ALT_INST_ELEM(get_rr, PARAVIRT_INST_GET_RR) + PARAVIRT_ALT_INST_ELEM(set_rr, PARAVIRT_INST_SET_RR) + PARAVIRT_ALT_INST_ELEM(set_kr, PARAVIRT_INST_SET_KR) + PARAVIRT_ALT_INST_ELEM(fc, PARAVIRT_INST_FC) + PARAVIRT_ALT_INST_ELEM(get_cpuid, PARAVIRT_INST_GET_CPUID) + PARAVIRT_ALT_INST_ELEM(get_pmd, PARAVIRT_INST_GET_PMD) + PARAVIRT_ALT_INST_ELEM(get_eflag, PARAVIRT_INST_GET_EFLAG) + PARAVIRT_ALT_INST_ELEM(set_eflag, PARAVIRT_INST_SET_EFLAG) + PARAVIRT_ALT_INST_ELEM(get_psr, PARAVIRT_INST_GET_PSR) + PARAVIRT_ALT_INST_ELEM(set_rr0_to_rr4, PARAVIRT_INST_SET_RR0_TO_RR4) + + PARAVIRT_ALT_INST_ELEM(ssm_i, PARAVIRT_BNDL_SSM_I) + PARAVIRT_ALT_INST_ELEM(rsm_i, PARAVIRT_BNDL_RSM_I) + 
PARAVIRT_ALT_INST_ELEM(get_psr_i, PARAVIRT_BNDL_GET_PSR_I) + PARAVIRT_ALT_INST_ELEM(intrin_local_irq_restore, + PARAVIRT_BNDL_INTRIN_LOCAL_IRQ_RESTORE) +xen_alt_inst_array_end: + + .align 8 + .global xen_alt_inst_array_size +xen_alt_inst_array_size: + .long xen_alt_inst_array_end - xen_alt_inst_array_start diff --git a/arch/ia64/xen/privops_c.c b/arch/ia64/xen/privops_c.c new file mode 100644 index 0000000..0fa2e23 --- /dev/null +++ b/arch/ia64/xen/privops_c.c @@ -0,0 +1,279 @@ +/****************************************************************************** + * arch/ia64/xen/privops_c.c + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/linkage.h> +#include <linux/init.h> +#include <linux/module.h> + +#include <xen/interface/xen.h> + +#include <asm/asm-offsets.h> +#define XEN_PSR_I_ADDR_ADDR ((uint8_t **)(XSI_BASE + XSI_PSR_I_ADDR_OFS)) + + +void __init_or_module +xen_privop_ssm_i(void) +{ + /* + * int masked = !xen_get_virtual_psr_i(); + * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr) + * xen_set_virtual_psr_i(1) + * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0 + * // compiler barrier + * if (masked) { + * uint8_t* pend_int_addr = + * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1; + * uint8_t pending = *pend_int_addr; + * if (pending) + * XEN_HYPER_SSM_I + * } + */ + register uint8_t *tmp asm ("r8"); + register int masked asm ("r9"); + register uint8_t *pending_intr_addr asm ("r10"); + + asm volatile(".global xen_ssm_i_instr\n\t" + "xen_ssm_i_instr:\n\t" + ".global xen_ssm_i_instr_start\n\t" + "xen_ssm_i_instr_start:\n\t" + ".global xen_ssm_i_stag\n\t" + "[xen_ssm_i_stag:]\n\t" + /* tmp = &XEN_MAPPEDREGS->interrupt_mask_addr */ + "mov %[tmp]=%[XEN_PSR_I_ADDR_ADDR_IMM]\n\t" + ";;\n\t" + /* tmp = *XEN_MAPPEDREGS->interrupt_mask_addr */ + "ld8 %[tmp]=[%[tmp]]\n\t" + ";;\n\t" + /* pending_intr_addr = tmp - 1 */ + "add %[pending_intr_addr]=-1,%[tmp]\n\t" + /* masked = *tmp */ + "ld1 %[masked]=[%[tmp]]\n\t" + ";;\n\t" + /* *tmp = 0 */ + "st1 [%[tmp]]=r0\n\t" + /* p6 = !masked */ + "cmp.ne.unc p6,p0=%[masked],r0\n\t" + ";;\n\t" + /* tmp = *pending_intr_addr */ + "(p6) ld1 %[tmp]=[%[pending_intr_addr]]\n\t" + ";;\n\t" + /* p7 = p6 && !tmp */ + "(p6) cmp.ne.unc p7,p0=%[tmp],r0\n\t" + ";;\n\t" + "(p7) break %[HYPERPRIVOP_SSM_I_IMM]\n\t" + ".global xen_ssm_i_etag\n\t" + "[xen_ssm_i_etag:]\n\t" + ".global xen_ssm_i_instr_end\n\t" + "xen_ssm_i_instr_end:\n\t" + : + [tmp] "=r"(tmp), + [pending_intr_addr] "=r"(pending_intr_addr), + [masked] "=r"(masked), + + "=m"(**((uint8_t **)XEN_PSR_I_ADDR_ADDR)) + : + [XEN_PSR_I_ADDR_ADDR_IMM] "i"(XEN_PSR_I_ADDR_ADDR), + [HYPERPRIVOP_SSM_I_IMM] "i"(HYPERPRIVOP_SSM_I), + + "m"(*((uint8_t *)XEN_PSR_I_ADDR_ADDR)), + "m"(**((uint8_t **)XEN_PSR_I_ADDR_ADDR)), + 
"m"(*(*((uint8_t **)XEN_PSR_I_ADDR_ADDR) - 1)) + : + "memory", + /* + * predicate registers can't be specified as C variables + * so that we use p6, p7, p8 here. + */ + "p6", /* is_old */ + "p7" /* is_pending */ + ); +} + +void __init_or_module +xen_privop_rsm_i(void) +{ + /* + * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr + * = XEN_PSR_I_ADDR_ADDR; + * psr_i_addr = *psr_i_addr_addr; + * *psr_i_addr = 1; + */ + register unsigned long psr_i_addr asm("r8"); + register uint8_t mask asm ("r9"); + asm volatile (".global xen_rsm_i_instr\n\t" + "xen_rsm_i_instr:\n\t" + ".global xen_rsm_i_instr_start\n\t" + "xen_rsm_i_instr_start:\n\t" + ".global xen_rsm_i_stag\n\t" + "[xen_rsm_i_stag:]\n\t" + "mov %[psr_i_addr]=%[XEN_PSR_I_ADDR_ADDR_IMM]\n\t" + "mov %[mask]=%[ONE_IMM]\n\t" + ";;\n\t" + "ld8 %[psr_i_addr]=[%[psr_i_addr]]\n\t" + ";;\n\t" + "st1 [%[psr_i_addr]]=%[mask]\n\t" + ".global xen_rsm_i_etag\n\t" + "[xen_rsm_i_etag:]\n\t" + ".global xen_rsm_i_instr_end\n\t" + "xen_rsm_i_instr_end:\n\t" + : + [psr_i_addr] "=r"(psr_i_addr), + [mask] "=r"(mask), + "=m"(**((uint8_t **)XEN_PSR_I_ADDR_ADDR)): + [XEN_PSR_I_ADDR_ADDR_IMM] "i"(XEN_PSR_I_ADDR_ADDR), + [ONE_IMM] "i"(1), + "m"(*((uint8_t **)XEN_PSR_I_ADDR_ADDR)): + "memory"); +} + +void __init_or_module +xen_privop_ia64_intrin_local_irq_restore(unsigned long val) +{ + /* + * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR + * psr_i_addr = *psr_i_addr_addr + * pending_intr_addr = psr_i_addr - 1 + * if (val & IA64_PSR_I) { + * masked = *psr_i_addr + * *psr_i_addr = 0 + * compiler barrier + * if (masked) { + * uint8_t pending = *pending_intr_addr; + * if (pending) + * XEN_HYPER_SSM_I + * } + * } else { + * *psr_i_addr = 1 + * } + */ + + register unsigned long __val asm("r8") = val; + register uint8_t *psr_i_addr asm ("r9"); + register uint8_t *pending_intr_addr asm ("r10"); + register uint8_t masked asm ("r11"); + register unsigned long one_or_pending asm ("r8"); + + asm volatile ( + ".global xen_intrin_local_irq_restore_instr\n\t" + "xen_intrin_local_irq_restore_instr:\n\t" + ".global xen_intrin_local_irq_restore_instr_start\n\t" + "xen_intrin_local_irq_restore_instr_start:\n\t" + ".global xen_intrin_local_irq_restore_stag\n\t" + "[xen_intrin_local_irq_restore_stag:]\n\t" + "tbit.nz p6,p7=%[val],%[IA64_PSR_I_BIT_IMM]\n\t" + "mov %[psr_i_addr]=%[XEN_PSR_I_ADDR_ADDR_IMM]\n\t" + ";;\n\t" + "ld8 %[psr_i_addr]=[%[psr_i_addr]]\n\t" + "(p7)mov %[one_or_pending]=%[ONE_IMM]\n\t" + ";;\n\t" + "add %[pending_intr_addr]=-1,%[psr_i_addr]\n\t" + ";;\n\t" + "(p6) ld1 %[masked]=[%[psr_i_addr]]\n\t" + "(p7) st1 [%[psr_i_addr]]=%[one_or_pending]\n\t" + ";;\n\t" + "(p6) st1 [%[psr_i_addr]]=r0\n\t" + "(p6) cmp.ne.unc p8,p0=%[masked],r0\n\t" + "(p6) ld1 %[one_or_pending]=[%[pending_intr_addr]]\n\t" + ";;\n\t" + "(p8) cmp.eq.unc p9,p0=%[one_or_pending],r0\n\t" + ";;\n\t" + "(p9) break %[HYPERPRIVOP_SSM_I_IMM]\n\t" + ".global xen_intrin_local_irq_restore_etag\n\t" + "[xen_intrin_local_irq_restore_etag:]\n\t" + ".global xen_intrin_local_irq_restore_instr_end\n\t" + "xen_intrin_local_irq_restore_instr_end:\n\t" + : + [psr_i_addr] "=r"(psr_i_addr), + [pending_intr_addr] "=r"(pending_intr_addr), + [masked] "=r"(masked), + [one_or_pending] "=r"(one_or_pending), + + "=m"(**((uint8_t **)XEN_PSR_I_ADDR_ADDR)) + : + [val] "r"(__val), + [IA64_PSR_I_BIT_IMM] "i"(IA64_PSR_I_BIT), + [ONE_IMM] "i"(1), + + [XEN_PSR_I_ADDR_ADDR_IMM] "i"(XEN_PSR_I_ADDR_ADDR), + [HYPERPRIVOP_SSM_I_IMM] "i"(HYPERPRIVOP_SSM_I), + + "m"(*((uint8_t *)XEN_PSR_I_ADDR_ADDR)), + "m"(**((uint8_t **)XEN_PSR_I_ADDR_ADDR)), 
+ "m"(*(*((uint8_t **)XEN_PSR_I_ADDR_ADDR) - 1)) + : + "memory", + "p6", /* is_psr_i_set */ + "p7", /* not_psr_i_set */ + "p8", /* is_masked && is_psr_i_set */ + "p9" /* is_pending && is_masked && is_psr_i_set */ + ); +} + +unsigned long __init_or_module +xen_privop_get_psr_i(void) +{ + /* + * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR; + * tmp = *tmp + * tmp = *tmp; + * psr_i = tmp? 0: IA64_PSR_I; + */ + register unsigned long psr_i asm ("r8"); + register unsigned long tmp asm ("r9"); + + asm volatile (".global xen_get_psr_i_instr\n\t" + "xen_get_psr_i_instr:\n\t" + ".global xen_get_psr_i_instr_start\n\t" + "xen_get_psr_i_instr_start:\n\t" + ".global xen_get_psr_i_stag\n\t" + "[xen_get_psr_i_stag:]\n\t" + /* tmp = XEN_PSR_I_ADDR_ADDR */ + "mov %[tmp]=%[XEN_PSR_I_ADDR_ADDR_IMM]\n\t" + ";;\n\t" + /* tmp = *tmp = *XEN_PSR_I_ADDR_ADDR */ + "ld8 %[tmp]=[%[tmp]]\n\t" + /* psr_i = 0 */ + "mov %[psr_i]=0\n\t" + ";;\n\t" + /* tmp = *(uint8_t*)tmp */ + "ld1 %[tmp]=[%[tmp]]\n\t" + ";;\n\t" + /* if (!tmp) psr_i = IA64_PSR_I */ + "cmp.eq.unc p6,p0=%[tmp],r0\n\t" + ";;\n\t" + "(p6) mov %[psr_i]=%[IA64_PSR_I_IMM]\n\t" + ".global xen_get_psr_i_etag\n\t" + "[xen_get_psr_i_etag:]\n\t" + ".global xen_get_psr_i_instr_end\n\t" + "xen_get_psr_i_instr_end:\n\t" + : + [tmp] "=r"(tmp), + [psr_i] "=r"(psr_i) + : + [XEN_PSR_I_ADDR_ADDR_IMM] "i"(XEN_PSR_I_ADDR_ADDR), + [IA64_PSR_I_IMM] "i"(IA64_PSR_I), + "m"(*((uint8_t **)XEN_PSR_I_ADDR_ADDR)), + "m"(**((uint8_t **)XEN_PSR_I_ADDR_ADDR)) + : + "p6"); + return psr_i; +} diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S index 17ad297..2d3d5d4 100644 --- a/arch/ia64/xen/xensetup.S +++ b/arch/ia64/xen/xensetup.S @@ -35,6 +35,16 @@ GLOBAL_ENTRY(early_xen_setup) (isBP) movl r28=XSI_BASE;; (isBP) break 0x1000;; +#ifdef CONFIG_PARAVIRT + /* patch privops */ +(isBP) mov r4=rp + ;; +(isBP) br.call.sptk.many rp=xen_paravirt_patch + ;; +(isBP) mov rp=r4 + ;; +#endif + br.ret.sptk.many rp ;; END(early_xen_setup) diff --git a/include/asm-ia64/xen/privop.h b/include/asm-ia64/xen/privop.h index 95e8e8a..d59cc31 100644 --- a/include/asm-ia64/xen/privop.h +++ b/include/asm-ia64/xen/privop.h @@ -557,6 +557,18 @@ do { \ #endif /* ASM_SUPPORTED && !CONFIG_PARAVIRT_ALT */ +#ifdef CONFIG_PARAVIRT_ALT +#if defined(CONFIG_MODULES) && defined(CONFIG_XEN) +void xen_alt_bundle_patch_module(struct paravirt_alt_bundle_patch *start, + struct paravirt_alt_bundle_patch *end); +void xen_alt_inst_patch_module(struct paravirt_alt_inst_patch *start, + struct paravirt_alt_inst_patch *end); +#else +#define xen_alt_bundle_patch_module(start, end) do { } while (0) +#define xen_alt_inst_patch_module(start, end) do { } while (0) +#endif +#endif /* CONFIG_PARAVIRT_ALT */ + #endif /* !__ASSEMBLY__ */ /* these routines utilize privilege-sensitive or performance-sensitive @@ -573,12 +585,24 @@ do { \ #ifdef CONFIG_XEN #ifdef __ASSEMBLY__ +#ifdef CONFIG_PARAVIRT_ENTRY +#define BR_IF_NATIVE(target, reg_unused, pred_unused) /* nothing */ +#elif defined(CONFIG_PARAVIRT_NOP_B_PATCH) +#define BR_IF_NATIVE(target, reg_unused, pred_unused) \ + .body ; \ + [1:] ; \ + br.cond.sptk.many target;; ; \ + .section .paravirt_nop_b, "a" ; \ + .previous ; \ + .xdata8 ".paravirt_nop_b", 1b +#else #define BR_IF_NATIVE(target, reg, pred) \ .body ; \ movl reg=running_on_xen;; ; \ ld4 reg=[reg];; ; \ cmp.eq pred,p0=reg,r0 ; \ (pred) br.cond.sptk.many target;; +#endif #endif /* __ASSEMBLY__ */ #endif -- 1.5.3 |
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:29
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/xen/xenminstate.h | 137 +++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 137 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/xen/xenminstate.h diff --git a/arch/ia64/xen/xenminstate.h b/arch/ia64/xen/xenminstate.h new file mode 100644 index 0000000..67bbf79 --- /dev/null +++ b/arch/ia64/xen/xenminstate.h @@ -0,0 +1,137 @@ +#ifdef __IA64_ASM_PARAVIRTUALIZED_XEN +/* + * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves + * the minimum state necessary that allows us to turn psr.ic back + * on. + * + * Assumed state upon entry: + * psr.ic: off + * r31: contains saved predicates (pr) + * + * Upon exit, the state is as follows: + * psr.ic: off + * r2 = points to &pt_regs.r16 + * r8 = contents of ar.ccv + * r9 = contents of ar.csd + * r10 = contents of ar.ssd + * r11 = FPSR_DEFAULT + * r12 = kernel sp (kernel virtual address) + * r13 = points to current task_struct (kernel virtual address) + * p15 = TRUE if psr.i is set in cr.ipsr + * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: + * preserved + * CONFIG_XEN note: p6/p7 are not preserved + * + * Note that psr.ic is NOT turned on by this macro. This is so that + * we can pass interruption state as arguments to a handler. + */ +#define DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA) \ + mov r16=IA64_KR(CURRENT); /* M */ \ + mov r27=ar.rsc; /* M */ \ + mov r20=r1; /* A */ \ + mov r25=ar.unat; /* M */ \ + MOV_FROM_IPSR(r29); /* M */ \ + mov r26=ar.pfs; /* I */ \ + MOV_FROM_IIP(r28); /* M */ \ + mov r21=ar.fpsr; /* M */ \ + __COVER; /* B;; (or nothing) */ \ + ;; \ + adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \ + ;; \ + ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \ + st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \ + adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \ + /* switch from user to kernel RBS: */ \ + ;; \ + invala; /* M */ \ + /* SAVE_IFS;*/ /* see xen special handling below */ \ + cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? 
*/ \ + ;; \ +(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ + ;; \ +(pUStk) mov.m r24=ar.rnat; \ +(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ +(pKStk) mov r1=sp; /* get sp */ \ + ;; \ +(pUStk) lfetch.fault.excl.nt1 [r22]; \ +(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ +(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ + ;; \ +(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ +(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ + ;; \ +(pUStk) mov r18=ar.bsp; \ +(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ + adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ + adds r16=PT(CR_IPSR),r1; \ + ;; \ + lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ + st8 [r16]=r29; /* save cr.ipsr */ \ + ;; \ + lfetch.fault.excl.nt1 [r17]; \ + tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \ + mov r29=b0 \ + ;; \ + adds r16=PT(R8),r1; /* initialize first base pointer */ \ + adds r17=PT(R9),r1; /* initialize second base pointer */ \ +(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \ + ;; \ +.mem.offset 0,0; st8.spill [r16]=r8,16; \ +.mem.offset 8,0; st8.spill [r17]=r9,16; \ + ;; \ +.mem.offset 0,0; st8.spill [r16]=r10,24; \ +.mem.offset 8,0; st8.spill [r17]=r11,24; \ + ;; \ + /* xen special handling for possibly lazy cover */ \ + /* XXX: SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \ + movl r8=XSI_PRECOVER_IFS; \ + ;; \ + ld8 r30=[r8]; \ + ;; \ + st8 [r16]=r28,16; /* save cr.iip */ \ + st8 [r17]=r30,16; /* save cr.ifs */ \ +(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \ + mov r8=ar.ccv; \ + mov r9=ar.csd; \ + mov r10=ar.ssd; \ + movl r11=FPSR_DEFAULT; /* L-unit */ \ + ;; \ + st8 [r16]=r25,16; /* save ar.unat */ \ + st8 [r17]=r26,16; /* save ar.pfs */ \ + shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \ + ;; \ + st8 [r16]=r27,16; /* save ar.rsc */ \ +(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \ +(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \ + ;; /* avoid RAW on r16 & r17 */ \ +(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \ + st8 [r17]=r31,16; /* save predicates */ \ +(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \ + ;; \ + st8 [r16]=r29,16; /* save b0 */ \ + st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \ + cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \ + ;; \ +.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \ +.mem.offset 8,0; st8.spill [r17]=r12,16; \ + adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \ + ;; \ +.mem.offset 0,0; st8.spill [r16]=r13,16; \ +.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \ + mov r13=IA64_KR(CURRENT); /* establish `current' */ \ + ;; \ +.mem.offset 0,0; st8.spill [r16]=r15,16; \ +.mem.offset 8,0; st8.spill [r17]=r14,16; \ + ;; \ +.mem.offset 0,0; st8.spill [r16]=r2,16; \ +.mem.offset 8,0; st8.spill [r17]=r3,16; \ + ;; \ + EXTRA; \ + BSW_1(r2, r14); \ + adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ + ;; \ + movl r1=__gp; /* establish kernel global pointer */ \ + ;; \ + /*bsw.1;*/ /* switch back to bank 1 (must be last in insn group) */ \ + ;; +#endif /* __IA64_ASM_PARAVIRTUALIZED_XEN */ -- 1.5.3 |
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:29
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/xen/Makefile | 2 + arch/ia64/xen/xen_pv_ops.c | 69 ++++++++++++++++++++++++++++++++++++++++++++ arch/ia64/xen/xensetup.S | 10 ++++++ 3 files changed, 81 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/xen/xen_pv_ops.c diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile index c219358..4b1db56 100644 --- a/arch/ia64/xen/Makefile +++ b/arch/ia64/xen/Makefile @@ -2,6 +2,8 @@ # Makefile for Xen components # +obj-y := xen_pv_ops.o + obj-$(CONFIG_PARAVIRT_ALT) += paravirt_xen.o privops_asm.o privops_c.o obj-$(CONFIG_PARAVIRT_NOP_B_PATCH) += paravirt_xen.o obj-$(CONFIG_PARAVIRT_ENTRY) += paravirt_xen.o diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c new file mode 100644 index 0000000..18aa2f6 --- /dev/null +++ b/arch/ia64/xen/xen_pv_ops.c @@ -0,0 +1,69 @@ +/****************************************************************************** + * arch/ia64/xen/xen_pv_ops.c + * + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/console.h> +#include <linux/kernel.h> +#include <linux/notifier.h> +#include <linux/pm.h> +#include <linux/sched.h> +#include <linux/string.h> + +#include <asm/paravirt.h> +#include <asm/unwind.h> + +#include <xen/features.h> +#include <asm/xen/hypervisor.h> +#include <asm/xen/xencomm.h> + +/*************************************************************************** + * general info + */ +static struct pv_info xen_info __initdata = { + .kernel_rpl = 2, /* or 1: determin at runtime */ + .paravirt_enabled = 1, + .name = "Xen/ia64", +}; + +#define IA64_RSC_PL_SHIFT 2 +#define IA64_RSC_PL_BIT_SIZE 2 +#define IA64_RSC_PL_MASK ((1UL << (IA64_RSC_PL_BIT_SIZE - 1)) << IA64_RSC_PL_SHIFT) + +static void __init +xen_info_init(void) +{ + /* Xenified Linux/ia64 may run on pl = 1 or 2. + * determin at run time. */ + unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC); + unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT; + xen_info.kernel_rpl = rpl; +} + +/*************************************************************************** + * pv_ops initialization + */ + +void __init +xen_setup_pv_ops(void) +{ + xen_info_init(); + pv_info = xen_info; +} diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S index 2d3d5d4..cb3432b 100644 --- a/arch/ia64/xen/xensetup.S +++ b/arch/ia64/xen/xensetup.S @@ -35,6 +35,16 @@ GLOBAL_ENTRY(early_xen_setup) (isBP) movl r28=XSI_BASE;; (isBP) break 0x1000;; +#ifdef CONFIG_PARAVIRT_GUEST + /* patch privops */ +(isBP) mov r4=rp + ;; +(isBP) br.call.sptk.many rp=xen_setup_pv_ops + ;; +(isBP) mov rp=r4 + ;; +#endif + #ifdef CONFIG_PARAVIRT /* patch privops */ (isBP) mov r4=rp -- 1.5.3 |
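A note on the rpl probe above: the IA-64 pl field occupies two bits of ar.rsc starting at bit 2, so the conventional mask for it is ((1UL << width) - 1) << shift, i.e. 0xc. The standalone sketch below builds the mask that way; comparing it against the IA64_RSC_PL_MASK definition in the patch is a useful sanity check, since a two-bit field cannot be covered by a single-bit mask.

    /* Sketch, assuming the architectural layout (pl = ar.rsc bits 2-3). */
    #define RSC_PL_SHIFT    2
    #define RSC_PL_WIDTH    2
    #define RSC_PL_MASK     (((1UL << RSC_PL_WIDTH) - 1) << RSC_PL_SHIFT)

    static unsigned int rsc_to_rpl(unsigned long rsc)
    {
            return (rsc & RSC_PL_MASK) >> RSC_PL_SHIFT;     /* 0, 1 or 2 */
    }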
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:29
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/xen/hypercall.S | 7 + include/asm-ia64/xen/hypercall.h | 426 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 433 insertions(+), 0 deletions(-) create mode 100644 include/asm-ia64/xen/hypercall.h diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S index a96f278..7c5242b 100644 --- a/arch/ia64/xen/hypercall.S +++ b/arch/ia64/xen/hypercall.S @@ -122,3 +122,10 @@ GLOBAL_ENTRY(xen_set_eflag) END(xen_set_eflag) #endif /* CONFIG_IA32_SUPPORT */ #endif /* ASM_SUPPORTED */ + +GLOBAL_ENTRY(__hypercall) + mov r2=r37 + break 0x1000 + br.ret.sptk.many b0 + ;; +END(__hypercall) diff --git a/include/asm-ia64/xen/hypercall.h b/include/asm-ia64/xen/hypercall.h new file mode 100644 index 0000000..a266e44 --- /dev/null +++ b/include/asm-ia64/xen/hypercall.h @@ -0,0 +1,426 @@ +/****************************************************************************** + * hypercall.h + * + * Linux-specific hypervisor handling. + * + * Copyright (c) 2002-2004, K A Fraser + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _ASM_IA64_XEN_HYPERCALL_H +#define _ASM_IA64_XEN_HYPERCALL_H + +#ifndef _ASM_IA64_XEN_HYPERVISOR_H +# error "please don't include this file directly" +#endif + +#include <asm/xen/xcom_hcall.h> +struct xencomm_handle; +extern unsigned long __hypercall(unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long cmd); + +/* + * Assembler stubs for hyper-calls. 
+ */ + +#define _hypercall0(type, name) \ +({ \ + long __res; \ + __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\ + (type)__res; \ +}) + +#define _hypercall1(type, name, a1) \ +({ \ + long __res; \ + __res = __hypercall((unsigned long)a1, \ + 0, 0, 0, 0, __HYPERVISOR_##name); \ + (type)__res; \ +}) + +#define _hypercall2(type, name, a1, a2) \ +({ \ + long __res; \ + __res = __hypercall((unsigned long)a1, \ + (unsigned long)a2, \ + 0, 0, 0, __HYPERVISOR_##name); \ + (type)__res; \ +}) + +#define _hypercall3(type, name, a1, a2, a3) \ +({ \ + long __res; \ + __res = __hypercall((unsigned long)a1, \ + (unsigned long)a2, \ + (unsigned long)a3, \ + 0, 0, __HYPERVISOR_##name); \ + (type)__res; \ +}) + +#define _hypercall4(type, name, a1, a2, a3, a4) \ +({ \ + long __res; \ + __res = __hypercall((unsigned long)a1, \ + (unsigned long)a2, \ + (unsigned long)a3, \ + (unsigned long)a4, \ + 0, __HYPERVISOR_##name); \ + (type)__res; \ +}) + +#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ +({ \ + long __res; \ + __res = __hypercall((unsigned long)a1, \ + (unsigned long)a2, \ + (unsigned long)a3, \ + (unsigned long)a4, \ + (unsigned long)a5, \ + __HYPERVISOR_##name); \ + (type)__res; \ +}) + + +static inline int +xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) +{ + return _hypercall2(int, sched_op, cmd, arg); +} + +static inline long +HYPERVISOR_set_timer_op(u64 timeout) +{ + unsigned long timeout_hi = (unsigned long)(timeout >> 32); + unsigned long timeout_lo = (unsigned long)timeout; + return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); +} + +static inline int +xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list, + int nr_calls) +{ + return _hypercall2(int, multicall, call_list, nr_calls); +} + +static inline int +xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg) +{ + return _hypercall2(int, memory_op, cmd, arg); +} + +static inline int +xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg) +{ + return _hypercall2(int, event_channel_op, cmd, arg); +} + +static inline int +xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg) +{ + return _hypercall2(int, xen_version, cmd, arg); +} + +static inline int +xencomm_arch_hypercall_console_io(int cmd, int count, + struct xencomm_handle *str) +{ + return _hypercall3(int, console_io, cmd, count, str); +} + +static inline int +xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg) +{ + return _hypercall2(int, physdev_op, cmd, arg); +} + +static inline int +xencomm_arch_hypercall_grant_table_op(unsigned int cmd, + struct xencomm_handle *uop, + unsigned int count) +{ + return _hypercall3(int, grant_table_op, cmd, uop, count); +} + +int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); + +extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg); + +static inline int +xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg) +{ + return _hypercall2(int, callback_op, cmd, arg); +} + +static inline unsigned long +xencomm_arch_hypercall_hvm_op(int cmd, void *arg) +{ + return _hypercall2(unsigned long, hvm_op, cmd, arg); +} + +static inline long +xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg) +{ + return _hypercall3(long, vcpu_op, cmd, cpu, arg); +} + +static inline int +HYPERVISOR_physdev_op(int cmd, void *arg) +{ + switch (cmd) { + case PHYSDEVOP_eoi: + return _hypercall1(int, ia64_fast_eoi, + ((struct physdev_eoi *)arg)->irq); + default: + return 
xencomm_hypercall_physdev_op(cmd, arg); + } +} + +static inline int +xencomm_arch_hypercall_xenoprof_op(int op, struct xencomm_handle *arg) +{ + return _hypercall2(int, xenoprof_op, op, arg); +} + +static inline long +xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg) +{ + return _hypercall1(long, opt_feature, arg); +} + +#define xen_do_IRQ(irq, regs) \ +do { \ + struct pt_regs *old_regs; \ + old_regs = set_irq_regs(regs); \ + irq_enter(); \ + __do_IRQ(irq); \ + irq_exit(); \ + set_irq_regs(old_regs); \ +} while (0) +#define irq_ctx_init(cpu) do { } while (0) + +#include <linux/err.h> +#ifdef HAVE_XEN_PLATFORM_COMPAT_H +#include <xen/platform-compat.h> +#endif + +static inline unsigned long +__HYPERVISOR_ioremap(unsigned long ioaddr, unsigned long size) +{ + return _hypercall3(unsigned long, ia64_dom0vp_op, + IA64_DOM0VP_ioremap, ioaddr, size); +} + +static inline unsigned long +HYPERVISOR_ioremap(unsigned long ioaddr, unsigned long size) +{ + unsigned long ret = ioaddr; + if (is_running_on_xen()) { + ret = __HYPERVISOR_ioremap(ioaddr, size); + if (unlikely(ret == -ENOSYS)) + panic("hypercall %s failed with %ld. " + "Please check Xen and Linux config mismatch\n", + __func__, -ret); + else if (unlikely(IS_ERR_VALUE(ret))) + ret = ioaddr; + } + return ret; +} + +static inline unsigned long +__HYPERVISOR_phystomach(unsigned long gpfn) +{ + return _hypercall2(unsigned long, ia64_dom0vp_op, + IA64_DOM0VP_phystomach, gpfn); +} + +static inline unsigned long +HYPERVISOR_phystomach(unsigned long gpfn) +{ + unsigned long ret = gpfn; + if (is_running_on_xen()) + ret = __HYPERVISOR_phystomach(gpfn); + return ret; +} + +static inline unsigned long +__HYPERVISOR_machtophys(unsigned long mfn) +{ + return _hypercall2(unsigned long, ia64_dom0vp_op, + IA64_DOM0VP_machtophys, mfn); +} + +static inline unsigned long +HYPERVISOR_machtophys(unsigned long mfn) +{ + unsigned long ret = mfn; + if (is_running_on_xen()) + ret = __HYPERVISOR_machtophys(mfn); + return ret; +} + +static inline unsigned long +__HYPERVISOR_zap_physmap(unsigned long gpfn, unsigned int extent_order) +{ + return _hypercall3(unsigned long, ia64_dom0vp_op, + IA64_DOM0VP_zap_physmap, gpfn, extent_order); +} + +static inline unsigned long +HYPERVISOR_zap_physmap(unsigned long gpfn, unsigned int extent_order) +{ + unsigned long ret = 0; + if (is_running_on_xen()) + ret = __HYPERVISOR_zap_physmap(gpfn, extent_order); + return ret; +} + +static inline unsigned long +__HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn, + unsigned long flags, domid_t domid) +{ + return _hypercall5(unsigned long, ia64_dom0vp_op, + IA64_DOM0VP_add_physmap, gpfn, mfn, flags, domid); +} + +static inline unsigned long +HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn, + unsigned long flags, domid_t domid) +{ + unsigned long ret = 0; + BUG_ON(!is_running_on_xen()); + if (is_running_on_xen()) + ret = __HYPERVISOR_add_physmap(gpfn, mfn, flags, domid); + return ret; +} + +static inline unsigned long +__HYPERVISOR_add_physmap_with_gmfn(unsigned long gpfn, unsigned long gmfn, + unsigned long flags, domid_t domid) +{ + return _hypercall5(unsigned long, ia64_dom0vp_op, + IA64_DOM0VP_add_physmap_with_gmfn, + gpfn, gmfn, flags, domid); +} + +static inline unsigned long +HYPERVISOR_add_physmap_with_gmfn(unsigned long gpfn, unsigned long gmfn, + unsigned long flags, domid_t domid) +{ + unsigned long ret = 0; + BUG_ON(!is_running_on_xen()); + if (is_running_on_xen()) + ret = __HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, + flags, domid); + return 
ret; +} + +#ifdef CONFIG_XEN_IA64_EXPOSE_P2M +static inline unsigned long +HYPERVISOR_expose_p2m(unsigned long conv_start_gpfn, + unsigned long assign_start_gpfn, + unsigned long expose_size, unsigned long granule_pfn) +{ + return _hypercall5(unsigned long, ia64_dom0vp_op, + IA64_DOM0VP_expose_p2m, conv_start_gpfn, + assign_start_gpfn, expose_size, granule_pfn); +} + +static inline int +xencomm_arch_expose_foreign_p2m(unsigned long gpfn, + domid_t domid, struct xencomm_handle *arg, + unsigned long flags) +{ + return _hypercall5(int, ia64_dom0vp_op, + IA64_DOM0VP_expose_foreign_p2m, + gpfn, domid, arg, flags); +} + +static inline int +HYPERVISOR_unexpose_foreign_p2m(unsigned long gpfn, domid_t domid) +{ + return _hypercall3(int, ia64_dom0vp_op, + IA64_DOM0VP_unexpose_foreign_p2m, gpfn, domid); +} +#endif + +static inline int +xencomm_arch_hypercall_perfmon_op(unsigned long cmd, + struct xencomm_handle *arg, + unsigned long count) +{ + return _hypercall4(int, ia64_dom0vp_op, + IA64_DOM0VP_perfmon, cmd, arg, count); +} + +static inline int +xencomm_arch_hypercall_fpswa_revision(struct xencomm_handle *arg) +{ + return _hypercall2(int, ia64_dom0vp_op, + IA64_DOM0VP_fpswa_revision, arg); +} + +static inline int +xencomm_arch_hypercall_ia64_debug_op(unsigned long cmd, + unsigned long domain, + struct xencomm_handle *arg) +{ + return _hypercall3(int, ia64_debug_op, cmd, domain, arg); +} + +static inline int +HYPERVISOR_add_io_space(unsigned long phys_base, + unsigned long sparse, + unsigned long space_number) +{ + return _hypercall4(int, ia64_dom0vp_op, IA64_DOM0VP_add_io_space, + phys_base, sparse, space_number); +} + +/* for balloon driver */ +#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0) + +/* Use xencomm to do hypercalls. */ +#define HYPERVISOR_sched_op xencomm_hypercall_sched_op +#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op +#define HYPERVISOR_callback_op xencomm_hypercall_callback_op +#define HYPERVISOR_multicall xencomm_hypercall_multicall +#define HYPERVISOR_xen_version xencomm_hypercall_xen_version +#define HYPERVISOR_console_io xencomm_hypercall_console_io +#define HYPERVISOR_hvm_op xencomm_hypercall_hvm_op +#define HYPERVISOR_memory_op xencomm_hypercall_memory_op +#define HYPERVISOR_xenoprof_op xencomm_hypercall_xenoprof_op +#define HYPERVISOR_perfmon_op xencomm_hypercall_perfmon_op +#define HYPERVISOR_fpswa_revision xencomm_hypercall_fpswa_revision +#define HYPERVISOR_suspend xencomm_hypercall_suspend +#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op +#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature +#define HYPERVISOR_kexec_op xencomm_hypercall_kexec_op + +/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */ +#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; }) + +#endif /* _ASM_IA64_XEN_HYPERCALL_H */ -- 1.5.3 |
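The calling convention is easiest to see by expanding one wrapper by hand. _hypercall2(int, sched_op, cmd, arg) places its two arguments in the first of __hypercall()'s five fixed slots, zeroes the rest, and passes the command index last; on the asm side the five values arrive in the stacked output registers, the command lands in r37, and the stub copies it to r2 before issuing break 0x1000. A sketch of the expansion:

    /* Hand expansion of _hypercall2(int, sched_op, cmd, arg) -- sketch. */
    long __res = __hypercall((unsigned long)cmd, (unsigned long)arg,
                             0, 0, 0, __HYPERVISOR_sched_op);
    return (int)__res;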
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:29
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/xen/util.c | 101 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 101 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/xen/util.c diff --git a/arch/ia64/xen/util.c b/arch/ia64/xen/util.c new file mode 100644 index 0000000..242a1a4 --- /dev/null +++ b/arch/ia64/xen/util.c @@ -0,0 +1,101 @@ +/****************************************************************************** + * arch/ia64/xen/util.c + * This file is the ia64 counterpart of drivers/xen/util.c + * + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <asm/uaccess.h> +#include <xen/interface/memory.h> +#include <asm/xen/hypercall.h> + +struct vm_struct *xen_alloc_vm_area(unsigned long size) +{ + int order; + unsigned long virt; + unsigned long nr_pages; + struct vm_struct *area; + + order = get_order(size); + virt = __get_free_pages(GFP_KERNEL, order); + if (virt == 0) + goto err0; + nr_pages = 1 << order; + scrub_pages(virt, nr_pages); + + area = kmalloc(sizeof(*area), GFP_KERNEL); + if (area == NULL) + goto err1; + + area->flags = VM_IOREMAP; + area->addr = (void *)virt; + area->size = size; + area->pages = NULL; + area->nr_pages = nr_pages; + area->phys_addr = 0; /* xenbus_map_ring_valloc uses this field! */ + + return area; + +err1: + free_pages(virt, order); +err0: + return NULL; +} +EXPORT_SYMBOL_GPL(xen_alloc_vm_area); + +void xen_free_vm_area(struct vm_struct *area) +{ + unsigned int order = get_order(area->size); + unsigned long i; + unsigned long phys_addr = __pa(area->addr); + + /* This area is used for foreign page mappping. + * So underlying machine page may not be assigned. */ + for (i = 0; i < (1 << order); i++) { + unsigned long ret; + unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i; + struct xen_memory_reservation reservation = { + .nr_extents = 1, + .address_bits = 0, + .extent_order = 0, + .domid = DOMID_SELF + }; + set_xen_guest_handle(reservation.extent_start, &gpfn); + ret = HYPERVISOR_memory_op(XENMEM_populate_physmap, + &reservation); + BUG_ON(ret != 1); + } + free_pages((unsigned long)area->addr, order); + kfree(area); +} +EXPORT_SYMBOL_GPL(xen_free_vm_area); + +/* + * Local variables: + * c-file-style: "linux" + * indent-tabs-mode: t + * c-indent-level: 8 + * c-basic-offset: 8 + * tab-width: 8 + * End: + */ -- 1.5.3 |
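Typical use pairs the two helpers around a foreign mapping. A minimal sketch, with error paths trimmed and everything other than the two exported helpers illustrative:

    struct vm_struct *area;

    area = xen_alloc_vm_area(PAGE_SIZE);    /* scrubbed pages, VM_IOREMAP */
    if (area == NULL)
            return -ENOMEM;
    /* ... map a foreign grant page at area->addr ... */
    xen_free_vm_area(area);                 /* repopulate physmap, then free */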
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:28
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/kernel/Makefile | 15 +++++++++++++++ 1 files changed, 15 insertions(+), 0 deletions(-) diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 7849bc3..a80dd3f 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -83,3 +83,18 @@ $(obj)/gate-data.o: $(obj)/gate.so # AFLAGS_ivt.o += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE AFLAGS_switch_leave.o += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE + +# xen multi compile +$(obj)/xen_%.o: $(src)/%.S FORCE + $(call if_changed_dep,as_o_S) + +# +# xenivt.o, xen_switch_leave.o +# +obj-$(CONFIG_XEN) += xen_ivt.o xen_switch_leave.o +ifeq ($(CONFIG_XEN), y) +targets += xen_ivt.o xen_switch_leave.o +$(obj)/build-in.o: xen_ivt.o xen_switch_leave.o +endif +AFLAGS_xen_ivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN +AFLAGS_xen_switch_leave.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -- 1.5.3 |
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:28
Signed-off-by: Isaku Yamahata <yam...@va...> --- include/asm-ia64/sync_bitops.h | 59 ++++++++++++++++++++++++++++++++++++++++ 1 files changed, 59 insertions(+), 0 deletions(-) create mode 100644 include/asm-ia64/sync_bitops.h diff --git a/include/asm-ia64/sync_bitops.h b/include/asm-ia64/sync_bitops.h new file mode 100644 index 0000000..f56cd90 --- /dev/null +++ b/include/asm-ia64/sync_bitops.h @@ -0,0 +1,59 @@ +#ifndef _ASM_IA64_SYNC_BITOPS_H +#define _ASM_IA64_SYNC_BITOPS_H + +/* + * Copyright 1992, Linus Torvalds. + * Heavily modified to provide guaranteed strong synchronisation + * when communicating with Xen or other guest OSes running on other CPUs. + */ + +static inline void sync_set_bit(int nr, volatile void *addr) +{ + set_bit(nr, addr); +} + +static inline void sync_clear_bit(int nr, volatile void *addr) +{ + clear_bit(nr, addr); +} + +static inline void sync_change_bit(int nr, volatile void *addr) +{ + change_bit(nr, addr); +} + +static inline int sync_test_and_set_bit(int nr, volatile void *addr) +{ + return test_and_set_bit(nr, addr); +} + +static inline int sync_test_and_clear_bit(int nr, volatile void *addr) +{ + return test_and_clear_bit(nr, addr); +} + +static inline int sync_test_and_change_bit(int nr, volatile void *addr) +{ + return test_and_change_bit(nr, addr); +} + +static inline int sync_const_test_bit(int nr, const volatile void *addr) +{ + return test_bit(nr, addr); +} + +static inline int sync_var_test_bit(int nr, volatile void *addr) +{ + return test_bit(nr, addr); +} + +#define sync_cmpxchg ia64_cmpxchg4_acq + +#define sync_test_bit(nr,addr) \ + (__builtin_constant_p(nr) ? \ + sync_const_test_bit((nr), (addr)) : \ + sync_var_test_bit((nr), (addr))) + +#define sync_cmpxchg_subword sync_cmpxchg + +#endif /* _ASM_IA64_SYNC_BITOPS_H */ -- 1.5.3 |
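Because ia64's atomic bitops already provide the required ordering, the sync_ variants are thin aliases; the point of the header is to give code that talks to other domains one portable spelling. A hedged usage sketch, where "port" and the shared word are illustrative:

    /* Claim a bit in a word shared with the hypervisor or another
     * guest; sync_test_and_set_bit() is atomic and fully ordered. */
    if (!sync_test_and_set_bit(port, shared_pending))
            notify_remote();        /* hypothetical notification hook */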
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:27
Signed-off-by: Isaku Yamahata <yam...@va...> --- include/asm-ia64/xen/interface.h | 585 ++++++++++++++++++++++++++++++++++++++ 1 files changed, 585 insertions(+), 0 deletions(-) create mode 100644 include/asm-ia64/xen/interface.h diff --git a/include/asm-ia64/xen/interface.h b/include/asm-ia64/xen/interface.h new file mode 100644 index 0000000..4cb4515 --- /dev/null +++ b/include/asm-ia64/xen/interface.h @@ -0,0 +1,585 @@ +/****************************************************************************** + * arch-ia64/hypervisor-if.h + * + * Guest OS interface to IA64 Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _ASM_IA64_XEN_INTERFACE_H +#define _ASM_IA64_XEN_INTERFACE_H + +#define __DEFINE_GUEST_HANDLE(name, type) \ + typedef struct { type *p; } __guest_handle_ ## name + +#define DEFINE_GUEST_HANDLE_STRUCT(name) \ + __DEFINE_GUEST_HANDLE(name, struct name) +#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) +#define GUEST_HANDLE(name) __guest_handle_ ## name +#define GUEST_HANDLE_64(name) GUEST_HANDLE(name) +#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) + +#ifndef __ASSEMBLY__ +/* Guest handles for primitive C types. */ +__DEFINE_GUEST_HANDLE(uchar, unsigned char); +__DEFINE_GUEST_HANDLE(uint, unsigned int); +__DEFINE_GUEST_HANDLE(ulong, unsigned long); +__DEFINE_GUEST_HANDLE(u64, unsigned long); +DEFINE_GUEST_HANDLE(char); +DEFINE_GUEST_HANDLE(int); +DEFINE_GUEST_HANDLE(long); +DEFINE_GUEST_HANDLE(void); + +typedef unsigned long xen_pfn_t; +DEFINE_GUEST_HANDLE(xen_pfn_t); +#define PRI_xen_pfn "lx" +#endif + +/* Arch specific VIRQs definition */ +#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */ +#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */ +#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */ + +/* Maximum number of virtual CPUs in multi-processor guests. */ +/* keep sizeof(struct shared_page) <= PAGE_SIZE. + * this is checked in arch/ia64/xen/hypervisor.c. 
*/ +#define MAX_VIRT_CPUS 64 + +#ifndef __ASSEMBLY__ + +#define INVALID_MFN (~0UL) + +struct pt_fpreg { + union { + unsigned long bits[2]; + long double __dummy; /* force 16-byte alignment */ + } u; +}; + +union vac { + unsigned long value; + struct { + int a_int:1; + int a_from_int_cr:1; + int a_to_int_cr:1; + int a_from_psr:1; + int a_from_cpuid:1; + int a_cover:1; + int a_bsw:1; + long reserved:57; + }; +}; + +union vdc { + unsigned long value; + struct { + int d_vmsw:1; + int d_extint:1; + int d_ibr_dbr:1; + int d_pmc:1; + int d_to_pmd:1; + int d_itm:1; + long reserved:58; + }; +}; + +struct mapped_regs { + union vac vac; + union vdc vdc; + unsigned long virt_env_vaddr; + unsigned long reserved1[29]; + unsigned long vhpi; + unsigned long reserved2[95]; + union { + unsigned long vgr[16]; + unsigned long bank1_regs[16]; /* bank1 regs (r16-r31) when bank0 active */ + }; + union { + unsigned long vbgr[16]; + unsigned long bank0_regs[16]; /* bank0 regs (r16-r31) when bank1 active */ + }; + unsigned long vnat; + unsigned long vbnat; + unsigned long vcpuid[5]; + unsigned long reserved3[11]; + unsigned long vpsr; + unsigned long vpr; + unsigned long reserved4[76]; + union { + unsigned long vcr[128]; + struct { + unsigned long dcr; /* CR0 */ + unsigned long itm; + unsigned long iva; + unsigned long rsv1[5]; + unsigned long pta; /* CR8 */ + unsigned long rsv2[7]; + unsigned long ipsr; /* CR16 */ + unsigned long isr; + unsigned long rsv3; + unsigned long iip; + unsigned long ifa; + unsigned long itir; + unsigned long iipa; + unsigned long ifs; + unsigned long iim; /* CR24 */ + unsigned long iha; + unsigned long rsv4[38]; + unsigned long lid; /* CR64 */ + unsigned long ivr; + unsigned long tpr; + unsigned long eoi; + unsigned long irr[4]; + unsigned long itv; /* CR72 */ + unsigned long pmv; + unsigned long cmcv; + unsigned long rsv5[5]; + unsigned long lrr0; /* CR80 */ + unsigned long lrr1; + unsigned long rsv6[46]; + }; + }; + union { + unsigned long reserved5[128]; + struct { + unsigned long precover_ifs; + unsigned long unat; /* not sure if this is needed until + NaT arch is done */ + int interrupt_collection_enabled; /* virtual psr.ic */ + /* virtual interrupt deliverable flag is evtchn_upcall_mask in + * shared info area now. interrupt_mask_addr is the address + * of evtchn_upcall_mask for current vcpu + */ + unsigned char *interrupt_mask_addr; + int pending_interruption; + unsigned char vpsr_pp; + unsigned char vpsr_dfh; + unsigned char hpsr_dfh; + unsigned char hpsr_mfh; + unsigned long reserved5_1[4]; + int metaphysical_mode; /* 1 = use metaphys mapping + 0 = use virtual */ + int banknum; /* 0 or 1, which virtual register + bank is active */ + unsigned long rrs[8]; /* region registers */ + unsigned long krs[8]; /* kernel registers */ + unsigned long tmp[16]; /* temp registers + (e.g. for hyperprivops) */ + }; + }; +}; + +struct vpd { + struct mapped_regs vpd_low; + unsigned long reserved6[3456]; + unsigned long vmm_avail[128]; + unsigned long reserved7[4096]; +}; + +struct arch_vcpu_info { + /* nothing */ +}; + +/* + * This structure is used for magic page in domain pseudo physical address + * space and the result of XENMEM_machine_memory_map. + * As the XENMEM_machine_memory_map result, + * xen_memory_map::nr_entries indicates the size in bytes + * including struct xen_ia64_memmap_info. Not the number of entries. 
+ */ +struct xen_ia64_memmap_info { + uint64_t efi_memmap_size; /* size of EFI memory map */ + uint64_t efi_memdesc_size; /* size of an EFI memory map descriptor */ + uint32_t efi_memdesc_version; /* memory descriptor version */ + void *memdesc[0]; /* array of efi_memory_desc_t */ +}; + +struct arch_shared_info { + /* PFN of the start_info page. */ + unsigned long start_info_pfn; + + /* Interrupt vector for event channel. */ + int evtchn_vector; + + /* PFN of memmap_info page */ + unsigned int memmap_info_num_pages; /* currently only = 1 case is + supported. */ + unsigned long memmap_info_pfn; + + uint64_t pad[31]; +}; + +typedef unsigned long xen_callback_t; + +struct ia64_tr_entry { + unsigned long pte; + unsigned long itir; + unsigned long vadr; + unsigned long rid; +}; +DEFINE_GUEST_HANDLE_STRUCT(ia64_tr_entry); + +struct vcpu_tr_regs { + struct ia64_tr_entry itrs[12]; + struct ia64_tr_entry dtrs[12]; +}; + +union vcpu_ar_regs { + unsigned long ar[128]; + struct { + unsigned long kr[8]; + unsigned long rsv1[8]; + unsigned long rsc; + unsigned long bsp; + unsigned long bspstore; + unsigned long rnat; + unsigned long rsv2; + unsigned long fcr; + unsigned long rsv3[2]; + unsigned long eflag; + unsigned long csd; + unsigned long ssd; + unsigned long cflg; + unsigned long fsr; + unsigned long fir; + unsigned long fdr; + unsigned long rsv4; + unsigned long ccv; /* 32 */ + unsigned long rsv5[3]; + unsigned long unat; + unsigned long rsv6[3]; + unsigned long fpsr; + unsigned long rsv7[3]; + unsigned long itc; + unsigned long rsv8[3]; + unsigned long ign1[16]; + unsigned long pfs; /* 64 */ + unsigned long lc; + unsigned long ec; + unsigned long rsv9[45]; + unsigned long ign2[16]; + }; +}; + +union vcpu_cr_regs { + unsigned long cr[128]; + struct { + unsigned long dcr; /* CR0 */ + unsigned long itm; + unsigned long iva; + unsigned long rsv1[5]; + unsigned long pta; /* CR8 */ + unsigned long rsv2[7]; + unsigned long ipsr; /* CR16 */ + unsigned long isr; + unsigned long rsv3; + unsigned long iip; + unsigned long ifa; + unsigned long itir; + unsigned long iipa; + unsigned long ifs; + unsigned long iim; /* CR24 */ + unsigned long iha; + unsigned long rsv4[38]; + unsigned long lid; /* CR64 */ + unsigned long ivr; + unsigned long tpr; + unsigned long eoi; + unsigned long irr[4]; + unsigned long itv; /* CR72 */ + unsigned long pmv; + unsigned long cmcv; + unsigned long rsv5[5]; + unsigned long lrr0; /* CR80 */ + unsigned long lrr1; + unsigned long rsv6[46]; + }; +}; + +struct vcpu_guest_context_regs { + unsigned long r[32]; + unsigned long b[8]; + unsigned long bank[16]; + unsigned long ip; + unsigned long psr; + unsigned long cfm; + unsigned long pr; + unsigned int nats; /* NaT bits for r1-r31. */ + unsigned int bnats; /* Nat bits for banked registers. */ + union vcpu_ar_regs ar; + union vcpu_cr_regs cr; + struct pt_fpreg f[128]; + unsigned long dbr[8]; + unsigned long ibr[8]; + unsigned long rr[8]; + unsigned long pkr[16]; + + /* FIXME: cpuid,pmd,pmc */ + + unsigned long xip; + unsigned long xpsr; + unsigned long xfs; + unsigned long xr[4]; + + struct vcpu_tr_regs tr; + + /* Physical registers in case of debug event. */ + unsigned long excp_iipa; + unsigned long excp_ifa; + unsigned long excp_isr; + unsigned int excp_vector; + + /* + * The rbs is intended to be the image of the stacked registers still + * in the cpu (not yet stored in memory). It is laid out as if it + * were written in memory at a 512 (64*8) aligned address + offset. + * rbs_voff is (offset / 8). 
rbs_nat contains NaT bits for the + * remaining rbs registers. rbs_rnat contains NaT bits for in memory + * rbs registers. + * Note: loadrs is 2**14 bytes == 2**11 slots. + */ + unsigned int rbs_voff; + unsigned long rbs[2048]; + unsigned long rbs_rnat; + + /* + * RSE.N_STACKED_PHYS via PAL_RSE_INFO + * Strictly this isn't cpu context, but this value is necessary + * for domain save/restore. So is here. + */ + unsigned long num_phys_stacked; +}; + +struct vcpu_guest_context { +#define VGCF_EXTRA_REGS (1UL << 1) /* Set extra regs. */ +#define VGCF_SET_CR_IRR (1UL << 2) /* Set cr_irr[0:3]. */ + unsigned long flags; /* VGCF_* flags */ + + struct vcpu_guest_context_regs regs; + + unsigned long event_callback_ip; + + /* xen doesn't share privregs pages with hvm domain so that this member + * doesn't make sense for hvm domain. + * ~0UL is already used for INVALID_P2M_ENTRY. */ +#define VGC_PRIVREGS_HVM (~(-2UL)) + unsigned long privregs_pfn; +}; +DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context); + +/* dom0 vp op */ +#define __HYPERVISOR_ia64_dom0vp_op __HYPERVISOR_arch_0 +/* Map io space in machine address to dom0 physical address space. + Currently physical assigned address equals to machine address. */ +#define IA64_DOM0VP_ioremap 0 + +/* Convert a pseudo physical page frame number to the corresponding + machine page frame number. If no page is assigned, INVALID_MFN or + GPFN_INV_MASK is returned depending on domain's non-vti/vti mode. */ +#define IA64_DOM0VP_phystomach 1 + +/* Convert a machine page frame number to the corresponding pseudo physical + page frame number of the caller domain. */ +#define IA64_DOM0VP_machtophys 3 + +/* Reserved for future use. */ +#define IA64_DOM0VP_iounmap 4 + +/* Unmap and free pages contained in the specified pseudo physical region. */ +#define IA64_DOM0VP_zap_physmap 5 + +/* Assign machine page frame to dom0's pseudo physical address space. */ +#define IA64_DOM0VP_add_physmap 6 + +/* expose the p2m table into domain */ +#define IA64_DOM0VP_expose_p2m 7 + +/* xen perfmon */ +#define IA64_DOM0VP_perfmon 8 + +/* gmfn version of IA64_DOM0VP_add_physmap */ +#define IA64_DOM0VP_add_physmap_with_gmfn 9 + +/* get fpswa revision */ +#define IA64_DOM0VP_fpswa_revision 10 + +/* Add an I/O port space range */ +#define IA64_DOM0VP_add_io_space 11 + +/* expose the foreign domain's p2m table into privileged domain */ +#define IA64_DOM0VP_expose_foreign_p2m 12 +#define IA64_DOM0VP_EFP_ALLOC_PTE 0x1 /* allocate p2m table */ + +/* unexpose the foreign domain's p2m table into privileged domain */ +#define IA64_DOM0VP_unexpose_foreign_p2m 13 + +/* replace this page with newly allocated one and track tlb insert on it. */ +#define IA64_DOM0VP_tlb_track_page 32 + +/* assign a page with newly allocated one and track tlb insert on it. + if page is already assigned to pseudo physical address it results + in error. */ +#define IA64_DOM0VP_tlb_add_track_page 33 + +/* disable tlb traking of this page */ +#define IA64_DOM0VP_tlb_untrack_page 34 + + +/* flags for page assignement to pseudo physical address space */ +#define _ASSIGN_readonly 0 +#define ASSIGN_readonly (1UL << _ASSIGN_readonly) +#define ASSIGN_writable (0UL << _ASSIGN_readonly) /* dummy flag */ +/* Internal only: memory attribute must be WC/UC/UCE. 
*/ +#define _ASSIGN_nocache 1 +#define ASSIGN_nocache (1UL << _ASSIGN_nocache) +/* tlb tracking */ +#define _ASSIGN_tlb_track 2 +#define ASSIGN_tlb_track (1UL << _ASSIGN_tlb_track) +/* Internal only: associated with PGC_allocated bit */ +#define _ASSIGN_pgc_allocated 3 +#define ASSIGN_pgc_allocated (1UL << _ASSIGN_pgc_allocated) + +/* This structure has the same layout of struct ia64_boot_param, defined in + <asm/system.h>. It is redefined here to ease use. */ +struct xen_ia64_boot_param { + unsigned long command_line; /* physical address of cmd line args */ + unsigned long efi_systab; /* physical address of EFI system table */ + unsigned long efi_memmap; /* physical address of EFI memory map */ + unsigned long efi_memmap_size; /* size of EFI memory map */ + unsigned long efi_memdesc_size; /* size of an EFI memory map descriptor */ + unsigned int efi_memdesc_version; /* memory descriptor version */ + struct { + unsigned short num_cols; /* number of columns on console. */ + unsigned short num_rows; /* number of rows on console. */ + unsigned short orig_x; /* cursor's x position */ + unsigned short orig_y; /* cursor's y position */ + } console_info; + unsigned long fpswa; /* physical address of the fpswa interface */ + unsigned long initrd_start; + unsigned long initrd_size; + unsigned long domain_start; /* va where the boot time domain begins */ + unsigned long domain_size; /* how big is the boot domain */ +}; + +#endif /* !__ASSEMBLY__ */ + +/* Size of the shared_info area (this is not related to page size). */ +#define XSI_SHIFT 14 +#define XSI_SIZE (1 << XSI_SHIFT) +/* Log size of mapped_regs area (64 KB - only 4KB is used). */ +#define XMAPPEDREGS_SHIFT 12 +#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT) +/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */ +#define XMAPPEDREGS_OFS XSI_SIZE + +/* Hyperprivops. */ +#define HYPERPRIVOP_START 0x1 +#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0) +#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1) +#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2) +#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3) +#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4) +#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5) +#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6) +#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7) +#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8) +#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9) +#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa) +#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb) +#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc) +#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd) +#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe) +#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf) +#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10) +#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11) +#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12) +#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13) +#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14) +#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15) +#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16) +#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17) +#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18) +#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19) +#define HYPERPRIVOP_MAX (0x1a) + +/* Fast and light hypercalls. */ +#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1 + +/* Extra debug features. */ +#define __HYPERVISOR_ia64_debug_op __HYPERVISOR_arch_2 + +/* Xencomm macros. 
*/ +#define XENCOMM_INLINE_MASK 0xf800000000000000UL +#define XENCOMM_INLINE_FLAG 0x8000000000000000UL + +#ifndef __ASSEMBLY__ + +/* + * Optimization features. + * The hypervisor may do some special optimizations for guests. This hypercall + * can be used to switch on/of these special optimizations. + */ +#define __HYPERVISOR_opt_feature 0x700UL + +#define XEN_IA64_OPTF_OFF 0x0 +#define XEN_IA64_OPTF_ON 0x1 + +/* + * If this feature is switched on, the hypervisor inserts the + * tlb entries without calling the guests traphandler. + * This is useful in guests using region 7 for identity mapping + * like the linux kernel does. + */ +#define XEN_IA64_OPTF_IDENT_MAP_REG7 1 + +/* Identity mapping of region 4 addresses in HVM. */ +#define XEN_IA64_OPTF_IDENT_MAP_REG4 2 + +/* Identity mapping of region 5 addresses in HVM. */ +#define XEN_IA64_OPTF_IDENT_MAP_REG5 3 + +#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0) + +struct xen_ia64_opt_feature { + unsigned long cmd; /* Which feature */ + unsigned char on; /* Switch feature on/off */ + union { + struct { + /* The page protection bit mask of the pte. + * This will be or'ed with the pte. */ + unsigned long pgprot; + unsigned long key; /* A protection key for itir. */ + }; + }; +}; + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_IA64_XEN_INTERFACE_H */ + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ -- 1.5.3 |
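To illustrate how these definitions fit together, the sketch below fills struct xen_ia64_opt_feature to request region-7 identity mapping, as a guest kernel with the usual region-7 kernel mapping might. Only cmd and on are prescribed by the header; the pgprot and key values are illustrative, and the sketch assumes the HYPERVISOR_opt_feature wrapper from the hypercall header accepts a plain pointer.

    struct xen_ia64_opt_feature optf = {
            .cmd    = XEN_IA64_OPTF_IDENT_MAP_REG7,
            .on     = XEN_IA64_OPTF_ON,
            .pgprot = pgprot_val(PAGE_KERNEL),  /* or'ed into inserted ptes */
            .key    = 0,                        /* protection key for itir */
    };

    HYPERVISOR_opt_feature(&optf);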
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:27
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/xen/xenivt.S | 59 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 59 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/xen/xenivt.S diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S new file mode 100644 index 0000000..99bb37a --- /dev/null +++ b/arch/ia64/xen/xenivt.S @@ -0,0 +1,59 @@ +/* + * arch/ia64/xen/ivt.S + * + * Copyright (C) 2005 Hewlett-Packard Co + * Dan Magenheimer <dan...@hp...> + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * pv_ops. + */ + +#include <asm/asmmacro.h> +#include <asm/kregs.h> +#include <asm/pgtable.h> + +#define __IA64_ASM_PARAVIRTUALIZED_XEN +#include "inst_xen.h" +#include "xenminstate.h" +#include "../kernel/minstate.h" + + .section .text,"ax" +GLOBAL_ENTRY(xen_event_callback) + mov r31=pr // prepare to save predicates + ;; + SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 + ;; + movl r3=XSI_PSR_IC + mov r14=1 + ;; + st4 [r3]=r14 + ;; + adds r3=8,r2 // set up second base pointer for SAVE_REST + srlz.i // ensure everybody knows psr.ic is back on + ;; + SAVE_REST + ;; +1: + alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group + add out0=16,sp // pass pointer to pt_regs as first arg + ;; + br.call.sptk.many b0=xen_evtchn_do_upcall + ;; + movl r20=XSI_PSR_I_ADDR + ;; + ld8 r20=[r20] + ;; + adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending + ;; + ld1 r20=[r20] + ;; + cmp.ne p6,p0=r20,r0 // if there are pending events, + (p6) br.spnt.few 1b // call evtchn_do_upcall again. + br.sptk.many ia64_leave_kernel +END(xen_event_callback) + +GLOBAL_ENTRY(xen_bsw1) + XEN_BSW_1(r14) + br.ret.sptk.many b0 +END(xen_bsw1) -- 1.5.3 |
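In C terms, the tail of xen_event_callback is a retry loop: the upcall handler runs with events masked, so an event posted meanwhile only sets the pending byte sitting just below the mask byte, and the callback loops instead of leaving the kernel. A sketch of that shape, with regs standing for the pt_regs just saved and XSI_PSR_I_ADDR as in the privop header:

    uint8_t *psr_i_addr = *(uint8_t **)XSI_PSR_I_ADDR;  /* mask byte */

    do {
            xen_evtchn_do_upcall(regs);
    } while (*(psr_i_addr - 1));    /* vcpu_info->evtchn_upcall_pending */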
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:27
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/kernel/asm-offsets.c | 25 ++++++++++++++ include/asm-ia64/xen/privop.h | 73 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+), 0 deletions(-) create mode 100644 include/asm-ia64/xen/privop.h diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 0aebc6f..1a81c64 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -278,4 +278,29 @@ void foo(void) offsetof (struct itc_jitter_data_t, itc_jitter)); DEFINE(IA64_ITC_LASTCYCLE_OFFSET, offsetof (struct itc_jitter_data_t, itc_lastcycle)); + +#ifdef CONFIG_XEN + BLANK(); + +#define DEFINE_MAPPED_REG_OFS(sym, field) \ + DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field))) + + DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr); + DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr); + DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip); + DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs); + DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs); + DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr); + DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa); + DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa); + DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim); + DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha); + DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir); + DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled); + DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum); + DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]); + DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); + DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); + DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); +#endif /* CONFIG_XEN */ } diff --git a/include/asm-ia64/xen/privop.h b/include/asm-ia64/xen/privop.h new file mode 100644 index 0000000..dd3e5ec --- /dev/null +++ b/include/asm-ia64/xen/privop.h @@ -0,0 +1,73 @@ +#ifndef _ASM_IA64_XEN_PRIVOP_H +#define _ASM_IA64_XEN_PRIVOP_H + +/* + * Copyright (C) 2005 Hewlett-Packard Co + * Dan Magenheimer <dan...@hp...> + * + * Paravirtualizations of privileged operations for Xen/ia64 + * + * + * inline privop and paravirt_alt support + * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + */ + +#ifndef __ASSEMBLY__ +#include <linux/types.h> /* arch-ia64.h requires uint64_t */ +#include <linux/stringify.h> +#endif +#include <asm/xen/interface.h> + +/* At 1 MB, before per-cpu space but still addressable using addl instead + of movl. */ +#define XSI_BASE 0xfffffffffff00000 + +/* Address of mapped regs. 
*/ +#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE) + +#ifdef __ASSEMBLY__ +#define XEN_HYPER_RFI break HYPERPRIVOP_RFI +#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT +#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT +#define XEN_HYPER_COVER break HYPERPRIVOP_COVER +#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D +#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I +#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I +#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR +#define XEN_HYPER_GET_TPR break HYPERPRIVOP_GET_TPR +#define XEN_HYPER_SET_TPR break HYPERPRIVOP_SET_TPR +#define XEN_HYPER_EOI break HYPERPRIVOP_EOI +#define XEN_HYPER_SET_ITM break HYPERPRIVOP_SET_ITM +#define XEN_HYPER_THASH break HYPERPRIVOP_THASH +#define XEN_HYPER_PTC_GA break HYPERPRIVOP_PTC_GA +#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D +#define XEN_HYPER_GET_RR break HYPERPRIVOP_GET_RR +#define XEN_HYPER_SET_RR break HYPERPRIVOP_SET_RR +#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR +#define XEN_HYPER_FC break HYPERPRIVOP_FC +#define XEN_HYPER_GET_CPUID break HYPERPRIVOP_GET_CPUID +#define XEN_HYPER_GET_PMD break HYPERPRIVOP_GET_PMD +#define XEN_HYPER_GET_EFLAG break HYPERPRIVOP_GET_EFLAG +#define XEN_HYPER_SET_EFLAG break HYPERPRIVOP_SET_EFLAG +#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR +#define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4 + +#define XSI_IFS (XSI_BASE + XSI_IFS_OFS) +#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS) +#define XSI_IFA (XSI_BASE + XSI_IFA_OFS) +#define XSI_ISR (XSI_BASE + XSI_ISR_OFS) +#define XSI_IIM (XSI_BASE + XSI_IIM_OFS) +#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS) +#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) +#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS) +#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS) +#define XSI_IIP (XSI_BASE + XSI_IIP_OFS) +#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS) +#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) +#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) +#define XSI_IHA (XSI_BASE + XSI_IHA_OFS) +#endif + +#endif /* _ASM_IA64_XEN_PRIVOP_H */ -- 1.5.3 |
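These XSI_* constants are absolute virtual addresses -- XSI_BASE plus the field offsets emitted by asm-offsets -- which is what lets privileged state be read with plain loads instead of mov-from-cr. A sketch of the idea in C (hedged: in this patch the constants sit under #ifdef __ASSEMBLY__, so a C user would need them made visible first; the field types come from the interface header):

    /* Read virtualised psr.ic and the event-mask pointer directly
     * from the shared mapped_regs page -- sketch. */
    int psr_ic    = *(volatile int *)XSI_PSR_IC;
    uint8_t *mask = *(volatile uint8_t **)XSI_PSR_I_ADDR;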
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:24
Make ia64 privileged instruction intrinsics paravirtualizable with binary patching allowing each pv instances to override each intrinsics. Mark privileged instructions which needs paravirtualization and allow pv instance can binary patch at early boot time. Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/kernel/paravirtentry.S | 37 +++ include/asm-ia64/privop.h | 4 + include/asm-ia64/privop_paravirt.h | 587 ++++++++++++++++++++++++++++++++++++ 3 files changed, 628 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/kernel/paravirtentry.S create mode 100644 include/asm-ia64/privop_paravirt.h diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S new file mode 100644 index 0000000..013511f --- /dev/null +++ b/arch/ia64/kernel/paravirtentry.S @@ -0,0 +1,37 @@ +/****************************************************************************** + * linux/arch/ia64/xen/paravirtentry.S + * + * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <asm/types.h> +#include <asm/asmmacro.h> +#include <asm/paravirt_entry.h> +#include <asm/privop_paravirt.h> + +#define BRANCH(sym, type) \ + GLOBAL_ENTRY(paravirt_ ## sym) ; \ + BR_COND_SPTK_MANY(native_ ## sym, type) ; \ + END(paravirt_ ## sym) + + BRANCH(switch_to, PARAVIRT_ENTRY_SWITCH_TO) + BRANCH(leave_syscall, PARAVIRT_ENTRY_LEAVE_SYSCALL) + BRANCH(work_processed_syscall, PARAVIRT_ENTRY_WORK_PROCESSED_SYSCALL) + BRANCH(leave_kernel, PARAVIRT_ENTRY_LEAVE_KERNEL) + BRANCH(pal_call_static, PARAVIRT_ENTRY_PAL_CALL_STATIC) diff --git a/include/asm-ia64/privop.h b/include/asm-ia64/privop.h index b0b74fd..69591e0 100644 --- a/include/asm-ia64/privop.h +++ b/include/asm-ia64/privop.h @@ -10,6 +10,10 @@ * */ +#ifdef CONFIG_PARAVIRT +#include <asm/privop_paravirt.h> +#endif + #ifdef CONFIG_XEN #include <asm/xen/privop.h> #endif diff --git a/include/asm-ia64/privop_paravirt.h b/include/asm-ia64/privop_paravirt.h new file mode 100644 index 0000000..bd7de70 --- /dev/null +++ b/include/asm-ia64/privop_paravirt.h @@ -0,0 +1,587 @@ +/****************************************************************************** + * privops_paravirt.h + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef _ASM_IA64_PRIVOP_PARAVIRT_H +#define _ASM_IA64_PRIVOP_PARAVIRT_H + +#define PARAVIRT_INST_START 0x1 +#define PARAVIRT_INST_RFI (PARAVIRT_INST_START + 0x0) +#define PARAVIRT_INST_RSM_DT (PARAVIRT_INST_START + 0x1) +#define PARAVIRT_INST_SSM_DT (PARAVIRT_INST_START + 0x2) +#define PARAVIRT_INST_COVER (PARAVIRT_INST_START + 0x3) +#define PARAVIRT_INST_ITC_D (PARAVIRT_INST_START + 0x4) +#define PARAVIRT_INST_ITC_I (PARAVIRT_INST_START + 0x5) +#define PARAVIRT_INST_SSM_I (PARAVIRT_INST_START + 0x6) +#define PARAVIRT_INST_GET_IVR (PARAVIRT_INST_START + 0x7) +#define PARAVIRT_INST_GET_TPR (PARAVIRT_INST_START + 0x8) +#define PARAVIRT_INST_SET_TPR (PARAVIRT_INST_START + 0x9) +#define PARAVIRT_INST_EOI (PARAVIRT_INST_START + 0xa) +#define PARAVIRT_INST_SET_ITM (PARAVIRT_INST_START + 0xb) +#define PARAVIRT_INST_THASH (PARAVIRT_INST_START + 0xc) +#define PARAVIRT_INST_PTC_GA (PARAVIRT_INST_START + 0xd) +#define PARAVIRT_INST_ITR_D (PARAVIRT_INST_START + 0xe) +#define PARAVIRT_INST_GET_RR (PARAVIRT_INST_START + 0xf) +#define PARAVIRT_INST_SET_RR (PARAVIRT_INST_START + 0x10) +#define PARAVIRT_INST_SET_KR (PARAVIRT_INST_START + 0x11) +#define PARAVIRT_INST_FC (PARAVIRT_INST_START + 0x12) +#define PARAVIRT_INST_GET_CPUID (PARAVIRT_INST_START + 0x13) +#define PARAVIRT_INST_GET_PMD (PARAVIRT_INST_START + 0x14) +#define PARAVIRT_INST_GET_EFLAG (PARAVIRT_INST_START + 0x15) +#define PARAVIRT_INST_SET_EFLAG (PARAVIRT_INST_START + 0x16) +#define PARAVIRT_INST_RSM_BE (PARAVIRT_INST_START + 0x17) +#define PARAVIRT_INST_GET_PSR (PARAVIRT_INST_START + 0x18) +#define PARAVIRT_INST_SET_RR0_TO_RR4 (PARAVIRT_INST_START + 0x19) + +#define PARAVIRT_BNDL_START 0x10000000 +#define PARAVIRT_BNDL_SSM_I (PARAVIRT_BNDL_START + 0x0) +#define PARAVIRT_BNDL_RSM_I (PARAVIRT_BNDL_START + 0x1) +#define PARAVIRT_BNDL_GET_PSR_I (PARAVIRT_BNDL_START + 0x2) +#define PARAVIRT_BNDL_INTRIN_LOCAL_IRQ_RESTORE (PARAVIRT_BNDL_START + 0x3) + +/* + * struct task_struct* (*ia64_switch_to)(void* next_task); + * void *ia64_leave_syscall; + * void *ia64_work_processed_syscall + * void *ia64_leave_kernel; + * struct ia64_pal_retval (*pal_call_static)(u64, u64, u64, u64, u64); + */ + +#define PARAVIRT_ENTRY_START 0x20000000 +#define PARAVIRT_ENTRY_SWITCH_TO (PARAVIRT_ENTRY_START + 0) +#define PARAVIRT_ENTRY_LEAVE_SYSCALL (PARAVIRT_ENTRY_START + 1) +#define PARAVIRT_ENTRY_WORK_PROCESSED_SYSCALL (PARAVIRT_ENTRY_START + 2) +#define PARAVIRT_ENTRY_LEAVE_KERNEL (PARAVIRT_ENTRY_START + 3) +#define PARAVIRT_ENTRY_PAL_CALL_STATIC (PARAVIRT_ENTRY_START + 4) + + +#ifndef __ASSEMBLER__ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <asm/paravirt_alt.h> +#include <asm/kregs.h> /* for IA64_PSR_I */ +#include <asm/xen/interface.h> + +/************************************************/ +/* Instructions paravirtualized for correctness */ +/************************************************/ +/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" + * is not currently used (though it may be in a long-format VHPT system!) 
*/ +#ifdef ASM_SUPPORTED +static inline unsigned long +paravirt_fc(unsigned long addr) +{ + register __u64 ia64_intri_res asm ("r8"); + register __u64 __addr asm ("r8") = addr; + asm volatile (paravirt_alt_inst("fc %1", PARAVIRT_INST_FC): + "=r"(ia64_intri_res): "0"(__addr): "memory"); + return ia64_intri_res; +} +#define paravirt_fc(addr) paravirt_fc((unsigned long)addr) + +static inline unsigned long +paravirt_thash(unsigned long addr) +{ + register __u64 ia64_intri_res asm ("r8"); + register __u64 __addr asm ("r8") = addr; + asm volatile (paravirt_alt_inst("thash %0=%1", PARAVIRT_INST_THASH): + "=r"(ia64_intri_res): "0"(__addr)); + return ia64_intri_res; +} + +static inline unsigned long +paravirt_get_cpuid(int index) +{ + register __u64 ia64_intri_res asm ("r8"); + register __u64 __index asm ("r8") = index; + asm volatile (paravirt_alt_inst("mov %0=cpuid[%r1]", + PARAVIRT_INST_GET_CPUID): + "=r"(ia64_intri_res): "0O"(__index)); + return ia64_intri_res; +} + +static inline unsigned long +paravirt_get_pmd(int index) +{ + register __u64 ia64_intri_res asm ("r8"); + register __u64 __index asm ("r8") = index; + asm volatile (paravirt_alt_inst("mov %0=pmd[%1]", + PARAVIRT_INST_GET_PMD): + "=r"(ia64_intri_res): "0"(__index)); + return ia64_intri_res; +} + +static inline unsigned long +paravirt_get_eflag(void) +{ + register __u64 ia64_intri_res asm ("r8"); + asm volatile (paravirt_alt_inst("mov %0=ar%1", + PARAVIRT_INST_GET_EFLAG): + "=r"(ia64_intri_res): + "i"(_IA64_REG_AR_EFLAG - _IA64_REG_AR_KR0): "memory"); + return ia64_intri_res; +} + +static inline void +paravirt_set_eflag(unsigned long val) +{ + register __u64 __val asm ("r8") = val; + asm volatile (paravirt_alt_inst("mov ar%0=%1", + PARAVIRT_INST_SET_EFLAG):: + "i"(_IA64_REG_AR_EFLAG - _IA64_REG_AR_KR0), "r"(__val): + "memory"); +} + +/************************************************/ +/* Instructions paravirtualized for performance */ +/************************************************/ + +static inline unsigned long +paravirt_get_psr(void) +{ + register __u64 ia64_intri_res asm ("r8"); + asm volatile (paravirt_alt_inst("mov %0=psr", PARAVIRT_INST_GET_PSR): + "=r"(ia64_intri_res)); + return ia64_intri_res; +} + +static inline unsigned long +paravirt_get_ivr(void) +{ + register __u64 ia64_intri_res asm ("r8"); + asm volatile (paravirt_alt_inst("mov %0=cr%1", PARAVIRT_INST_GET_IVR): + "=r"(ia64_intri_res): + "i" (_IA64_REG_CR_IVR - _IA64_REG_CR_DCR)); + return ia64_intri_res; +} + +static inline unsigned long +paravirt_get_tpr(void) +{ + register __u64 ia64_intri_res asm ("r8"); + asm volatile (paravirt_alt_inst("mov %0=cr%1", PARAVIRT_INST_GET_TPR): + "=r"(ia64_intri_res): + "i" (_IA64_REG_CR_TPR - _IA64_REG_CR_DCR)); + return ia64_intri_res; +} + +static inline void +paravirt_set_tpr(unsigned long val) +{ + register __u64 __val asm ("r8") = val; + asm volatile (paravirt_alt_inst("mov cr%0=%1", PARAVIRT_INST_SET_TPR):: + "i" (_IA64_REG_CR_TPR - _IA64_REG_CR_DCR), "r"(__val): + "memory"); +} + +static inline void +paravirt_eoi(unsigned long val) +{ + register __u64 __val asm ("r8") = val; + asm volatile (paravirt_alt_inst("mov cr%0=%1", PARAVIRT_INST_EOI):: + "i" (_IA64_REG_CR_EOI - _IA64_REG_CR_DCR), "r"(__val): + "memory"); +} + +static inline void +paravirt_set_itm(unsigned long val) +{ + register __u64 __val asm ("r8") = val; + asm volatile (paravirt_alt_inst("mov cr%0=%1", PARAVIRT_INST_SET_ITM):: + "i" (_IA64_REG_CR_ITM - _IA64_REG_CR_DCR), "r"(__val): + "memory"); +} + +static inline void +paravirt_ptcga(unsigned long addr,
unsigned long size) +{ + register __u64 __addr asm ("r8") = addr; + register __u64 __size asm ("r9") = size; + asm volatile (paravirt_alt_inst("ptc.ga %0,%1", PARAVIRT_INST_PTC_GA):: + "r"(__addr), "r"(__size): "memory"); + ia64_dv_serialize_data(); +} + +static inline unsigned long +paravirt_get_rr(unsigned long index) +{ + register __u64 ia64_intri_res asm ("r8"); + register __u64 __index asm ("r8") = index; + asm volatile (paravirt_alt_inst("mov %0=rr[%1]", PARAVIRT_INST_GET_RR): + "=r"(ia64_intri_res) : "0" (__index)); + return ia64_intri_res; +} + +static inline void +paravirt_set_rr(unsigned long index, unsigned long val) +{ + register __u64 __index asm ("r8") = index; + register __u64 __val asm ("r9") = val; + asm volatile (paravirt_alt_inst("mov rr[%0]=%1", PARAVIRT_INST_SET_RR):: + "r"(__index), "r"(__val): "memory"); +} + +static inline void +paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1, + unsigned long val2, unsigned long val3, + unsigned long val4) +{ + register __u64 __val0 asm ("r8") = val0; + register __u64 __val1 asm ("r9") = val1; + register __u64 __val2 asm ("r10") = val2; + register __u64 __val3 asm ("r11") = val3; + register __u64 __val4 asm ("r14") = val4; + asm volatile (paravirt_alt_inst("\t;;\n" + "\t{.mmi\n" + "\tmov rr[%0]=%1\n" + /* + * without this stop bit + * assembler complains. + */ + "\t;;\n" + "\tmov rr[%2]=%3\n" + "\tnop.i 0\n" + "\t}\n" + "\t{.mmi\n" + "\tmov rr[%4]=%5\n" + "\tmov rr[%6]=%7\n" + "\tnop.i 0\n" + "\t}\n" + "\tmov rr[%8]=%9;;\n", + PARAVIRT_INST_SET_RR0_TO_RR4):: + "r"(0x0000000000000000UL), "r"(__val0), + "r"(0x2000000000000000UL), "r"(__val1), + "r"(0x4000000000000000UL), "r"(__val2), + "r"(0x6000000000000000UL), "r"(__val3), + "r"(0x8000000000000000UL), "r"(__val4) : + "memory"); +} + +static inline void +paravirt_set_kr(unsigned long index, unsigned long val) +{ + register __u64 __index asm ("r8") = index - _IA64_REG_AR_KR0; + register __u64 __val asm ("r9") = val; + + /* + * asm volatile ("break %0":: + * "i"(PARAVIRT_INST_SET_KR), "r"(__index), "r"(__val)); + */ +#ifndef BUILD_BUG_ON +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#endif + BUILD_BUG_ON(!__builtin_constant_p(__index)); + switch (index) { + case _IA64_REG_AR_KR0: + asm volatile (paravirt_alt_inst("mov ar%0=%2", + PARAVIRT_INST_SET_KR):: + "i" (_IA64_REG_AR_KR0 - _IA64_REG_AR_KR0), + "r"(__index), "r"(__val): + "memory"); + break; + case _IA64_REG_AR_KR1: + asm volatile (paravirt_alt_inst("mov ar%0=%2", + PARAVIRT_INST_SET_KR):: + "i" (_IA64_REG_AR_KR1 - _IA64_REG_AR_KR0), + "r"(__index), "r"(__val): + "memory"); + break; + case _IA64_REG_AR_KR2: + asm volatile (paravirt_alt_inst("mov ar%0=%2", + PARAVIRT_INST_SET_KR):: + "i" (_IA64_REG_AR_KR2 - _IA64_REG_AR_KR0), + "r"(__index), "r"(__val): + "memory"); + break; + case _IA64_REG_AR_KR3: + asm volatile (paravirt_alt_inst("mov ar%0=%2", + PARAVIRT_INST_SET_KR):: + "i" (_IA64_REG_AR_KR3 - _IA64_REG_AR_KR0), + "r"(__index), "r"(__val): + "memory"); + break; + case _IA64_REG_AR_KR4: + asm volatile (paravirt_alt_inst("mov ar%0=%2", + PARAVIRT_INST_SET_KR):: + "i" (_IA64_REG_AR_KR4 - _IA64_REG_AR_KR0), + "r"(__index), "r"(__val): + "memory"); + break; + case _IA64_REG_AR_KR5: + asm volatile (paravirt_alt_inst("mov ar%0=%2", + PARAVIRT_INST_SET_KR):: + "i" (_IA64_REG_AR_KR5 - _IA64_REG_AR_KR0), + "r"(__index), "r"(__val): + "memory"); + break; + case _IA64_REG_AR_KR6: + asm volatile (paravirt_alt_inst("mov ar%0=%2", + PARAVIRT_INST_SET_KR):: + "i" (_IA64_REG_AR_KR6 - 
_IA64_REG_AR_KR0), + "r"(__index), "r"(__val): + "memory"); + break; + case _IA64_REG_AR_KR7: + asm volatile (paravirt_alt_inst("mov ar%0=%2", + PARAVIRT_INST_SET_KR):: + "i" (_IA64_REG_AR_KR7 - _IA64_REG_AR_KR0), + "r"(__index), "r"(__val): + "memory"); + break; + default: { + extern void compile_error_ar_kr_index_must_be_copmile_time_constant(void); + compile_error_ar_kr_index_must_be_copmile_time_constant(); + break; + } + } +} +#endif /* ASM_SUPPORTED */ + +static inline unsigned long +paravirt_getreg(unsigned long regnum) +{ + __u64 ia64_intri_res; + + switch (regnum) { + case _IA64_REG_PSR: + ia64_intri_res = paravirt_get_psr(); + break; + case _IA64_REG_CR_IVR: + ia64_intri_res = paravirt_get_ivr(); + break; + case _IA64_REG_CR_TPR: + ia64_intri_res = paravirt_get_tpr(); + break; + case _IA64_REG_AR_EFLAG: + ia64_intri_res = paravirt_get_eflag(); + break; + default: + ia64_intri_res = native_getreg(regnum); + break; + } + return ia64_intri_res; + } + +static inline void +paravirt_setreg(unsigned long regnum, unsigned long val) +{ + switch (regnum) { + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: + paravirt_set_kr(regnum, val); + break; + case _IA64_REG_CR_ITM: + paravirt_set_itm(val); + break; + case _IA64_REG_CR_TPR: + paravirt_set_tpr(val); + break; + case _IA64_REG_CR_EOI: + paravirt_eoi(val); + break; + case _IA64_REG_AR_EFLAG: + paravirt_set_eflag(val); + break; + default: + native_setreg(regnum, val); + break; + } +} + +#ifdef ASM_SUPPORTED + +#define NOP_BUNDLE \ + "{\n\t" \ + "nop 0\n\t" \ + "nop 0\n\t" \ + "nop 0\n\t" \ + "}\n\t" + +static inline void +paravirt_ssm_i(void) +{ + /* five bundles */ + asm volatile (paravirt_alt_bundle("{\n\t" + "ssm psr.i\n\t" + "nop 0\n\t" + "nop 0\n\t" + "}\n\t" + NOP_BUNDLE + NOP_BUNDLE + NOP_BUNDLE + NOP_BUNDLE, + PARAVIRT_BNDL_SSM_I)::: + "r8", "r9", "r10", + "p6", "p7", + "memory"); +} + +static inline void +paravirt_rsm_i(void) +{ + /* two budles */ + asm volatile (paravirt_alt_bundle("{\n\t" + "rsm psr.i\n\t" + "nop 0\n\t" + "nop 0\n\t" + "}\n\t" + NOP_BUNDLE, + PARAVIRT_BNDL_RSM_I)::: + "r8", "r9", + "memory"); +} + +static inline unsigned long +paravirt_get_psr_i(void) +{ + register unsigned long psr_i asm ("r8"); + register unsigned long mask asm ("r9"); + + /* three bundles */ + asm volatile (paravirt_alt_bundle("{\n\t" + "mov %0=psr\n\t" + "mov %1=%2\n\t" + ";;\n\t" + "and %0=%0,%1\n\t" + "}\n\t" + NOP_BUNDLE + NOP_BUNDLE, + PARAVIRT_BNDL_GET_PSR_I): + "=r"(psr_i), + "=r"(mask) + : + "i"(IA64_PSR_I) + : + /* "r8", "r9", */ + "p6"); + return psr_i; +} + +static inline void +paravirt_intrin_local_irq_restore(unsigned long flags) +{ + register unsigned long __flags asm ("r8") = flags; + + /* six bundles */ + asm volatile (paravirt_alt_bundle(";;\n\t" + "{\n\t" + "cmp.ne p6,p7=%0,r0;;\n\t" + "(p6) ssm psr.i;\n\t" + "nop 0\n\t" + "}\n\t" + "{\n\t" + "(p7) rsm psr.i;;\n\t" + "(p6) srlz.d\n\t" + "nop 0\n\t" + "}\n\t" + NOP_BUNDLE + NOP_BUNDLE + NOP_BUNDLE + NOP_BUNDLE, + PARAVIRT_BNDL_INTRIN_LOCAL_IRQ_RESTORE):: + "r"(__flags) : + /* "r8",*/ "r9", "r10", "r11", + "p6", "p7", "p8", "p9", + "memory"); + +} + +#undef NOP_BUNDLE + +#endif /* ASM_SUPPORTED */ + +static inline void +paravirt_ssm(unsigned long mask) +{ + if (mask == IA64_PSR_I) + paravirt_ssm_i(); + else + native_ssm(mask); +} + +static inline void +paravirt_rsm(unsigned long mask) +{ + if (mask == IA64_PSR_I) + paravirt_rsm_i(); + else + native_rsm(mask); +} + +#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT_ALT) + +#define IA64_PARAVIRTUALIZED_PRIVOP + +#define 
ia64_fc(addr) paravirt_fc(addr) +#define ia64_thash(addr) paravirt_thash(addr) +#define ia64_get_cpuid(i) paravirt_get_cpuid(i) +#define ia64_get_pmd(i) paravirt_get_pmd(i) +#define ia64_ptcga(addr, size) paravirt_ptcga((addr), (size)) +#define ia64_set_rr(index, val) paravirt_set_rr((index), (val)) +#define ia64_get_rr(index) paravirt_get_rr(index) +#define ia64_getreg(regnum) paravirt_getreg(regnum) +#define ia64_setreg(regnum, val) paravirt_setreg((regnum), (val)) +#define ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4) \ + paravirt_set_rr0_to_rr4((val0), (val1), (val2), (val3), (val4)) + +#define ia64_ssm(mask) paravirt_ssm(mask) +#define ia64_rsm(mask) paravirt_rsm(mask) +#define ia64_get_psr_i() paravirt_get_psr_i() +#define ia64_intrin_local_irq_restore(x) \ + paravirt_intrin_local_irq_restore(x) + +/* the remainder of these are not performance-sensitive so its + * OK to not paravirtualize and just take a privop trap and emulate */ +#define ia64_hint native_hint +#define ia64_set_pmd native_set_pmd +#define ia64_itci native_itci +#define ia64_itcd native_itcd +#define ia64_itri native_itri +#define ia64_itrd native_itrd +#define ia64_tpa native_tpa +#define ia64_set_ibr native_set_ibr +#define ia64_set_pkr native_set_pkr +#define ia64_set_pmc native_set_pmc +#define ia64_get_ibr native_get_ibr +#define ia64_get_pkr native_get_pkr +#define ia64_get_pmc native_get_pmc +#define ia64_ptce native_ptce +#define ia64_ptcl native_ptcl +#define ia64_ptri native_ptri +#define ia64_ptrd native_ptrd + +#endif /* ASM_SUPPORTED && CONFIG_PARAVIRT_ALT */ + +#endif /* __ASSEMBLER__*/ + +/* these routines utilize privilege-sensitive or performance-sensitive + * privileged instructions so the code must be replaced with + * paravirtualized versions */ +#ifdef CONFIG_PARAVIRT_ENTRY +#define IA64_PARAVIRTUALIZED_ENTRY +#define ia64_switch_to paravirt_switch_to +#define ia64_work_processed_syscall paravirt_work_processed_syscall +#define ia64_leave_syscall paravirt_leave_syscall +#define ia64_leave_kernel paravirt_leave_kernel +#define ia64_pal_call_static paravirt_pal_call_static +#endif /* CONFIG_PARAVIRT_ENTRY */ + +#endif /* _ASM_IA64_PRIVOP_PARAVIRT_H */ -- 1.5.3 |
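The mechanism in the patch above hinges on paravirt_alt_inst(): every privileged intrinsic is emitted together with its PARAVIRT_INST_* tag so that the patch site can be found and rewritten at early boot. The macro itself is defined in asm/paravirt_alt.h, which is not part of this mail; the C sketch below only illustrates the kind of patch-site table and boot-time loop such a scheme implies, and every name in it apart from the PARAVIRT_INST_* tags is hypothetical.

/* hypothetical patch-site record; the real layout belongs to
 * asm/paravirt_alt.h, which this mail does not contain */
struct alt_inst_site {
	unsigned long addr;	/* address of the tagged instruction bundle */
	unsigned long type;	/* PARAVIRT_INST_* tag */
};

/* hypothetical early-boot pass: a pv instance supplies a lookup that
 * returns its replacement code for each tag it wants to override */
static void __init
apply_alt_inst_patches(struct alt_inst_site *start,
		       struct alt_inst_site *end,
		       const void *(*lookup)(unsigned long type))
{
	struct alt_inst_site *site;

	for (site = start; site < end; site++) {
		const void *repl = lookup(site->type);

		if (repl != NULL)
			/* patch_instruction() is hypothetical, too */
			patch_instruction((void *)site->addr, repl);
	}
}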
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:24
|
This isn't strictly necessary because Xen defines its own DO_SAVE_MIN, but it makes the difference between the two smaller. Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/kernel/minstate.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h index fc99141..10a412c 100644 --- a/arch/ia64/kernel/minstate.h +++ b/arch/ia64/kernel/minstate.h @@ -34,9 +34,9 @@ mov r27=ar.rsc; /* M */ \ mov r20=r1; /* A */ \ mov r25=ar.unat; /* M */ \ - mov r29=cr.ipsr; /* M */ \ + MOV_FROM_IPSR(r29); /* M */ \ mov r26=ar.pfs; /* I */ \ - mov r28=cr.iip; /* M */ \ + MOV_FROM_IIP(r28); /* M */ \ mov r21=ar.fpsr; /* M */ \ __COVER; /* B;; (or nothing) */ \ ;; \ -- 1.5.3 |
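On native hardware the MOV_FROM_IPSR()/MOV_FROM_IIP() macros must expand back to exactly the instructions this hunk removes, so DO_SAVE_MIN is unchanged in the native build. Plausible native definitions, inferred from the removed lines (the real ones live in the inst_native.h header referenced later in this series, which is not shown here):

/* inferred native expansions -- illustrative, not copied from the series */
#define MOV_FROM_IPSR(reg)	mov reg=cr.ipsr
#define MOV_FROM_IIP(reg)	mov reg=cr.iip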
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:24
|
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/kernel/Makefile | 2 + arch/ia64/kernel/paravirt.c | 34 ++++++++++++++++++++++++ include/asm-ia64/paravirt.h | 61 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/kernel/paravirt.c create mode 100644 include/asm-ia64/paravirt.h diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 185e0e2..7849bc3 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -41,6 +41,8 @@ obj-$(CONFIG_PARAVIRT_ALT) += paravirt_alt.o obj-$(CONFIG_PARAVIRT_ENTRY) += paravirt_entry.o paravirtentry.o obj-$(CONFIG_PARAVIRT_NOP_B_PATCH) += paravirt_nop.o +obj-$(CONFIG_PARAVIRT_GUEST) += paravirt.o + obj-$(CONFIG_IA64_ESI) += esi.o ifneq ($(CONFIG_IA64_ESI),) obj-y += esi_stub.o # must be in kernel proper diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c new file mode 100644 index 0000000..b31fa91 --- /dev/null +++ b/arch/ia64/kernel/paravirt.c @@ -0,0 +1,34 @@ +/****************************************************************************** + * arch/ia64/kernel/paravirt.c + * + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/init.h> + +#include <asm/paravirt.h> + +/*************************************************************************** + * general info + */ +struct pv_info pv_info = { + .kernel_rpl = 0, + .paravirt_enabled = 0, + .name = "bare hardware" +}; diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h new file mode 100644 index 0000000..c2d4809 --- /dev/null +++ b/include/asm-ia64/paravirt.h @@ -0,0 +1,61 @@ +/****************************************************************************** + * include/asm-ia64/paravirt.h + * + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_PARAVIRT_H +#define __ASM_PARAVIRT_H + +#ifdef CONFIG_PARAVIRT_GUEST + +#ifndef __ASSEMBLY__ + +/****************************************************************************** + * general info + */ +struct pv_info { + unsigned int kernel_rpl; + int paravirt_enabled; + const char *name; +}; + +extern struct pv_info pv_info; + +static inline int paravirt_enabled(void) +{ + return pv_info.paravirt_enabled; +} + +static inline unsigned int get_kernel_rpl(void) +{ + return pv_info.kernel_rpl; +} + +#endif /* __ASSEMBLY__ */ + +#else +/* fallback for native case */ + +/* XXX: TODO */ + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASM_PARAVIRT_H */ -- 1.5.3 |
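The struct is deliberately minimal: pv_info carries only identity data that generic code may want to query, and the two accessors keep callers from poking at the struct directly. An illustrative consumer follows; the function below is not part of this patch, it just shows the intended usage of the accessors the patch adds.

/* illustrative only: generic code querying the pv_info added above */
static int __init report_pv_state(void)
{
	if (paravirt_enabled())
		printk(KERN_INFO "paravirtualized on %s, kernel rpl=%u\n",
		       pv_info.name, get_kernel_rpl());
	return 0;
}
late_initcall(report_pv_state);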
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:24
|
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/kernel/inst_paravirt.h | 28 ++++++++++++++++++++++++++++ 1 files changed, 28 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/kernel/inst_paravirt.h diff --git a/arch/ia64/kernel/inst_paravirt.h b/arch/ia64/kernel/inst_paravirt.h new file mode 100644 index 0000000..689c343 --- /dev/null +++ b/arch/ia64/kernel/inst_paravirt.h @@ -0,0 +1,28 @@ +/****************************************************************************** + * linux/arch/ia64/xen/inst_paravirt.h + * + * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifdef __IA64_ASM_PARAVIRTUALIZED_XEN +#include "../xen/inst_xen.h" +#include "../xen/xenminstate.h" +#else +#include "inst_native.h" +#endif -- 1.5.3 |
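This header is the single switch point for assembly sources: the same .S file is assembled once per pv instance, and the selector macro decides which instruction expansions it sees. A sketch of the effect (the build flags shown in the comment are an assumption, not taken from this series):

/*
 * inst_paravirt.h picks the macro flavor at assembly time:
 *   default build                     -> inst_native.h (raw cr.* accesses)
 *   -D__IA64_ASM_PARAVIRTUALIZED_XEN  -> inst_xen.h + xenminstate.h
 * so a hypothetical rule such as
 *   AFLAGS_ivt_xen.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN
 * would produce a second, Xen-flavored object from the same source.
 */
#include "inst_paravirt.h"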
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:24
|
Import the ia64-specific part of xencomm, which converts hypercall arguments from virtual addresses into pseudo-physical addresses (guest physical addresses). Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/xen/xcom_asm.S | 27 +++ arch/ia64/xen/xcom_hcall.c | 458 +++++++++++++++++++++++++++++++++++++ arch/ia64/xen/xencomm.c | 108 +++++++++ include/asm-ia64/xen/xcom_hcall.h | 55 +++++ include/asm-ia64/xen/xencomm.h | 33 +++ 5 files changed, 681 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/xen/xcom_asm.S create mode 100644 arch/ia64/xen/xcom_hcall.c create mode 100644 arch/ia64/xen/xencomm.c create mode 100644 include/asm-ia64/xen/xcom_hcall.h create mode 100644 include/asm-ia64/xen/xencomm.h diff --git a/arch/ia64/xen/xcom_asm.S b/arch/ia64/xen/xcom_asm.S new file mode 100644 index 0000000..8747908 --- /dev/null +++ b/arch/ia64/xen/xcom_asm.S @@ -0,0 +1,27 @@ +/* + * xencomm suspend support + * Support routines for Xen + * + * Copyright (C) 2005 Dan Magenheimer <dan...@hp...> + */ +#include <asm/asmmacro.h> +#include <xen/interface/xen.h> + +/* + * Stub for suspend. + * Just force the stacked registers to be written in memory. + */ +GLOBAL_ENTRY(xencomm_arch_hypercall_suspend) + ;; + alloc r20=ar.pfs,0,0,6,0 + mov r2=__HYPERVISOR_sched_op + ;; + /* We don't want to deal with RSE. */ + flushrs + mov r33=r32 + mov r32=2 // SCHEDOP_shutdown + ;; + break 0x1000 + ;; + br.ret.sptk.many b0 +END(xencomm_arch_hypercall_suspend) diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c new file mode 100644 index 0000000..bfddbd7 --- /dev/null +++ b/arch/ia64/xen/xcom_hcall.c @@ -0,0 +1,458 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Tristan Gingold <tri...@bu...> + * + * Copyright (c) 2007 + * Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * consolidate mini and inline version. + */ +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/gfp.h> +#include <linux/module.h> +#include <xen/interface/xen.h> +#include <xen/interface/memory.h> +#include <xen/interface/xencomm.h> +#include <xen/interface/version.h> +#include <xen/interface/sched.h> +#include <xen/interface/event_channel.h> +#include <xen/interface/physdev.h> +#include <xen/interface/grant_table.h> +#include <xen/interface/callback.h> +#include <xen/interface/vcpu.h> +#include <asm/xen/hypervisor.h> +#include <asm/page.h> +#include <asm/uaccess.h> +#include <asm/xen/xencomm.h> + +/* Xencomm notes: + * This file defines hypercalls to be used by xencomm.
The hypercalls simply + * create inlines or mini descriptors for pointers and then call the raw arch + * hypercall xencomm_arch_hypercall_XXX + * + * If the arch wants to directly use these hypercalls, simply define macros + * in asm/xen/hypercall.h, eg: + * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op + * + * The arch may also define HYPERVISOR_xxx as a function and do more operations + * before/after doing the hypercall. + * + * Note: because only inline or mini descriptors are created these functions + * must only be called with in kernel memory parameters. + */ + +int +xencomm_hypercall_console_io(int cmd, int count, char *str) +{ + return xencomm_arch_hypercall_console_io + (cmd, count, xencomm_map_no_alloc(str, count)); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io); + +int +xencomm_hypercall_event_channel_op(int cmd, void *op) +{ + struct xencomm_handle *desc; + desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op)); + if (desc == NULL) + return -EINVAL; + + return xencomm_arch_hypercall_event_channel_op(cmd, desc); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op); + +int +xencomm_hypercall_xen_version(int cmd, void *arg) +{ + struct xencomm_handle *desc; + unsigned int argsize; + + switch (cmd) { + case XENVER_version: + /* do not actually pass an argument */ + return xencomm_arch_hypercall_xen_version(cmd, 0); + case XENVER_extraversion: + argsize = sizeof(struct xen_extraversion); + break; + case XENVER_compile_info: + argsize = sizeof(struct xen_compile_info); + break; + case XENVER_capabilities: + argsize = sizeof(struct xen_capabilities_info); + break; + case XENVER_changeset: + argsize = sizeof(struct xen_changeset_info); + break; + case XENVER_platform_parameters: + argsize = sizeof(struct xen_platform_parameters); + break; + case XENVER_get_features: + argsize = (arg == NULL) ? 
0 : sizeof(struct xen_feature_info); + break; + + default: + printk(KERN_DEBUG + "%s: unknown version op %d\n", __func__, cmd); + return -ENOSYS; + } + + desc = xencomm_map_no_alloc(arg, argsize); + if (desc == NULL) + return -EINVAL; + + return xencomm_arch_hypercall_xen_version(cmd, desc); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version); + +int +xencomm_hypercall_physdev_op(int cmd, void *op) +{ + unsigned int argsize; + + switch (cmd) { + case PHYSDEVOP_apic_read: + case PHYSDEVOP_apic_write: + argsize = sizeof(struct physdev_apic); + break; + case PHYSDEVOP_alloc_irq_vector: + case PHYSDEVOP_free_irq_vector: + argsize = sizeof(struct physdev_irq); + break; + case PHYSDEVOP_irq_status_query: + argsize = sizeof(struct physdev_irq_status_query); + break; + + default: + printk(KERN_DEBUG + "%s: unknown physdev op %d\n", __func__, cmd); + return -ENOSYS; + } + + return xencomm_arch_hypercall_physdev_op + (cmd, xencomm_map_no_alloc(op, argsize)); +} + +static int +xencommize_grant_table_op(struct xencomm_mini **xc_area, + unsigned int cmd, void *op, unsigned int count, + struct xencomm_handle **desc) +{ + struct xencomm_handle *desc1; + unsigned int argsize; + + switch (cmd) { + case GNTTABOP_map_grant_ref: + argsize = sizeof(struct gnttab_map_grant_ref); + break; + case GNTTABOP_unmap_grant_ref: + argsize = sizeof(struct gnttab_unmap_grant_ref); + break; + case GNTTABOP_setup_table: + { + struct gnttab_setup_table *setup = op; + + argsize = sizeof(*setup); + + if (count != 1) + return -EINVAL; + desc1 = __xencomm_map_no_alloc + (xen_guest_handle(setup->frame_list), + setup->nr_frames * + sizeof(*xen_guest_handle(setup->frame_list)), + *xc_area); + if (desc1 == NULL) + return -EINVAL; + (*xc_area)++; + set_xen_guest_handle(setup->frame_list, (void *)desc1); + break; + } + case GNTTABOP_dump_table: + argsize = sizeof(struct gnttab_dump_table); + break; + case GNTTABOP_transfer: + argsize = sizeof(struct gnttab_transfer); + break; + case GNTTABOP_copy: + argsize = sizeof(struct gnttab_copy); + break; + case GNTTABOP_query_size: + argsize = sizeof(struct gnttab_query_size); + break; + default: + printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n", + __func__, cmd); + BUG(); + } + + *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area); + if (*desc == NULL) + return -EINVAL; + (*xc_area)++; + + return 0; +} + +int +xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, + unsigned int count) +{ + int rc; + struct xencomm_handle *desc; + XENCOMM_MINI_ALIGNED(xc_area, 2); + + rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc); + if (rc) + return rc; + + return xencomm_arch_hypercall_grant_table_op(cmd, desc, count); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op); + +int +xencomm_hypercall_sched_op(int cmd, void *arg) +{ + struct xencomm_handle *desc; + unsigned int argsize; + + switch (cmd) { + case SCHEDOP_yield: + case SCHEDOP_block: + argsize = 0; + break; + case SCHEDOP_shutdown: + argsize = sizeof(struct sched_shutdown); + break; + case SCHEDOP_poll: + { + struct sched_poll *poll = arg; + struct xencomm_handle *ports; + + argsize = sizeof(struct sched_poll); + ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports), + sizeof(*xen_guest_handle(poll->ports))); + + set_xen_guest_handle(poll->ports, (void *)ports); + break; + } + default: + printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd); + return -ENOSYS; + } + + desc = xencomm_map_no_alloc(arg, argsize); + if (desc == NULL) + return -EINVAL; + + return 
xencomm_arch_hypercall_sched_op(cmd, desc); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op); + +int +xencomm_hypercall_multicall(void *call_list, int nr_calls) +{ + int rc; + int i; + struct multicall_entry *mce; + struct xencomm_handle *desc; + XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2); + + for (i = 0; i < nr_calls; i++) { + mce = (struct multicall_entry *)call_list + i; + + switch (mce->op) { + case __HYPERVISOR_update_va_mapping: + case __HYPERVISOR_mmu_update: + /* No-op on ia64. */ + break; + case __HYPERVISOR_grant_table_op: + rc = xencommize_grant_table_op + (&xc_area, + mce->args[0], (void *)mce->args[1], + mce->args[2], &desc); + if (rc) + return rc; + mce->args[1] = (unsigned long)desc; + break; + case __HYPERVISOR_memory_op: + default: + printk(KERN_DEBUG + "%s: unhandled multicall op entry op %lu\n", + __func__, mce->op); + return -ENOSYS; + } + } + + desc = xencomm_map_no_alloc(call_list, + nr_calls * sizeof(struct multicall_entry)); + if (desc == NULL) + return -EINVAL; + + return xencomm_arch_hypercall_multicall(desc, nr_calls); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall); + +int +xencomm_hypercall_callback_op(int cmd, void *arg) +{ + unsigned int argsize; + switch (cmd) { + case CALLBACKOP_register: + argsize = sizeof(struct callback_register); + break; + case CALLBACKOP_unregister: + argsize = sizeof(struct callback_unregister); + break; + default: + printk(KERN_DEBUG + "%s: unknown callback op %d\n", __func__, cmd); + return -ENOSYS; + } + + return xencomm_arch_hypercall_callback_op + (cmd, xencomm_map_no_alloc(arg, argsize)); +} + +static int +xencommize_memory_reservation(struct xencomm_mini *xc_area, + struct xen_memory_reservation *mop) +{ + struct xencomm_handle *desc; + + desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start), + mop->nr_extents * + sizeof(*xen_guest_handle(mop->extent_start)), + xc_area); + if (desc == NULL) + return -EINVAL; + + set_xen_guest_handle(mop->extent_start, (void *)desc); + return 0; +} + +int +xencomm_hypercall_memory_op(unsigned int cmd, void *arg) +{ + GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = {{NULL}, {NULL}}; + struct xen_memory_reservation *xmr = NULL; + int rc; + struct xencomm_handle *desc; + unsigned int argsize; + XENCOMM_MINI_ALIGNED(xc_area, 2); + + switch (cmd) { + case XENMEM_increase_reservation: + case XENMEM_decrease_reservation: + case XENMEM_populate_physmap: + xmr = (struct xen_memory_reservation *)arg; + set_xen_guest_handle(extent_start_va[0], + xen_guest_handle(xmr->extent_start)); + + argsize = sizeof(*xmr); + rc = xencommize_memory_reservation(xc_area, xmr); + if (rc) + return rc; + xc_area++; + break; + + case XENMEM_maximum_ram_page: + argsize = 0; + break; + + case XENMEM_add_to_physmap: + argsize = sizeof(struct xen_add_to_physmap); + break; + + default: + printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd); + return -ENOSYS; + } + + desc = xencomm_map_no_alloc(arg, argsize); + if (desc == NULL) + return -EINVAL; + + rc = xencomm_arch_hypercall_memory_op(cmd, desc); + + switch (cmd) { + case XENMEM_increase_reservation: + case XENMEM_decrease_reservation: + case XENMEM_populate_physmap: + set_xen_guest_handle(xmr->extent_start, + xen_guest_handle(extent_start_va[0])); + break; + } + + return rc; +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op); + +int +xencomm_hypercall_suspend(unsigned long srec) +{ + struct sched_shutdown arg; + + arg.reason = SHUTDOWN_suspend; + + return xencomm_arch_hypercall_suspend( + xencomm_map_no_alloc(&arg, sizeof(arg))); +} + +long 
+xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg) +{ + unsigned int argsize; + switch (cmd) { + case VCPUOP_register_runstate_memory_area: { + struct vcpu_register_runstate_memory_area *area = + (struct vcpu_register_runstate_memory_area *)arg; + argsize = sizeof(*arg); + set_xen_guest_handle(area->addr.h, + (void *)xencomm_map_no_alloc(area->addr.v, + sizeof(area->addr.v))); + break; + } + + default: + printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd); + return -ENOSYS; + } + + return xencomm_arch_hypercall_vcpu_op(cmd, cpu, + xencomm_map_no_alloc(arg, argsize)); +} + +long +xencomm_hypercall_opt_feature(void *arg) +{ + return xencomm_arch_hypercall_opt_feature( + xencomm_map_no_alloc(arg, + sizeof(struct xen_ia64_opt_feature))); +} + +int +xencomm_hypercall_fpswa_revision(unsigned int *revision) +{ + struct xencomm_handle *desc; + + desc = xencomm_map_no_alloc(revision, sizeof(*revision)); + if (desc == NULL) + return -EINVAL; + + return xencomm_arch_hypercall_fpswa_revision(desc); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_fpswa_revision); diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c new file mode 100644 index 0000000..6e9da66 --- /dev/null +++ b/arch/ia64/xen/xencomm.c @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2006 Hollis Blanchard <ho...@us...>, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/gfp.h> +#include <linux/mm.h> +#include <xen/interface/xen.h> +#include <asm/page.h> + +#ifdef HAVE_XEN_PLATFORM_COMPAT_H +#include <xen/platform-compat.h> +#endif + +#include <asm/xen/xencomm.h> + +static unsigned long kernel_start_pa; + +void +xencomm_initialize(void) +{ + kernel_start_pa = KERNEL_START - ia64_tpa(KERNEL_START); +} + +/* Translate virtual address to physical address. */ +unsigned long +xencomm_vtop(unsigned long vaddr) +{ +#ifndef CONFIG_VMX_GUEST + struct page *page; + struct vm_area_struct *vma; +#endif + + if (vaddr == 0) + return 0; + +#ifdef __ia64__ + if (REGION_NUMBER(vaddr) == 5) { + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *ptep; + + /* On ia64, TASK_SIZE refers to current. It is not initialized + during boot. + Furthermore the kernel is relocatable and __pa() doesn't + work on addresses. */ + if (vaddr >= KERNEL_START + && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE)) + return vaddr - kernel_start_pa; + + /* In kernel area -- virtually mapped. 
*/ + pgd = pgd_offset_k(vaddr); + if (pgd_none(*pgd) || pgd_bad(*pgd)) + return ~0UL; + + pud = pud_offset(pgd, vaddr); + if (pud_none(*pud) || pud_bad(*pud)) + return ~0UL; + + pmd = pmd_offset(pud, vaddr); + if (pmd_none(*pmd) || pmd_bad(*pmd)) + return ~0UL; + + ptep = pte_offset_kernel(pmd, vaddr); + if (!ptep) + return ~0UL; + + return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK); + } +#endif + + if (vaddr > TASK_SIZE) { + /* kernel address */ + return __pa(vaddr); + } + + +#ifdef CONFIG_VMX_GUEST + /* No privcmd within vmx guest. */ + return ~0UL; +#else + /* XXX double-check (lack of) locking */ + vma = find_extend_vma(current->mm, vaddr); + if (!vma) + return ~0UL; + + /* We assume the page is modified. */ + page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH); + if (!page) + return ~0UL; + + return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); +#endif +} diff --git a/include/asm-ia64/xen/xcom_hcall.h b/include/asm-ia64/xen/xcom_hcall.h new file mode 100644 index 0000000..8b1f74e --- /dev/null +++ b/include/asm-ia64/xen/xcom_hcall.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2006 Tristan Gingold <tri...@bu...>, Bull SAS + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_IA64_XEN_XCOM_HCALL_H +#define _ASM_IA64_XEN_XCOM_HCALL_H + +/* These function creates inline or mini descriptor for the parameters and + calls the corresponding xencomm_arch_hypercall_X. + Architectures should defines HYPERVISOR_xxx as xencomm_hypercall_xxx unless + they want to use their own wrapper. 
*/ +extern int xencomm_hypercall_console_io(int cmd, int count, char *str); + +extern int xencomm_hypercall_event_channel_op(int cmd, void *op); + +extern int xencomm_hypercall_xen_version(int cmd, void *arg); + +extern int xencomm_hypercall_physdev_op(int cmd, void *op); + +extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, + unsigned int count); + +extern int xencomm_hypercall_sched_op(int cmd, void *arg); + +extern int xencomm_hypercall_multicall(void *call_list, int nr_calls); + +extern int xencomm_hypercall_callback_op(int cmd, void *arg); + +extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg); + +extern unsigned long xencomm_hypercall_hvm_op(int cmd, void *arg); + +extern int xencomm_hypercall_suspend(unsigned long srec); + +extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg); + +extern long xencomm_hypercall_opt_feature(void *arg); + +extern int xencomm_hypercall_kexec_op(int cmd, void *arg); + +#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */ diff --git a/include/asm-ia64/xen/xencomm.h b/include/asm-ia64/xen/xencomm.h new file mode 100644 index 0000000..e95db51 --- /dev/null +++ b/include/asm-ia64/xen/xencomm.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2006 Hollis Blanchard <ho...@us...>, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_IA64_XEN_XENCOMM_H +#define _ASM_IA64_XEN_XENCOMM_H + +#define is_kernel_addr(x) \ + ((PAGE_OFFSET <= (x) && \ + (x) < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) || \ + (KERNEL_START <= (x) && \ + (x) < KERNEL_START + KERNEL_TR_PAGE_SIZE)) + +/* Must be called before any hypercall. */ +extern void xencomm_initialize(void); + +#include <xen/xencomm.h> + +#endif /* _ASM_IA64_XEN_XENCOMM_H */ -- 1.5.3 |
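The intended calling convention is spelled out in the "Xencomm notes" comment above: an architecture maps the generic hypercall names onto these wrappers, which build inline or mini descriptors for in-kernel pointers before invoking the raw xencomm_arch_hypercall_*. A minimal sketch of a caller following that convention; the function below is illustrative and not part of this patch.

#include <xen/interface/sched.h>
#include <asm/xen/xcom_hcall.h>

/* per the comment in xcom_hcall.c, asm/xen/hypercall.h may simply do: */
#define HYPERVISOR_sched_op xencomm_hypercall_sched_op

static void reboot_via_xencomm(void)
{
	struct sched_shutdown arg = { .reason = SHUTDOWN_reboot };

	/* &arg is an in-kernel virtual address; the wrapper turns it into
	 * a xencomm descriptor (pseudo-physical address) with
	 * xencomm_map_no_alloc() before trapping into the hypervisor */
	HYPERVISOR_sched_op(SCHEDOP_shutdown, &arg);
}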
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:24
|
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/kernel/iosapic.c | 43 +++++++++++++++++++++++++++---------------- arch/ia64/kernel/paravirt.c | 30 ++++++++++++++++++++++++++++++ include/asm-ia64/iosapic.h | 18 ++++++++++++++++-- include/asm-ia64/paravirt.h | 40 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 113 insertions(+), 18 deletions(-) diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 7b32922..7380d6d 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -587,6 +587,15 @@ static inline int irq_is_shared (int irq) return (iosapic_intr_info[irq].count > 1); } +struct irq_chip* +native_iosapic_get_irq_chip(unsigned long trigger) +{ + if (trigger == IOSAPIC_EDGE) + return &irq_type_iosapic_edge; + else + return &irq_type_iosapic_level; +} + static int register_intr (unsigned int gsi, int irq, unsigned char delivery, unsigned long polarity, unsigned long trigger) @@ -637,13 +646,10 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery, iosapic_intr_info[irq].dmode = delivery; iosapic_intr_info[irq].trigger = trigger; - if (trigger == IOSAPIC_EDGE) - irq_type = &irq_type_iosapic_edge; - else - irq_type = &irq_type_iosapic_level; + irq_type = iosapic_get_irq_chip(trigger); idesc = irq_desc + irq; - if (idesc->chip != irq_type) { + if (irq_type != NULL && idesc->chip != irq_type) { if (idesc->chip != &no_irq_type) printk(KERN_WARNING "%s: changing vector %d from %s to %s\n", @@ -976,6 +982,20 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, } void __init +native_iosapic_pcat_compat_init(void) +{ + /* + * Disable the compatibility mode interrupts (8259 style), + * needs IN/OUT support enabled. + */ + printk(KERN_INFO + "%s: Disabling PC-AT compatible 8259 interrupts\n", + __FUNCTION__); + outb(0xff, 0xA1); + outb(0xff, 0x21); +} + +void __init iosapic_system_init (int system_pcat_compat) { int irq; @@ -989,17 +1009,8 @@ iosapic_system_init (int system_pcat_compat) } pcat_compat = system_pcat_compat; - if (pcat_compat) { - /* - * Disable the compatibility mode interrupts (8259 style), - * needs IN/OUT support enabled. - */ - printk(KERN_INFO - "%s: Disabling PC-AT compatible 8259 interrupts\n", - __FUNCTION__); - outb(0xff, 0xA1); - outb(0xff, 0x21); - } + if (pcat_compat) + iosapic_pcat_compat_init(); } static inline int diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c index 4282b00..7e6a2d0 100644 --- a/arch/ia64/kernel/paravirt.c +++ b/arch/ia64/kernel/paravirt.c @@ -22,6 +22,12 @@ #include <linux/init.h> +#include <linux/compiler.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/types.h> + +#include <asm/iosapic.h> #include <asm/paravirt.h> /*************************************************************************** @@ -40,3 +46,27 @@ struct pv_info pv_info = { struct pv_init_ops pv_init_ops; +/*************************************************************************** + * pv_iosapic_ops + * iosapic read/write hooks. 
+ */ + +static unsigned int +native_iosapic_read(char __iomem *iosapic, unsigned int reg) +{ + return __native_iosapic_read(iosapic, reg); +} + +static void +native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) +{ + __native_iosapic_write(iosapic, reg, val); +} + +struct pv_iosapic_ops pv_iosapic_ops = { + .pcat_compat_init = native_iosapic_pcat_compat_init, + .get_irq_chip = native_iosapic_get_irq_chip, + + .__read = native_iosapic_read, + .__write = native_iosapic_write, +}; diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h index a3a4288..73ee754 100644 --- a/include/asm-ia64/iosapic.h +++ b/include/asm-ia64/iosapic.h @@ -55,13 +55,27 @@ #define NR_IOSAPICS 256 -static inline unsigned int __iosapic_read(char __iomem *iosapic, unsigned int reg) +#ifdef CONFIG_PARAVIRT_GUEST +#include <asm/paravirt.h> +#else +#define iosapic_pcat_compat_init native_iosapic_pcat_compat_init +#define __iosapic_read __native_iosapic_read +#define __iosapic_write __native_iosapic_write +#define iosapic_get_irq_chip native_iosapic_get_irq_chip +#endif + +extern void __init native_iosapic_pcat_compat_init(void); +extern struct irq_chip *native_iosapic_get_irq_chip(unsigned long trigger); + +static inline unsigned int +__native_iosapic_read(char __iomem *iosapic, unsigned int reg) { writel(reg, iosapic + IOSAPIC_REG_SELECT); return readl(iosapic + IOSAPIC_WINDOW); } -static inline void __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) +static inline void +__native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) { writel(reg, iosapic + IOSAPIC_REG_SELECT); writel(val, iosapic + IOSAPIC_WINDOW); diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h index dd585fc..9efeda9 100644 --- a/include/asm-ia64/paravirt.h +++ b/include/asm-ia64/paravirt.h @@ -152,6 +152,46 @@ paravirt_inst_patch_module(struct paravirt_alt_inst_patch *start, pv_init_ops.inst_patch_module(start, end); } +/****************************************************************************** + * replacement of iosapic operations. + */ + +struct pv_iosapic_ops { + void (*pcat_compat_init)(void); + + struct irq_chip *(*get_irq_chip)(unsigned long trigger); + + unsigned int (*__read)(char __iomem *iosapic, unsigned int reg); + void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val); +}; + +extern struct pv_iosapic_ops pv_iosapic_ops; + +static inline void +iosapic_pcat_compat_init(void) +{ + if (pv_iosapic_ops.pcat_compat_init) + pv_iosapic_ops.pcat_compat_init(); +} + +static inline struct irq_chip* +iosapic_get_irq_chip(unsigned long trigger) +{ + return pv_iosapic_ops.get_irq_chip(trigger); +} + +static inline unsigned int +__iosapic_read(char __iomem *iosapic, unsigned int reg) +{ + return pv_iosapic_ops.__read(iosapic, reg); +} + +static inline void +__iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) +{ + return pv_iosapic_ops.__write(iosapic, reg, val); +} + #endif /* __ASSEMBLY__ */ #else -- 1.5.3 |
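With the native hooks in place, a pv instance only has to install its own pv_iosapic_ops before the iosapic code runs. The Xen-side functions below are hypothetical stand-ins (this patch adds only the native implementations); note that register_intr() above was changed to tolerate a NULL irq_chip, which is what lets a get_irq_chip hook return NULL.

/* hypothetical override -- not part of this patch */
static unsigned int
xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
{
	return 0;		/* a real guest would ask the hypervisor */
}

static void
xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
	/* likewise, forward the write to the hypervisor */
}

static struct irq_chip *
xen_iosapic_get_irq_chip(unsigned long trigger)
{
	return NULL;		/* event channels bring their own irq_chip */
}

static const struct pv_iosapic_ops xen_iosapic_ops = {
	.pcat_compat_init = NULL,	/* guarded by the inline wrapper */
	.get_irq_chip = xen_iosapic_get_irq_chip,
	.__read = xen_iosapic_read,
	.__write = xen_iosapic_write,
};

/* installed early from the guest's setup path, e.g.:
 *	pv_iosapic_ops = xen_iosapic_ops;
 */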
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:24
|
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/kernel/ivt.S | 153 ++++++++++++++++++++++++----------------------- 1 files changed, 78 insertions(+), 75 deletions(-) diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index 34f44d8..d1cebe5 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S @@ -12,6 +12,13 @@ * * 00/08/23 Asit Mallick <asi...@in...> TLB handling for SMP * 00/12/20 David Mosberger-Tang <da...@hp...> DTLB/ITLB handler now uses virtual PT. + * + * Copyright (C) 2005 Hewlett-Packard Co + * Dan Magenheimer <dan...@hp...> + * Xen paravirtualization + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * pv_ops. */ /* * This file defines the interruption vector table used by the CPU. @@ -68,6 +75,7 @@ # define DBG_FAULT(i) #endif +#include "inst_paravirt.h" #include "minstate.h" #define FAULT(n) \ @@ -102,13 +110,13 @@ ENTRY(vhpt_miss) * - the faulting virtual address uses unimplemented address bits * - the faulting virtual address has no valid page table mapping */ - mov r16=cr.ifa // get address that caused the TLB miss + MOV_FROM_IFA(r16) // get address that caused the TLB miss #ifdef CONFIG_HUGETLB_PAGE movl r18=PAGE_SHIFT - mov r25=cr.itir + MOV_FROM_ITIR(r25) #endif ;; - rsm psr.dt // use physical addressing for data + RSM_PSR_DT // use physical addressing for data mov r31=pr // save the predicate registers mov r19=IA64_KR(PT_BASE) // get page table base address shl r21=r16,3 // shift bit 60 into sign bit @@ -168,21 +176,20 @@ ENTRY(vhpt_miss) dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr) ;; (p7) ld8 r18=[r21] // read *pte - mov r19=cr.isr // cr.isr bit 32 tells us if this is an insn miss + MOV_FROM_ISR(r19) // cr.isr bit 32 tells us if this is an insn miss ;; (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared? - mov r22=cr.iha // get the VHPT address that caused the TLB miss + MOV_FROM_IHA(r22) // get the VHPT address that caused the TLB miss ;; // avoid RAW on p7 (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss? dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address ;; -(p10) itc.i r18 // insert the instruction TLB entry -(p11) itc.d r18 // insert the data TLB entry + ITC_I_AND_D(p10, p11, r18, r24) // insert the instruction TLB entry and + // insert the data TLB entry (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault) - mov cr.ifa=r22 - + MOV_TO_IFA(r22, r24) #ifdef CONFIG_HUGETLB_PAGE -(p8) mov cr.itir=r25 // change to default page-size for VHPT + MOV_TO_ITIR(p8, r25, r24) // change to default page-size for VHPT #endif /* @@ -192,7 +199,7 @@ ENTRY(vhpt_miss) */ adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23 ;; -(p7) itc.d r24 + ITC_D(p7, r24, r25) ;; #ifdef CONFIG_SMP /* @@ -234,7 +241,7 @@ ENTRY(vhpt_miss) #endif mov pr=r31,-1 // restore predicate registers - rfi + RFI END(vhpt_miss) .org ia64_ivt+0x400 @@ -248,11 +255,11 @@ ENTRY(itlb_miss) * mode, walk the page table, and then re-execute the PTE read and * go on normally after that. */ - mov r16=cr.ifa // get virtual address + MOV_FROM_IFA(r16) // get virtual address mov r29=b0 // save b0 mov r31=pr // save predicates .itlb_fault: - mov r17=cr.iha // get virtual address of PTE + MOV_FROM_IHA(r17) // get virtual address of PTE movl r30=1f // load nested fault continuation point ;; 1: ld8 r18=[r17] // read *pte @@ -261,7 +268,7 @@ ENTRY(itlb_miss) tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? 
(p6) br.cond.spnt page_fault ;; - itc.i r18 + ITC_I(p0, r18, r19) ;; #ifdef CONFIG_SMP /* @@ -278,7 +285,7 @@ ENTRY(itlb_miss) (p7) ptc.l r16,r20 #endif mov pr=r31,-1 - rfi + RFI END(itlb_miss) .org ia64_ivt+0x0800 @@ -292,11 +299,11 @@ ENTRY(dtlb_miss) * mode, walk the page table, and then re-execute the PTE read and * go on normally after that. */ - mov r16=cr.ifa // get virtual address + MOV_FROM_IFA(r16) // get virtual address mov r29=b0 // save b0 mov r31=pr // save predicates dtlb_fault: - mov r17=cr.iha // get virtual address of PTE + MOV_FROM_IHA(r17) // get virtual address of PTE movl r30=1f // load nested fault continuation point ;; 1: ld8 r18=[r17] // read *pte @@ -305,7 +312,7 @@ dtlb_fault: tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? (p6) br.cond.spnt page_fault ;; - itc.d r18 + ITC_D(p0, r18, r19) ;; #ifdef CONFIG_SMP /* @@ -322,7 +329,7 @@ dtlb_fault: (p7) ptc.l r16,r20 #endif mov pr=r31,-1 - rfi + RFI END(dtlb_miss) .org ia64_ivt+0x0c00 @@ -330,9 +337,9 @@ END(dtlb_miss) // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) ENTRY(alt_itlb_miss) DBG_FAULT(3) - mov r16=cr.ifa // get address that caused the TLB miss + MOV_FROM_IFA(r16) // get address that caused the TLB miss movl r17=PAGE_KERNEL - mov r21=cr.ipsr + MOV_FROM_IPSR(r21) movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) mov r31=pr ;; @@ -341,9 +348,9 @@ ENTRY(alt_itlb_miss) ;; cmp.gt p8,p0=6,r22 // user mode ;; -(p8) thash r17=r16 + THASH(p8, r17, r16, r23) ;; -(p8) mov cr.iha=r17 + MOV_TO_IHA(p8, r17, r23) (p8) mov r29=b0 // save b0 (p8) br.cond.dptk .itlb_fault #endif @@ -358,9 +365,9 @@ ENTRY(alt_itlb_miss) or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 (p8) br.cond.spnt page_fault ;; - itc.i r19 // insert the TLB entry + ITC_I(p0, r19, r18) // insert the TLB entry mov pr=r31,-1 - rfi + RFI END(alt_itlb_miss) .org ia64_ivt+0x1000 @@ -368,11 +375,11 @@ END(alt_itlb_miss) // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) ENTRY(alt_dtlb_miss) DBG_FAULT(4) - mov r16=cr.ifa // get address that caused the TLB miss + MOV_FROM_IFA(r16) // get address that caused the TLB miss movl r17=PAGE_KERNEL - mov r20=cr.isr + MOV_FROM_ISR(r20) movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) - mov r21=cr.ipsr + MOV_FROM_IPSR(r21) mov r31=pr mov r24=PERCPU_ADDR ;; @@ -381,9 +388,9 @@ ENTRY(alt_dtlb_miss) ;; cmp.gt p8,p0=6,r22 // access to region 0-5 ;; -(p8) thash r17=r16 + THASH(p8, r17, r16, r25) ;; -(p8) mov cr.iha=r17 + MOV_TO_IHA(r17, r25) (p8) mov r29=b0 // save b0 (p8) br.cond.dptk dtlb_fault #endif @@ -402,7 +409,7 @@ ENTRY(alt_dtlb_miss) tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? 
;; (p10) sub r19=r19,r26 -(p10) mov cr.itir=r25 + MOV_TO_ITIR(p10, r25, r24) cmp.ne p8,p0=r0,r23 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field (p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr @@ -413,9 +420,9 @@ ENTRY(alt_dtlb_miss) or r19=r19,r17 // insert PTE control bits into r19 (p6) mov cr.ipsr=r21 ;; -(p7) itc.d r19 // insert the TLB entry + ITC_D(p7, r19, r18) // insert the TLB entry mov pr=r31,-1 - rfi + RFI END(alt_dtlb_miss) .org ia64_ivt+0x1400 @@ -444,10 +451,10 @@ ENTRY(nested_dtlb_miss) * * Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared) */ - rsm psr.dt // switch to using physical data addressing + RSM_PSR_DT_AND_SRLZ_I // switch to using physical data addressing mov r19=IA64_KR(PT_BASE) // get the page table base address shl r21=r16,3 // shift bit 60 into sign bit - mov r18=cr.itir + MOV_FROM_ITIR(r18) ;; shr.u r17=r16,61 // get the region number into r17 extr.u r18=r18,2,6 // get the faulting page size @@ -510,21 +517,15 @@ END(ikey_miss) //----------------------------------------------------------------------------------- // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address) ENTRY(page_fault) - ssm psr.dt - ;; - srlz.i + SSM_PSR_DT_AND_SRLZ_I ;; SAVE_MIN_WITH_COVER alloc r15=ar.pfs,0,0,3,0 - mov out0=cr.ifa - mov out1=cr.isr + MOV_FROM_IFA(out0) + MOV_FROM_ISR(out1) + SSM_PSR_IC_AND_DEFAULT_BITS(r14, r3) adds r3=8,r2 // set up second base pointer - ;; - ssm psr.ic | PSR_DEFAULT_BITS - ;; - srlz.i // guarantee that interruption collectin is on - ;; -(p15) ssm psr.i // restore psr.i + SSM_PSR_I(p15, r14) // restore psr.i movl r14=ia64_leave_kernel ;; SAVE_REST @@ -556,10 +557,10 @@ ENTRY(dirty_bit) * page table TLB entry isn't present, we take a nested TLB miss hit where we look * up the physical address of the L3 PTE and then continue at label 1 below. 
*/ - mov r16=cr.ifa // get the address that caused the fault + MOV_FROM_IFA(r16) // get the address that caused the fault movl r30=1f // load continuation point in case of nested fault ;; - thash r17=r16 // compute virtual address of L3 PTE + THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE mov r29=b0 // save b0 in case of nested fault mov r31=pr // save pr #ifdef CONFIG_SMP @@ -576,7 +577,7 @@ ENTRY(dirty_bit) ;; (p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present ;; -(p6) itc.d r25 // install updated PTE + ITC_D(p6, r25, r18) // install updated PTE ;; /* * Tell the assemblers dependency-violation checker that the above "itc" instructions @@ -602,7 +603,7 @@ ENTRY(dirty_bit) itc.d r18 // install updated PTE #endif mov pr=r31,-1 // restore pr - rfi + RFI END(dirty_bit) .org ia64_ivt+0x2400 @@ -611,7 +612,7 @@ END(dirty_bit) ENTRY(iaccess_bit) DBG_FAULT(9) // Like Entry 8, except for instruction access - mov r16=cr.ifa // get the address that caused the fault + MOV_FROM_IFA(r16) // get the address that caused the fault movl r30=1f // load continuation point in case of nested fault mov r31=pr // save predicates #ifdef CONFIG_ITANIUM @@ -626,7 +627,7 @@ ENTRY(iaccess_bit) (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa #endif /* CONFIG_ITANIUM */ ;; - thash r17=r16 // compute virtual address of L3 PTE + THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE mov r29=b0 // save b0 in case of nested fault) #ifdef CONFIG_SMP mov r28=ar.ccv // save ar.ccv @@ -642,7 +643,7 @@ ENTRY(iaccess_bit) ;; (p6) cmp.eq p6,p7=r26,r18 // Only if page present ;; -(p6) itc.i r25 // install updated PTE + ITC_I(p6, r25, r26) // install updated PTE ;; /* * Tell the assemblers dependency-violation checker that the above "itc" instructions @@ -668,7 +669,7 @@ ENTRY(iaccess_bit) itc.i r18 // install updated PTE #endif /* !CONFIG_SMP */ mov pr=r31,-1 - rfi + RFI END(iaccess_bit) .org ia64_ivt+0x2800 @@ -677,10 +678,10 @@ END(iaccess_bit) ENTRY(daccess_bit) DBG_FAULT(10) // Like Entry 8, except for data access - mov r16=cr.ifa // get the address that caused the fault + MOV_FROM_IFA(r16) // get the address that caused the fault movl r30=1f // load continuation point in case of nested fault ;; - thash r17=r16 // compute virtual address of L3 PTE + THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE mov r31=pr mov r29=b0 // save b0 in case of nested fault) #ifdef CONFIG_SMP @@ -697,7 +698,7 @@ ENTRY(daccess_bit) ;; (p6) cmp.eq p6,p7=r26,r18 // Only if page is present ;; -(p6) itc.d r25 // install updated PTE + ITC_D(p6, r25, r26) // install updated PTE /* * Tell the assemblers dependency-violation checker that the above "itc" instructions * cannot possibly affect the following loads: @@ -721,7 +722,7 @@ ENTRY(daccess_bit) #endif mov b0=r29 // restore b0 mov pr=r31,-1 - rfi + RFI END(daccess_bit) .org ia64_ivt+0x2c00 @@ -745,10 +746,10 @@ ENTRY(break_fault) */ DBG_FAULT(11) mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) - mov r29=cr.ipsr // M2 (12 cyc) + MOV_FROM_IPSR(r29) // M2 (12 cyc) mov r31=pr // I0 (2 cyc) - mov r17=cr.iim // M2 (2 cyc) + MOV_FROM_IIM(r17) // M2 (2 cyc) mov.m r27=ar.rsc // M2 (12 cyc) mov r18=__IA64_BREAK_SYSCALL // A @@ -767,7 +768,7 @@ ENTRY(break_fault) nop.m 0 movl r30=sys_call_table // X - mov r28=cr.iip // M2 (2 cyc) + MOV_FROM_IIP(r28) // M2 (2 cyc) cmp.eq p0,p7=r18,r17 // I0 is this a system call? 
(p7) br.cond.spnt non_syscall // B no -> // @@ -831,10 +832,10 @@ ENTRY(break_fault) 1: mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 nop 0 - bsw.1 // B (6 cyc) regs are saved, switch to bank 1 + BSW_1(r2, r14) // B (6 cyc) regs are saved, switch to bank 1 ;; - ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection + SSM_PSR_IC_AND_DEFAULT_BITS(r3, r16) // M2 now it's safe to re-enable intr.-collection movl r3=ia64_ret_from_syscall // X ;; @@ -842,7 +843,7 @@ ENTRY(break_fault) mov rp=r3 // I0 set the real return addr (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT -(p15) ssm psr.i // M2 restore psr.i + SSM_PSR_I(p15, r16) // M2 restore psr.i (p14) br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr) br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic // NOT REACHED @@ -866,7 +867,7 @@ ENTRY(interrupt) mov r31=pr // prepare to save predicates ;; SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 - ssm psr.ic | PSR_DEFAULT_BITS + SSM_PSR_IC_AND_DEFAULT_BITS(r3, r14) ;; adds r3=8,r2 // set up second base pointer for SAVE_REST srlz.i // ensure everybody knows psr.ic is back on @@ -875,7 +876,7 @@ ENTRY(interrupt) ;; MCA_RECOVER_RANGE(interrupt) alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group - mov out0=cr.ivr // pass cr.ivr as first arg + MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg add out1=16,sp // pass pointer to pt_regs as second arg ;; srlz.d // make sure we see the effect of cr.ivr @@ -944,6 +945,7 @@ END(interrupt) * - ar.fpsr: set to kernel settings * - b6: preserved (same as on entry) */ +#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE GLOBAL_ENTRY(ia64_syscall_setup) #if PT(B6) != 0 # error This code assumes that b6 is the first field in pt_regs. @@ -1035,6 +1037,7 @@ GLOBAL_ENTRY(ia64_syscall_setup) (p10) mov r8=-EINVAL br.ret.sptk.many b7 END(ia64_syscall_setup) +#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ .org ia64_ivt+0x3c00 ///////////////////////////////////////////////////////////////////////////////////////// @@ -1181,10 +1184,10 @@ ENTRY(dispatch_to_fault_handler) SAVE_MIN_WITH_COVER_R19 alloc r14=ar.pfs,0,0,5,0 mov out0=r15 - mov out1=cr.isr - mov out2=cr.ifa - mov out3=cr.iim - mov out4=cr.itir + MOV_FROM_ISR(out1) + MOV_FROM_IFA(out2) + MOV_FROM_IIM(out3) + MOV_FROM_ITIR(out4) ;; ssm psr.ic | PSR_DEFAULT_BITS ;; @@ -1255,8 +1258,8 @@ END(iaccess_rights) // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) ENTRY(daccess_rights) DBG_FAULT(23) - mov r16=cr.ifa - rsm psr.dt + MOV_FROM_IFA(r16) + RSM_PSR_DT mov r31=pr ;; srlz.d @@ -1352,7 +1355,7 @@ ENTRY(speculation_vector) mov cr.ipsr=r16 ;; - rfi // and go back + RFI END(speculation_vector) .org ia64_ivt+0x5800 @@ -1506,7 +1509,7 @@ ENTRY(ia32_intercept) (p6) br.cond.spnt 1f // eflags.ac bit didn't change ;; mov pr=r31,-1 // restore predicate registers - rfi + RFI 1: #endif // CONFIG_IA32_SUPPORT -- 1.5.3 |
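The pattern running through this ivt.S conversion: every privileged instruction that a Xen guest cannot execute directly (mov to/from cr.*, itc.i/itc.d, rfi, ssm/rsm of psr bits, cover, bsw) is hidden behind a preprocessor macro, and the unmodified source is then assembled twice, once per flavor. A minimal sketch of what the two expansions might look like; the XSI_IFA offset and XEN_HYPER_RFI hyperprivop names are assumptions modelled on the Xen/ia64 interface, not definitions quoted from this mail:

#ifdef __IA64_ASM_PARAVIRTUALIZED_XEN
/* Xen flavor: read the virtualized cr.ifa image from the
 * shared mapped-regs area instead of the real register. */
#define MOV_FROM_IFA(reg)		\
	movl reg = XSI_IFA;		\
	;;				\
	ld8 reg = [reg]
/* rfi must trap to the hypervisor so it can restore the guest psr */
#define RFI				\
	XEN_HYPER_RFI;			\
	dv_serialize_data
#else
/* native flavor: just the raw privileged instructions */
#define MOV_FROM_IFA(reg)	mov reg = cr.ifa
#define RFI			rfi
#endif

This is also why macros such as ITC_D(p7, r19, r18) grow an extra register argument: the trailing register is scratch for the Xen expansion to clobber, and the native expansion simply ignores it. The new __IA64_ASM_PARAVIRTUALIZED_NATIVE guard around ia64_syscall_setup presumably keeps that routine from being emitted twice when the file is assembled once per flavor.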
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:24
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/kernel/switch_leave.S | 80 +++++++++++++++++++++----------------- 1 files changed, 44 insertions(+), 36 deletions(-) diff --git a/arch/ia64/kernel/switch_leave.S b/arch/ia64/kernel/switch_leave.S index 9918160..d6d0f08 100644 --- a/arch/ia64/kernel/switch_leave.S +++ b/arch/ia64/kernel/switch_leave.S @@ -44,16 +44,17 @@ #include <asm/pgtable.h> #include <asm/thread_info.h> +#include "inst_paravirt.h" #include "minstate.h" - /* * prev_task <- ia64_switch_to(struct task_struct *next) * With Ingo's new scheduler, interrupts are disabled when this routine gets * called. The code starting at .map relies on this. The rest of the code * doesn't care about the interrupt masking status. */ -GLOBAL_ENTRY(native_switch_to) +GLOBAL_ENTRY(__paravirt_switch_to) + BR_IF_NATIVE(native_switch_to, r22, p7) .prologue alloc r16=ar.pfs,1,0,0,0 DO_SAVE_SWITCH_STACK @@ -77,7 +78,7 @@ GLOBAL_ENTRY(native_switch_to) ;; .done: ld8 sp=[r21] // load kernel stack pointer of new task - mov IA64_KR(CURRENT)=in0 // update "current" application register + MOV_TO_KR(CURRENT, in0, r8, r9) // update "current" application register mov r8=r13 // return pointer to previously running task mov r13=in0 // set "current" pointer ;; @@ -89,25 +90,30 @@ GLOBAL_ENTRY(native_switch_to) br.ret.sptk.many rp // boogie on out in new context .map: - rsm psr.ic // interrupts (psr.i) are already disabled here + RSM_PSR_IC(r25) // interrupts (psr.i) are already disabled here movl r25=PAGE_KERNEL ;; srlz.d or r23=r25,r20 // construct PA | page properties mov r25=IA64_GRANULE_SHIFT<<2 ;; - mov cr.itir=r25 - mov cr.ifa=in0 // VA of next task... + MOV_TO_ITIR(p0, r25, r8) + MOV_TO_IFA(in0, r8) // VA of next task... ;; mov r25=IA64_TR_CURRENT_STACK - mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped... + MOV_TO_KR(CURRENT_STACK, r26, r8, r9) // remember last page we mapped... ;; itr.d dtr[r25]=r23 // wire in new mapping... - ssm psr.ic // reenable the psr.ic bit - ;; - srlz.d + SSM_PSR_IC_AND_SRLZ_D(r8, r9) // reenable the psr.ic bit br.cond.sptk .done -END(native_switch_to) +END(__paravirt_switch_to) + +#ifdef IA64_ASM_PARAVIRTUALIZED_XEN +GLOBAL_ENTRY(xen_work_processed_syscall_with_check) + BR_IF_NATIVE(native_work_processed_syscall, r2, p7) + br.cond.sptk xen_work_processed_syscall +END(xen_work_processed_syscall_with_check) +#endif /* IA64_ASM_PARAVIRTUALIZED_XEN */ /* * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't @@ -153,8 +159,9 @@ END(native_switch_to) * ar.csd: cleared * ar.ssd: cleared */ -GLOBAL_ENTRY(native_leave_syscall) +GLOBAL_ENTRY(__paravirt_leave_syscall) PT_REGS_UNWIND_INFO(0) + BR_IF_NATIVE(native_leave_syscall, r22, p7) /* * work.need_resched etc. mustn't get changed by this CPU before it returns to * user- or fsys-mode, hence we disable interrupts early on. @@ -177,12 +184,12 @@ GLOBAL_ENTRY(native_leave_syscall) ;; cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) #else /* !CONFIG_PREEMPT */ -(pUStk) rsm psr.i + RSM_PSR_I(pUStk, r2, r18) cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk #endif -.global native_work_processed_syscall; -native_work_processed_syscall: +.global __paravirt_work_processed_syscall; +__paravirt_work_processed_syscall: adds r2=PT(LOADRS)+16,r12 adds r3=PT(AR_BSPSTORE)+16,r12 adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 @@ -205,7 +212,7 @@ native_work_processed_syscall: (pNonSys) break 0 // bug check: we shouldn't be here if pNonSys is TRUE!
;; invala // M0|1 invalidate ALAT - rsm psr.i | psr.ic // M2 turn off interrupts and interruption collection + RSM_PSR_I_IC(r28, r29, r30) // M2 turn off interrupts and interruption collection cmp.eq p9,p0=r0,r0 // A set p9 to indicate that we should restore cr.ifs ld8 r29=[r2],16 // M0|1 load cr.ipsr @@ -217,7 +224,7 @@ native_work_processed_syscall: (pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 ;; ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs -(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled + MOV_FROM_PSR(pKStk, r22, r21) // M2 read PSR now that interrupts are disabled nop 0 ;; ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0 @@ -246,7 +253,7 @@ native_work_processed_syscall: srlz.d // M0 ensure interruption collection is off (for cover) shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition - cover // B add current frame into dirty partition & set cr.ifs + COVER // B add current frame into dirty partition & set cr.ifs ;; mov r19=ar.bsp // M2 get new backing store pointer mov f10=f0 // F clear f10 @@ -261,10 +268,11 @@ native_work_processed_syscall: mov.m ar.ssd=r0 // M2 clear ar.ssd mov f11=f0 // F clear f11 br.cond.sptk.many rbs_switch // B -END(native_leave_syscall) +END(__paravirt_leave_syscall) -GLOBAL_ENTRY(native_leave_kernel) +GLOBAL_ENTRY(__paravirt_leave_kernel) PT_REGS_UNWIND_INFO(0) + BR_IF_NATIVE(native_leave_kernel, r22, p7) /* * work.need_resched etc. mustn't get changed by this CPU before it returns to * user- or fsys-mode, hence we disable interrupts early on. @@ -287,7 +295,7 @@ GLOBAL_ENTRY(native_leave_kernel) ;; cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) #else -(pUStk) rsm psr.i + RSM_PSR_I(pUStk, r17, r31) cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk #endif @@ -335,7 +343,7 @@ GLOBAL_ENTRY(native_leave_kernel) mov ar.csd=r30 mov ar.ssd=r31 ;; - rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection + RSM_PSR_I_IC(r23, r22, r25) // initiate turning off of interrupt and interruption collection invala // invalidate ALAT ;; ld8.fill r22=[r2],24 @@ -367,13 +375,13 @@ GLOBAL_ENTRY(native_leave_kernel) mov ar.ccv=r15 ;; ldf.fill f11=[r2] - bsw.0 // switch back to bank 0 (no stop bit required beforehand...) + BSW_0(r2, r3, r15) // switch back to bank 0 (no stop bit required beforehand...) ;; (pUStk) mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency) adds r16=PT(CR_IPSR)+16,r12 adds r17=PT(CR_IIP)+16,r12 -(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled + MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled nop.i 0 nop.i 0 ;; @@ -421,7 +429,7 @@ GLOBAL_ENTRY(native_leave_kernel) * NOTE: alloc, loadrs, and cover can't be predicated.
*/ (pNonSys) br.cond.dpnt dont_preserve_current_frame - cover // add current frame into dirty partition and set cr.ifs + COVER // add current frame into dirty partition and set cr.ifs ;; mov r19=ar.bsp // get new backing store pointer rbs_switch: @@ -524,16 +532,16 @@ skip_rbs_switch: (pKStk) dep r29=r22,r29,21,1 // I0 update ipsr.pp with psr.pp (pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise ;; - mov cr.ipsr=r29 // M2 + MOV_TO_IPSR(r29, r25) // M2 mov ar.pfs=r26 // I0 (pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise -(p9) mov cr.ifs=r30 // M2 + MOV_TO_IFS(p9, r30, r25)// M2 mov b0=r21 // I0 (pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise mov ar.fpsr=r20 // M2 - mov cr.iip=r28 // M2 + MOV_TO_IIP(r28, r25) // M2 nop 0 ;; (pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode @@ -542,7 +550,7 @@ skip_rbs_switch: mov ar.rsc=r27 // M2 mov pr=r31,-1 // I0 - rfi // B + RFI // B /* * On entry: @@ -568,28 +576,28 @@ skip_rbs_switch: #endif br.call.spnt.many rp=schedule .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 - rsm psr.i // disable interrupts + RSM_PSR_I(p0, r2, r20) // disable interrupts ;; #ifdef CONFIG_PREEMPT (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 ;; (pKStk) st4 [r20]=r0 // preempt_count() <- 0 #endif -(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end +(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end br.cond.sptk.many .work_processed_kernel // re-check .notify: (pUStk) br.call.spnt.many rp=notify_resume_user .ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 -(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end +(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end br.cond.sptk.many .work_processed_kernel // don't re-check -.global ia64_work_pending_syscall_end; -ia64_work_pending_syscall_end: +.global __paravirt_pending_syscall_end; +__paravirt_pending_syscall_end: adds r2=PT(R8)+16,r12 adds r3=PT(R10)+16,r12 ;; ld8 r8=[r2] ld8 r10=[r3] - br.cond.sptk.many ia64_work_processed_syscall // re-check -END(native_leave_kernel) + br.cond.sptk.many __paravirt_work_processed_syscall_target // re-check +END(__paravirt_leave_kernel) -- 1.5.3 |
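Here the native bodies keep their code but are renamed __paravirt_*, and BR_IF_NATIVE makes each of them dual-purpose: a native kernel branches straight back to the untouched native_* entry, while a Xen kernel falls through into the paravirtualized body. The macro itself lives in inst_paravirt.h, which the patch includes but does not show; a plausible shape, assuming the running_on_xen flag that is_running_on_xen() reads elsewhere in the port (all names here are assumptions, not quoted from the series):

/* branch to `target' unless running on Xen;
 * `reg' is a scratch register, `pred' a scratch predicate */
#define BR_IF_NATIVE(target, reg, pred)		\
	movl reg = running_on_xen;		\
	;;					\
	ld4 reg = [reg];			\
	;;					\
	cmp.eq pred, p0 = 0, reg;		\
	;;					\
(pred)	br.cond.sptk.many target

The cost on Xen is one load plus a predicated branch per entry; a native kernel takes the branch immediately, and the series' binary-patching machinery can presumably eliminate even that.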
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:23
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/kernel/irq_ia64.c | 21 ++++++++++---- arch/ia64/kernel/paravirt.c | 22 +++++++++++++++ include/asm-ia64/hw_irq.h | 20 ++++++++++--- include/asm-ia64/paravirt.h | 63 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 115 insertions(+), 11 deletions(-) diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 2b8cf6e..5259faa 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -196,7 +196,7 @@ static void clear_irq_vector(int irq) } int -assign_irq_vector (int irq) +native_assign_irq_vector (int irq) { unsigned long flags; int vector, cpu; @@ -222,7 +222,7 @@ assign_irq_vector (int irq) } void -free_irq_vector (int vector) +native_free_irq_vector (int vector) { if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR) @@ -623,7 +623,7 @@ static struct irqaction tlb_irqaction = { #endif void -register_percpu_irq (ia64_vector vec, struct irqaction *action) +native_register_percpu_irq (ia64_vector vec, struct irqaction *action) { irq_desc_t *desc; unsigned int irq; @@ -638,13 +638,21 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action) } void __init +native_init_IRQ_early(void) +{ +#ifdef CONFIG_SMP + register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction); + register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction); +#endif +} + +void __init init_IRQ (void) { + paravirt_init_IRQ_early(); register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); #ifdef CONFIG_SMP register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction); - register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction); - register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction); #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) if (vector_domain_type != VECTOR_DOMAIN_NONE) { BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR); @@ -657,10 +665,11 @@ init_IRQ (void) pfm_init_percpu(); #endif platform_irq_init(); + paravirt_init_IRQ_late(); } void -ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect) +native_send_ipi (int cpu, int vector, int delivery_mode, int redirect) { void __iomem *ipi_addr; unsigned long ipi_data; diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c index 7e6a2d0..ce0b23b 100644 --- a/arch/ia64/kernel/paravirt.c +++ b/arch/ia64/kernel/paravirt.c @@ -70,3 +70,25 @@ struct pv_iosapic_ops pv_iosapic_ops = { .__read = native_iosapic_read, .__write = native_iosapic_write, }; + +/*************************************************************************** + * pv_irq_ops + * irq operations + */ + +void +ia64_send_ipi(int cpu, int vector, int delivery_mode, int redirect) +{ + pv_irq_ops.send_ipi(cpu, vector, delivery_mode, redirect); +} + +struct pv_irq_ops pv_irq_ops = { + .init_IRQ_early = native_init_IRQ_early, + + .assign_irq_vector = native_assign_irq_vector, + .free_irq_vector = native_free_irq_vector, + .register_percpu_irq = native_register_percpu_irq, + + .send_ipi = native_send_ipi, + .resend_irq = native_resend_irq, +}; diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h index 76366dc..678efec 100644 --- a/include/asm-ia64/hw_irq.h +++ b/include/asm-ia64/hw_irq.h @@ -104,13 +104,23 @@ DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq); extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */ +#ifdef CONFIG_PARAVIRT_GUEST +#include <asm/paravirt.h> +#else +#define assign_irq_vector native_assign_irq_vector +#define free_irq_vector native_free_irq_vector +#define ia64_send_ipi native_send_ipi
+#define ia64_resend_irq native_resend_irq +#endif + +extern void native_init_IRQ_early(void); extern int bind_irq_vector(int irq, int vector, cpumask_t domain); -extern int assign_irq_vector (int irq); /* allocate a free vector */ -extern void free_irq_vector (int vector); +extern int native_assign_irq_vector (int irq); /* allocate a free vector */ +extern void native_free_irq_vector (int vector); extern int reserve_irq_vector (int vector); extern void __setup_vector_irq(int cpu); -extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); -extern void register_percpu_irq (ia64_vector vec, struct irqaction *action); +extern void native_send_ipi (int cpu, int vector, int delivery_mode, int redirect); +extern void native_register_percpu_irq (ia64_vector vec, struct irqaction *action); extern int check_irq_used (int irq); extern void destroy_and_reserve_irq (unsigned int irq); @@ -122,7 +132,7 @@ static inline int irq_prepare_move(int irq, int cpu) { return 0; } static inline void irq_complete_move(unsigned int irq) {} #endif -static inline void ia64_resend_irq(unsigned int vector) +static inline void native_resend_irq(unsigned int vector) { platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0); } diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h index 9efeda9..ace6653 100644 --- a/include/asm-ia64/paravirt.h +++ b/include/asm-ia64/paravirt.h @@ -28,6 +28,7 @@ #ifndef __ASSEMBLY__ +#include <asm/hw_irq.h> #include <asm/meminit.h> /****************************************************************************** @@ -192,6 +193,65 @@ __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) return pv_iosapic_ops.__write(iosapic, reg, val); } +/****************************************************************************** + * replacement of irq operations. + */ + +struct pv_irq_ops { + void (*init_IRQ_early)(void); + void (*init_IRQ_late)(void); + + int (*assign_irq_vector)(int irq); + void (*free_irq_vector)(int vector); + + void (*register_percpu_irq)(ia64_vector vec, + struct irqaction *action); + + void (*send_ipi)(int cpu, int vector, int delivery_mode, int redirect); + void (*resend_irq)(unsigned int vector); +}; + +extern struct pv_irq_ops pv_irq_ops; + +static inline void +paravirt_init_IRQ_early(void) +{ + pv_irq_ops.init_IRQ_early(); +} + +static inline void +paravirt_init_IRQ_late(void) +{ + if (pv_irq_ops.init_IRQ_late) + pv_irq_ops.init_IRQ_late(); +} + +static inline int +assign_irq_vector(int irq) +{ + return pv_irq_ops.assign_irq_vector(irq); +} + +static inline void
free_irq_vector(int vector) +{ + return pv_irq_ops.free_irq_vector(vector); +} + +static inline void +register_percpu_irq(ia64_vector vec, struct irqaction *action) +{ + pv_irq_ops.register_percpu_irq(vec, action); +} + +void ia64_send_ipi(int cpu, int vector, int delivery_mode, int redirect); + +static inline void +ia64_resend_irq(unsigned int vector) +{ + pv_irq_ops.resend_irq(vector); +} + #endif /* __ASSEMBLY__ */ #else @@ -213,6 +273,9 @@ __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) #define paravirt_bundle_patch_module(start, end) do { } while (0) #define paravirt_inst_patch_module(start, end) do { } while (0) +#define paravirt_init_IRQ_early() do { } while (0) +#define paravirt_init_IRQ_late() do { } while (0) + #endif /* __ASSEMBLY__ */ -- 1.5.3 |
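With these hooks in place, generic code keeps calling assign_irq_vector(), register_percpu_irq() and friends; on a CONFIG_PARAVIRT_GUEST build those names become inline wrappers that dispatch through pv_irq_ops, which this patch points at the native_* implementations. A hypervisor port would swap the table before init_IRQ() runs. A hypothetical Xen-side override, purely for illustration (only the struct layout comes from the patch; every xen_* name here is assumed):

/* hypothetical override, installed before init_IRQ() */
static struct pv_irq_ops xen_irq_ops __initdata = {
	.init_IRQ_early      = xen_init_IRQ_early, /* event channels, not IPI vectors */
	.init_IRQ_late       = xen_init_IRQ_late,
	.assign_irq_vector   = xen_assign_irq_vector,
	.free_irq_vector     = xen_free_irq_vector,
	.register_percpu_irq = xen_register_percpu_irq,
	.send_ipi            = xen_send_ipi,
	.resend_irq          = xen_resend_irq,
};

void __init
xen_setup_irq_ops(void)
{
	pv_irq_ops = xen_irq_ops;
}

Splitting init_IRQ() into an early hook plus the existing body is what lets such a port register the reschedule and TLB-flush IPIs as event-channel callbacks rather than native percpu vectors. Note the asymmetry in the wrappers: init_IRQ_early is mandatory, while init_IRQ_late is optional and skipped when NULL.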
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:23
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/kernel/acpi.c | 4 ++++ arch/ia64/xen/machvec.c | 4 ++++ include/asm-ia64/machvec.h | 2 ++ include/asm-ia64/machvec_xen.h | 22 ++++++++++++++++++++++ 4 files changed, 32 insertions(+), 0 deletions(-) create mode 100644 arch/ia64/xen/machvec.c create mode 100644 include/asm-ia64/machvec_xen.h diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 78f28d8..adf475a 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -118,6 +118,8 @@ acpi_get_sysname(void) return "hpzx1"; } else if (!strcmp(hdr->oem_id, "SGI")) { return "sn2"; + } else if (is_running_on_xen() && !strcmp(hdr->oem_id, "XEN")) { + return "xen"; } return "dig"; @@ -132,6 +134,8 @@ acpi_get_sysname(void) return "sn2"; # elif defined (CONFIG_IA64_DIG) return "dig"; +# elif defined (CONFIG_IA64_XEN) + return "xen"; # else # error Unknown platform. Fix acpi.c. # endif diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c new file mode 100644 index 0000000..4ad588a --- /dev/null +++ b/arch/ia64/xen/machvec.c @@ -0,0 +1,4 @@ +#define MACHVEC_PLATFORM_NAME xen +#define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h> +#include <asm/machvec_init.h> + diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index c201a20..cea8d63 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h @@ -120,6 +120,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); # include <asm/machvec_hpzx1_swiotlb.h> # elif defined (CONFIG_IA64_SGI_SN2) # include <asm/machvec_sn2.h> +# elif defined (CONFIG_IA64_XEN) +# include <asm/machvec_xen.h> # elif defined (CONFIG_IA64_GENERIC) # ifdef MACHVEC_PLATFORM_HEADER diff --git a/include/asm-ia64/machvec_xen.h b/include/asm-ia64/machvec_xen.h new file mode 100644 index 0000000..ed0f84d --- /dev/null +++ b/include/asm-ia64/machvec_xen.h @@ -0,0 +1,22 @@ +#ifndef _ASM_IA64_MACHVEC_XEN_h +#define _ASM_IA64_MACHVEC_XEN_h + +extern ia64_mv_setup_t xen_setup; +extern ia64_mv_cpu_init_t xen_cpu_init; +extern ia64_mv_irq_init_t xen_irq_init; +extern ia64_mv_send_ipi_t xen_platform_send_ipi; + +/* + * This stuff has dual use! + * + * For a generic kernel, the macros are used to initialize the + * platform's machvec structure. When compiling a non-generic kernel, + * the macros are used directly. + */ +#define platform_name "xen" +#define platform_setup xen_setup +#define platform_cpu_init xen_cpu_init +#define platform_irq_init xen_irq_init +#define platform_send_ipi xen_platform_send_ipi + +#endif /* _ASM_IA64_MACHVEC_XEN_h */ -- 1.5.3 |
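arch/ia64/xen/machvec.c is the standard two-define machvec idiom: name the platform, point MACHVEC_PLATFORM_HEADER at machvec_xen.h, and let asm/machvec_init.h emit the machine vector from the platform_* defines. Roughly what that expansion produces (a paraphrased sketch; the real ia64_machine_vector has many more ops, which machvec_init fills with defaults):

struct ia64_machine_vector ia64_mv_xen = {
	.name     = "xen",
	.setup    = xen_setup,
	.cpu_init = xen_cpu_init,
	.irq_init = xen_irq_init,
	.send_ipi = xen_platform_send_ipi,
	/* ops not listed here keep their machvec defaults */
};

The acpi_get_sysname() hunk is what selects this vector on a generic kernel: a guest that passes is_running_on_xen() and whose ACPI OEM id is "XEN" reports platform "xen", and machvec initialization then binds ia64_mv to ia64_mv_xen by name.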
From: Isaku Y. <yam...@va...> - 2008-03-05 18:19:23
Signed-off-by: Isaku Yamahata <yam...@va...> --- arch/ia64/xen/xen_pv_ops.c | 53 ++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 53 insertions(+), 0 deletions(-) diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c index a2a7493..c35bb23 100644 --- a/arch/ia64/xen/xen_pv_ops.c +++ b/arch/ia64/xen/xen_pv_ops.c @@ -21,6 +21,7 @@ */ #include <linux/console.h> +#include <linux/irq.h> #include <linux/kernel.h> #include <linux/notifier.h> #include <linux/pm.h> @@ -251,6 +252,57 @@ static const struct pv_init_ops xen_init_ops __initdata = { /*************************************************************************** + * pv_iosapic_ops + * iosapic read/write hooks. + */ +static void +xen_pcat_compat_init(void) +{ + /* nothing */ +} + +static struct irq_chip* +xen_iosapic_get_irq_chip(unsigned long trigger) +{ + return NULL; +} + +static unsigned int +xen_iosapic_read(char __iomem *iosapic, unsigned int reg) +{ + struct physdev_apic apic_op; + int ret; + + apic_op.apic_physbase = (unsigned long)iosapic - + __IA64_UNCACHED_OFFSET; + apic_op.reg = reg; + ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); + if (ret) + return ret; + return apic_op.value; +} + +static void +xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) +{ + struct physdev_apic apic_op; + + apic_op.apic_physbase = (unsigned long)iosapic - + __IA64_UNCACHED_OFFSET; + apic_op.reg = reg; + apic_op.value = val; + HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); +} + +static const struct pv_iosapic_ops xen_iosapic_ops __initdata = { + .pcat_compat_init = xen_pcat_compat_init, + .get_irq_chip = xen_iosapic_get_irq_chip, + + .__read = xen_iosapic_read, + .__write = xen_iosapic_write, +}; + +/*************************************************************************** * pv_ops initialization */ @@ -260,4 +312,5 @@ xen_setup_pv_ops(void) xen_info_init(); pv_info = xen_info; pv_init_ops = xen_init_ops; + pv_iosapic_ops = xen_iosapic_ops; } -- 1.5.3 |
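The IOSAPIC is a device a Xen guest may not map directly, so both accessors become hypercalls: the register index (plus, for writes, the value) travels in a struct physdev_apic keyed by the IOSAPIC's machine-physical base address. For contrast, the native hooks these replace boil down to the usual select/window MMIO pair (a sketch based on the standard IOSAPIC register protocol; the exact native helpers are defined elsewhere in the series):

static unsigned int
native_iosapic_read(char __iomem *iosapic, unsigned int reg)
{
	writel(reg, iosapic + IOSAPIC_REG_SELECT);	/* select register */
	return readl(iosapic + IOSAPIC_WINDOW);		/* read it back */
}

static void
native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
	writel(reg, iosapic + IOSAPIC_REG_SELECT);
	writel(val, iosapic + IOSAPIC_WINDOW);
}

The empty xen_pcat_compat_init() reflects that the hypervisor owns the legacy 8259 setup, and xen_iosapic_get_irq_chip() returning NULL presumably leaves interrupts on Xen's event-channel irq_chip rather than the native IOSAPIC one.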