@@ -33,6 +33,7 @@ obj-y += hvm.o
obj-y += device.o
obj-y += decode.o
obj-y += processor.o
+obj-y += save.o
#obj-bin-y += ....o
@@ -9,31 +9,115 @@
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
+#include <xen/hvm/save.h>
+#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <public/domctl.h>
long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
+ long ret = 0;
+ bool_t copyback = 0;
+
switch ( domctl->cmd )
{
+ case XEN_DOMCTL_sethvmcontext:
+ {
+ struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size };
+
+ ret = -ENOMEM;
+ if ( (c.data = xmalloc_bytes(c.size)) == NULL )
+ goto sethvmcontext_out;
+
+ ret = -EFAULT;
+        if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0 )
+ goto sethvmcontext_out;
+
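+        /* Pause the domain: vcpus must not run while their state is rewritten */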
+ domain_pause(d);
+ ret = hvm_load(d, &c);
+ domain_unpause(d);
+
+ sethvmcontext_out:
+ if ( c.data != NULL )
+ xfree(c.data);
+ }
+ break;
+
+ case XEN_DOMCTL_gethvmcontext:
+ {
+ struct hvm_domain_context c = { 0 };
+
+ ret = -EINVAL;
+
+ c.size = hvm_save_size(d);
+
+ if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
+ {
+ /* Client is querying for the correct buffer size */
+ domctl->u.hvmcontext.size = c.size;
+ ret = 0;
+ goto gethvmcontext_out;
+ }
+
+ /* Check that the client has a big enough buffer */
+ ret = -ENOSPC;
+ if ( domctl->u.hvmcontext.size < c.size )
+ {
+            printk(XENLOG_G_ERR "(gethvmcontext) buffer too small: %u < %u\n",
+                   domctl->u.hvmcontext.size, c.size);
+ goto gethvmcontext_out;
+ }
+
+ /* Allocate our own marshalling buffer */
+ ret = -ENOMEM;
+ if ( (c.data = xmalloc_bytes(c.size)) == NULL )
+ {
+            printk(XENLOG_G_ERR "(gethvmcontext) xmalloc_bytes(%u) failed\n", c.size);
+ goto gethvmcontext_out;
+ }
+
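+        /* Pause the domain so its state cannot change while it is marshalled */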
+ domain_pause(d);
+ ret = hvm_save(d, &c);
+ domain_unpause(d);
+
+ domctl->u.hvmcontext.size = c.cur;
+ if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
+ {
+            printk(XENLOG_G_ERR "(gethvmcontext) copy to guest failed\n");
+ ret = -EFAULT;
+ }
+
+ gethvmcontext_out:
+ copyback = 1;
+
+ if ( c.data != NULL )
+ xfree(c.data);
+ }
+ break;
+
case XEN_DOMCTL_cacheflush:
{
unsigned long s = domctl->u.cacheflush.start_pfn;
unsigned long e = s + domctl->u.cacheflush.nr_pfns;
         if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) )
-            return -EINVAL;
-        if ( e < s )
-            return -EINVAL;
-        return p2m_cache_flush(d, s, e);
+            ret = -EINVAL;
+        else if ( e < s )
+            ret = -EINVAL;
+        else
+            ret = p2m_cache_flush(d, s, e);
     }
+    break;
+
     default:
- return subarch_do_domctl(domctl, d, u_domctl);
+ ret = subarch_do_domctl(domctl, d, u_domctl);
}
+
+ if ( copyback && __copy_to_guest(u_domctl, domctl, 1) )
+ ret = -EFAULT;
+
+ return ret;
}
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
@@ -7,14 +7,15 @@
#include <xsm/xsm.h>
+#include <xen/hvm/save.h>
#include <public/xen.h>
#include <public/hvm/params.h>
#include <public/hvm/hvm_op.h>
#include <asm/hypercall.h>
+#include <asm/gic.h>
long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
-
{
long rc = 0;
@@ -65,3 +66,505 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
return rc;
}
+
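+/* Copy one virtual GIC rank into its public save-record layout
+ * (struct vgic_rank); returns 0 on success. */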
+static int vgic_irq_rank_save(struct vcpu *v,
+ struct vgic_rank *ext,
+ struct vgic_irq_rank *rank)
+{
+    /* The size checks are on compile-time constants; do them before taking
+     * the lock so that no error path returns with the lock still held. */
+    if ( sizeof(rank->ipriority) != sizeof(ext->ipriority) )
+    {
+        dprintk(XENLOG_G_ERR, "hvm_hw_gic: check ipriority dumping space\n");
+        return -EINVAL;
+    }
+    if ( sizeof(rank->itargets) != sizeof(ext->itargets) )
+    {
+        dprintk(XENLOG_G_ERR, "hvm_hw_gic: check itargets dumping space\n");
+        return -EINVAL;
+    }
+
+    spin_lock(&rank->lock);
+
+    /* Some VGIC registers are not used yet; they are saved for future use. */
+    /* IENABLE, IACTIVE, IPEND, PENDSGI registers */
+    ext->ienable = rank->ienable;
+    ext->iactive = rank->iactive;
+    ext->ipend = rank->ipend;
+    ext->pendsgi = rank->pendsgi;
+
+    /* ICFG */
+    ext->icfg[0] = rank->icfg[0];
+    ext->icfg[1] = rank->icfg[1];
+
+    /* IPRIORITY */
+    memcpy(ext->ipriority, rank->ipriority, sizeof(rank->ipriority));
+
+    /* ITARGETS */
+    memcpy(ext->itargets, rank->itargets, sizeof(rank->itargets));
+
+    spin_unlock(&rank->lock);
+    return 0;
+}
+
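+/* Load one virtual GIC rank from its public save-record layout, then
+ * re-enable every interrupt that the restored IENABLE marks enabled. */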
+static int vgic_irq_rank_restore(struct vcpu *v,
+ struct vgic_irq_rank *rank,
+ struct vgic_rank *ext)
+{
+ struct pending_irq *p;
+ unsigned int irq = 0;
+ const unsigned long enable_bits = ext->ienable;
+
+    /* The size checks are on compile-time constants; do them before taking
+     * the lock so that no error path returns with the lock still held. */
+    if ( sizeof(rank->ipriority) != sizeof(ext->ipriority) )
+    {
+        dprintk(XENLOG_G_ERR, "hvm_hw_gic: check ipriority dumping space\n");
+        return -EINVAL;
+    }
+    if ( sizeof(rank->itargets) != sizeof(ext->itargets) )
+    {
+        dprintk(XENLOG_G_ERR, "hvm_hw_gic: check itargets dumping space\n");
+        return -EINVAL;
+    }
+
+    spin_lock(&rank->lock);
+
+    /* IENABLE, IACTIVE, IPEND, PENDSGI registers */
+    rank->ienable = ext->ienable;
+    rank->iactive = ext->iactive;
+    rank->ipend = ext->ipend;
+    rank->pendsgi = ext->pendsgi;
+
+    /* ICFG */
+    rank->icfg[0] = ext->icfg[0];
+    rank->icfg[1] = ext->icfg[1];
+
+    /* IPRIORITY */
+    memcpy(rank->ipriority, ext->ipriority, sizeof(rank->ipriority));
+
+    /* ITARGETS */
+    memcpy(rank->itargets, ext->itargets, sizeof(rank->itargets));
+
+    /* Mark every interrupt enabled in the restored IENABLE as enabled */
+    while ( (irq = find_next_bit(&enable_bits, 32, irq)) < 32 )
+    {
+        p = irq_to_pending(v, irq);
+        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+        irq++;
+    }
+
+    spin_unlock(&rank->lock);
+    return 0;
+}
+
+
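+/* Save per-vcpu GIC state: hypervisor control registers, list registers
+ * and masks, and the private (PPI/SGI) interrupt rank. */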
+static int gic_save(struct domain *d, hvm_domain_context_t *h)
+{
+ struct hvm_hw_gic ctxt;
+ struct vcpu *v;
+
+ /* Save the state of GICs */
+ for_each_vcpu( d, v )
+    {
+        /* Zero the record: ctxt.gic_lr may be larger than the saved LR
+         * array, and uninitialised bytes must not leak into the stream. */
+        memset(&ctxt, 0, sizeof(ctxt));
+
+        ctxt.gic_hcr = v->arch.gic_hcr;
+ ctxt.gic_vmcr = v->arch.gic_vmcr;
+ ctxt.gic_apr = v->arch.gic_apr;
+
+        /* Save the list registers and masks.
+         * NOTE: Saving/restoring them is not strictly necessary, but the LR
+         * state can affect post-migration downtime (still to be measured).
+         */
+        if ( sizeof(v->arch.gic_lr) > sizeof(ctxt.gic_lr) )
+ {
+ dprintk(XENLOG_G_ERR, "hvm_hw_gic: increase LR dumping space\n");
+ return -EINVAL;
+ }
+ memcpy(ctxt.gic_lr, v->arch.gic_lr, sizeof(v->arch.gic_lr));
+ ctxt.lr_mask = v->arch.lr_mask;
+ ctxt.event_mask = v->arch.event_mask;
+
+ /* Save PPI states (per-CPU), necessary for SMP-enabled guests */
+ if ( vgic_irq_rank_save(v, &ctxt.ppi_state,
+ &v->arch.vgic.private_irqs) )
+ return 1;
+
+ if ( hvm_save_entry(GIC, v->vcpu_id, h, &ctxt) != 0 )
+ return 1;
+ }
+
+ return 0;
+}
+
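+/* Load per-vcpu GIC state; the instance number of the record selects the
+ * vcpu being restored. */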
+static int gic_load(struct domain *d, hvm_domain_context_t *h)
+{
+ int vcpuid;
+ struct hvm_hw_gic ctxt;
+ struct vcpu *v;
+
+ /* Which vcpu is this? */
+ vcpuid = hvm_load_instance(h);
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+ {
+ dprintk(XENLOG_G_ERR, "HVM restore: dom%u has no vcpu%u\n",
+ d->domain_id, vcpuid);
+ return -EINVAL;
+ }
+
+ if ( hvm_load_entry(GIC, h, &ctxt) != 0 )
+ return -EINVAL;
+
+ v->arch.gic_hcr = ctxt.gic_hcr;
+ v->arch.gic_vmcr = ctxt.gic_vmcr;
+ v->arch.gic_apr = ctxt.gic_apr;
+
+ /* Restore list registers and masks */
+    if ( sizeof(v->arch.gic_lr) > sizeof(ctxt.gic_lr) )
+ {
+ dprintk(XENLOG_G_ERR, "hvm_hw_gic: increase LR dumping space\n");
+ return -EINVAL;
+ }
+ memcpy(v->arch.gic_lr, ctxt.gic_lr, sizeof(v->arch.gic_lr));
+ v->arch.lr_mask = ctxt.lr_mask;
+ v->arch.event_mask = ctxt.event_mask;
+
+ /* Restore PPI states */
+ if ( vgic_irq_rank_restore(v, &v->arch.vgic.private_irqs,
+ &ctxt.ppi_state) )
+        return -EINVAL;
+
+ return 0;
+}
+
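+/* Register the handlers: one GIC record is emitted per vcpu */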
+HVM_REGISTER_SAVE_RESTORE(GIC, gic_save, gic_load, 1, HVMSR_PER_VCPU);
+
+static int timer_save(struct domain *d, hvm_domain_context_t *h)
+{
+ struct hvm_hw_timer ctxt;
+ struct vcpu *v;
+ struct vtimer *t;
+ int i;
+
+ /* Save the state of vtimer and ptimer */
+ for_each_vcpu( d, v )
+ {
+ t = &v->arch.virt_timer;
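+        /* Two passes: i == 0 saves the virtual timer, i == 1 the physical */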
+ for ( i = 0; i < 2; i++ )
+ {
+ ctxt.cval = t->cval;
+ ctxt.ctl = t->ctl;
+ ctxt.vtb_offset = i ? d->arch.phys_timer_base.offset :
+ d->arch.virt_timer_base.offset;
+ ctxt.type = i ? TIMER_TYPE_PHYS : TIMER_TYPE_VIRT;
+
+ if ( hvm_save_entry(A15_TIMER, v->vcpu_id, h, &ctxt) != 0 )
+ return 1;
+
+ t = &v->arch.phys_timer;
+ }
+ }
+
+ return 0;
+}
+
+static int timer_load(struct domain *d, hvm_domain_context_t *h)
+{
+ int vcpuid;
+ struct hvm_hw_timer ctxt;
+ struct vcpu *v;
+ struct vtimer *t = NULL;
+
+ /* Which vcpu is this? */
+ vcpuid = hvm_load_instance(h);
+
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+ {
+ dprintk(XENLOG_G_ERR, "HVM restore: dom%u has no vcpu%u\n",
+ d->domain_id, vcpuid);
+ return -EINVAL;
+ }
+
+ if ( hvm_load_entry(A15_TIMER, h, &ctxt) != 0 )
+ return -EINVAL;
+
+    if ( ctxt.type == TIMER_TYPE_VIRT )
+    {
+        t = &v->arch.virt_timer;
+        d->arch.virt_timer_base.offset = ctxt.vtb_offset;
+    }
+ else
+ {
+ t = &v->arch.phys_timer;
+ d->arch.phys_timer_base.offset = ctxt.vtb_offset;
+ }
+
+ t->cval = ctxt.cval;
+ t->ctl = ctxt.ctl;
+ t->v = v;
+
+ return 0;
+}
+
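+/* Two timer records are emitted per vcpu: one virtual, one physical */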
+HVM_REGISTER_SAVE_RESTORE(A15_TIMER, timer_save, timer_load, 2, HVMSR_PER_VCPU);
+
+static int cpu_save(struct domain *d, hvm_domain_context_t *h)
+{
+ struct hvm_hw_cpu ctxt;
+ struct vcpu_guest_core_regs c;
+ struct vcpu *v;
+
+ /* Save the state of CPU */
+ for_each_vcpu( d, v )
+ {
+ memset(&ctxt, 0, sizeof(ctxt));
+
+ ctxt.sctlr = v->arch.sctlr;
+ ctxt.ttbr0 = v->arch.ttbr0;
+ ctxt.ttbr1 = v->arch.ttbr1;
+ ctxt.ttbcr = v->arch.ttbcr;
+
+ ctxt.dacr = v->arch.dacr;
+ ctxt.ifsr = v->arch.ifsr;
+#ifdef CONFIG_ARM_32
+ ctxt.ifar = v->arch.ifar;
+ ctxt.dfar = v->arch.dfar;
+ ctxt.dfsr = v->arch.dfsr;
+#else
+ ctxt.far = v->arch.far;
+ ctxt.esr = v->arch.esr;
+#endif
+
+#ifdef CONFIG_ARM_32
+ ctxt.mair0 = v->arch.mair0;
+ ctxt.mair1 = v->arch.mair1;
+#else
+ ctxt.mair0 = v->arch.mair;
+#endif
+        /* Control Registers (sctlr is already saved above) */
+        ctxt.actlr = v->arch.actlr;
+        ctxt.cpacr = v->arch.cpacr;
+
+ ctxt.contextidr = v->arch.contextidr;
+ ctxt.tpidr_el0 = v->arch.tpidr_el0;
+ ctxt.tpidr_el1 = v->arch.tpidr_el1;
+ ctxt.tpidrro_el0 = v->arch.tpidrro_el0;
+
+ /* CP 15 */
+ ctxt.csselr = v->arch.csselr;
+
+ ctxt.afsr0 = v->arch.afsr0;
+ ctxt.afsr1 = v->arch.afsr1;
+ ctxt.vbar = v->arch.vbar;
+ ctxt.par = v->arch.par;
+ ctxt.teecr = v->arch.teecr;
+ ctxt.teehbr = v->arch.teehbr;
+
+#ifdef CONFIG_ARM_32
+ ctxt.joscr = v->arch.joscr;
+ ctxt.jmcr = v->arch.jmcr;
+#endif
+
+ memset(&c, 0, sizeof(c));
+
+ /* get guest core registers */
+ vcpu_regs_hyp_to_user(v, &c);
+
+ ctxt.x0 = c.x0;
+ ctxt.x1 = c.x1;
+ ctxt.x2 = c.x2;
+ ctxt.x3 = c.x3;
+ ctxt.x4 = c.x4;
+ ctxt.x5 = c.x5;
+ ctxt.x6 = c.x6;
+ ctxt.x7 = c.x7;
+ ctxt.x8 = c.x8;
+ ctxt.x9 = c.x9;
+ ctxt.x10 = c.x10;
+ ctxt.x11 = c.x11;
+ ctxt.x12 = c.x12;
+ ctxt.x13 = c.x13;
+ ctxt.x14 = c.x14;
+ ctxt.x15 = c.x15;
+ ctxt.x16 = c.x16;
+ ctxt.x17 = c.x17;
+ ctxt.x18 = c.x18;
+ ctxt.x19 = c.x19;
+ ctxt.x20 = c.x20;
+ ctxt.x21 = c.x21;
+ ctxt.x22 = c.x22;
+ ctxt.x23 = c.x23;
+ ctxt.x24 = c.x24;
+ ctxt.x25 = c.x25;
+ ctxt.x26 = c.x26;
+ ctxt.x27 = c.x27;
+ ctxt.x28 = c.x28;
+ ctxt.x29 = c.x29;
+ ctxt.x30 = c.x30;
+ ctxt.pc64 = c.pc64;
+ ctxt.cpsr = c.cpsr;
+ ctxt.spsr_el1 = c.spsr_el1; /* spsr_svc */
+
+#ifdef CONFIG_ARM_32
+ ctxt.spsr_fiq = c.spsr_fiq;
+ ctxt.spsr_irq = c.spsr_irq;
+ ctxt.spsr_und = c.spsr_und;
+ ctxt.spsr_abt = c.spsr_abt;
+#endif
+#ifdef CONFIG_ARM_64
+ ctxt.sp_el0 = c.sp_el0;
+ ctxt.sp_el1 = c.sp_el1;
+ ctxt.elr_el1 = c.elr_el1;
+#endif
+
+ /* check VFP state size before dumping */
+        if ( sizeof(v->arch.vfp) > sizeof(ctxt.vfp) )
+ {
+ dprintk(XENLOG_G_ERR, "hvm_hw_cpu: increase VFP dumping space\n");
+ return -EINVAL;
+ }
+        memcpy(&ctxt.vfp, &v->arch.vfp, sizeof(v->arch.vfp));
+
+ ctxt.pause_flags = v->pause_flags;
+
+ if ( hvm_save_entry(VCPU, v->vcpu_id, h, &ctxt) != 0 )
+ return 1;
+ }
+ return 0;
+}
+
+static int cpu_load(struct domain *d, hvm_domain_context_t *h)
+{
+ int vcpuid;
+ struct hvm_hw_cpu ctxt;
+ struct vcpu *v;
+ struct vcpu_guest_core_regs c;
+
+ /* Which vcpu is this? */
+ vcpuid = hvm_load_instance(h);
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+ {
+ dprintk(XENLOG_G_ERR, "HVM restore: dom%u has no vcpu%u\n",
+ d->domain_id, vcpuid);
+ return -EINVAL;
+ }
+
+ if ( hvm_load_entry(VCPU, h, &ctxt) != 0 )
+ return -EINVAL;
+
+ v->arch.sctlr = ctxt.sctlr;
+ v->arch.ttbr0 = ctxt.ttbr0;
+ v->arch.ttbr1 = ctxt.ttbr1;
+ v->arch.ttbcr = ctxt.ttbcr;
+
+ v->arch.dacr = ctxt.dacr;
+ v->arch.ifsr = ctxt.ifsr;
+#ifdef CONFIG_ARM_32
+ v->arch.ifar = ctxt.ifar;
+ v->arch.dfar = ctxt.dfar;
+ v->arch.dfsr = ctxt.dfsr;
+#else
+ v->arch.far = ctxt.far;
+ v->arch.esr = ctxt.esr;
+#endif
+
+#ifdef CONFIG_ARM_32
+ v->arch.mair0 = ctxt.mair0;
+ v->arch.mair1 = ctxt.mair1;
+#else
+ v->arch.mair = ctxt.mair0;
+#endif
+
+ /* Control Registers */
+ v->arch.actlr = ctxt.actlr;
+ v->arch.cpacr = ctxt.cpacr;
+ v->arch.contextidr = ctxt.contextidr;
+ v->arch.tpidr_el0 = ctxt.tpidr_el0;
+ v->arch.tpidr_el1 = ctxt.tpidr_el1;
+ v->arch.tpidrro_el0 = ctxt.tpidrro_el0;
+
+ /* CP 15 */
+ v->arch.csselr = ctxt.csselr;
+
+ v->arch.afsr0 = ctxt.afsr0;
+ v->arch.afsr1 = ctxt.afsr1;
+ v->arch.vbar = ctxt.vbar;
+ v->arch.par = ctxt.par;
+ v->arch.teecr = ctxt.teecr;
+ v->arch.teehbr = ctxt.teehbr;
+#ifdef CONFIG_ARM_32
+ v->arch.joscr = ctxt.joscr;
+ v->arch.jmcr = ctxt.jmcr;
+#endif
+
+ /* fill guest core registers */
+ memset(&c, 0, sizeof(c));
+ c.x0 = ctxt.x0;
+ c.x1 = ctxt.x1;
+ c.x2 = ctxt.x2;
+ c.x3 = ctxt.x3;
+ c.x4 = ctxt.x4;
+ c.x5 = ctxt.x5;
+ c.x6 = ctxt.x6;
+ c.x7 = ctxt.x7;
+ c.x8 = ctxt.x8;
+ c.x9 = ctxt.x9;
+ c.x10 = ctxt.x10;
+ c.x11 = ctxt.x11;
+ c.x12 = ctxt.x12;
+ c.x13 = ctxt.x13;
+ c.x14 = ctxt.x14;
+ c.x15 = ctxt.x15;
+ c.x16 = ctxt.x16;
+ c.x17 = ctxt.x17;
+ c.x18 = ctxt.x18;
+ c.x19 = ctxt.x19;
+ c.x20 = ctxt.x20;
+ c.x21 = ctxt.x21;
+ c.x22 = ctxt.x22;
+ c.x23 = ctxt.x23;
+ c.x24 = ctxt.x24;
+ c.x25 = ctxt.x25;
+ c.x26 = ctxt.x26;
+ c.x27 = ctxt.x27;
+ c.x28 = ctxt.x28;
+ c.x29 = ctxt.x29;
+ c.x30 = ctxt.x30;
+ c.pc64 = ctxt.pc64;
+ c.cpsr = ctxt.cpsr;
+ c.spsr_el1 = ctxt.spsr_el1; /* spsr_svc */
+
+#ifdef CONFIG_ARM_32
+ c.spsr_fiq = ctxt.spsr_fiq;
+ c.spsr_irq = ctxt.spsr_irq;
+ c.spsr_und = ctxt.spsr_und;
+ c.spsr_abt = ctxt.spsr_abt;
+#endif
+#ifdef CONFIG_ARM_64
+ c.sp_el0 = ctxt.sp_el0;
+ c.sp_el1 = ctxt.sp_el1;
+ c.elr_el1 = ctxt.elr_el1;
+#endif
+
+ /* set guest core registers */
+ vcpu_regs_user_to_hyp(v, &c);
+
+    if ( sizeof(v->arch.vfp) > sizeof(ctxt.vfp) )
+    {
+        dprintk(XENLOG_G_ERR, "hvm_hw_cpu: increase VFP dumping space\n");
+        return -EINVAL;
+    }
+
+    /* Copy from ctxt.vfp, not from the start of ctxt: relying on the VFP
+     * state being the first member of the record is fragile. */
+    memcpy(&v->arch.vfp, &ctxt.vfp, sizeof(v->arch.vfp));
+
+ v->is_initialised = 1;
+ v->pause_flags = ctxt.pause_flags;
+
+ return 0;
+}
+
+HVM_REGISTER_SAVE_RESTORE(VCPU, cpu_save, cpu_load, 1, HVMSR_PER_VCPU);
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
new file mode 100644
@@ -0,0 +1,66 @@
+/*
+ * hvm/save.c: Save and restore HVM guest's emulated hardware state for ARM.
+ *
+ * Copyright (c) 2013, Samsung Electronics.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <asm/hvm/support.h>
+#include <public/hvm/save.h>
+
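+/* Stamp the save header with the physical CPU identifier (MIDR) so that a
+ * restore on a different CPU model can be detected and refused. */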
+void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr)
+{
+ hdr->cpuid = READ_SYSREG32(MIDR_EL1);
+}
+
+int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
+{
+ uint32_t cpuid;
+
+ if ( hdr->magic != HVM_FILE_MAGIC )
+ {
+ printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#"PRIx32"\n",
+ d->domain_id, hdr->magic);
+ return -1;
+ }
+
+ if ( hdr->version != HVM_FILE_VERSION )
+ {
+ printk(XENLOG_G_ERR "HVM%d restore: unsupported version %u\n",
+ d->domain_id, hdr->version);
+ return -1;
+ }
+
+ cpuid = READ_SYSREG32(MIDR_EL1);
+ if ( hdr->cpuid != cpuid )
+ {
+        printk(XENLOG_G_ERR "HVM%d restore: VM saved on one CPU "
+               "(%#"PRIx32") and restored on another (%#"PRIx32")\n",
+               d->domain_id, hdr->cpuid, cpuid);
+ }
+
+ return 0;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -64,6 +64,8 @@ subdir-$(CONFIG_COMPAT) += compat
subdir-$(x86_64) += hvm
+subdir-$(CONFIG_ARM) += hvm
+
subdir-$(coverage) += gcov
subdir-y += libelf
new file mode 100644
@@ -0,0 +1,29 @@
+/*
+ * support.h: HVM support routines used by ARMv7 with Virtualization Extensions.
+ *
+ * Copyright (c) 2012, Citrix Systems
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef __ASM_ARM_HVM_SUPPORT_H__
+#define __ASM_ARM_HVM_SUPPORT_H__
+
+#include <xen/types.h>
+#include <public/hvm/ioreq.h>
+#include <xen/sched.h>
+#include <xen/hvm/save.h>
+#include <asm/processor.h>
+
+#endif /* __ASM_ARM_HVM_SUPPORT_H__ */
@@ -26,6 +26,142 @@
#ifndef __XEN_PUBLIC_HVM_SAVE_ARM_H__
#define __XEN_PUBLIC_HVM_SAVE_ARM_H__
+#define HVM_FILE_MAGIC 0x92385520
+#define HVM_FILE_VERSION 0x00000001
+
+struct hvm_save_header
+{
+ uint32_t magic; /* Must be HVM_FILE_MAGIC */
+ uint32_t version; /* File format version */
+ uint64_t changeset; /* Version of Xen that saved this file */
+ uint32_t cpuid; /* MIDR_EL1 on the saving machine */
+};
+
+DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
+
+struct vgic_rank
+{
+ uint32_t ienable, iactive, ipend, pendsgi;
+ uint32_t icfg[2];
+ uint32_t ipriority[8];
+ uint32_t itargets[8];
+};
+
+struct hvm_hw_gic
+{
+ uint32_t gic_hcr;
+ uint32_t gic_vmcr;
+ uint32_t gic_apr;
+ uint32_t gic_lr[64];
+ uint64_t event_mask;
+ uint64_t lr_mask;
+ struct vgic_rank ppi_state;
+};
+
+DECLARE_HVM_SAVE_TYPE(GIC, 2, struct hvm_hw_gic);
+
+#define TIMER_TYPE_VIRT 0
+#define TIMER_TYPE_PHYS 1
+
+struct hvm_hw_timer
+{
+ uint64_t vtb_offset;
+ uint32_t ctl;
+ uint64_t cval;
+ uint32_t type;
+};
+
+DECLARE_HVM_SAVE_TYPE(A15_TIMER, 3, struct hvm_hw_timer);
+
+struct hvm_hw_cpu
+{
+    /* A public header cannot test Xen's CONFIG_ options, so reserve room for
+     * the larger (AArch64) VFP state; AArch32 uses only the first 34 slots. */
+    uint64_t vfp[66];
+
+ /* Guest core registers */
+ uint64_t x0; /* r0_usr */
+ uint64_t x1; /* r1_usr */
+ uint64_t x2; /* r2_usr */
+ uint64_t x3; /* r3_usr */
+ uint64_t x4; /* r4_usr */
+ uint64_t x5; /* r5_usr */
+ uint64_t x6; /* r6_usr */
+ uint64_t x7; /* r7_usr */
+ uint64_t x8; /* r8_usr */
+ uint64_t x9; /* r9_usr */
+ uint64_t x10; /* r10_usr */
+ uint64_t x11; /* r11_usr */
+ uint64_t x12; /* r12_usr */
+ uint64_t x13; /* sp_usr */
+ uint64_t x14; /* lr_usr; */
+ uint64_t x15; /* __unused_sp_hyp */
+ uint64_t x16; /* lr_irq */
+ uint64_t x17; /* sp_irq */
+ uint64_t x18; /* lr_svc */
+ uint64_t x19; /* sp_svc */
+ uint64_t x20; /* lr_abt */
+ uint64_t x21; /* sp_abt */
+ uint64_t x22; /* lr_und */
+ uint64_t x23; /* sp_und */
+ uint64_t x24; /* r8_fiq */
+ uint64_t x25; /* r9_fiq */
+ uint64_t x26; /* r10_fiq */
+ uint64_t x27; /* r11_fiq */
+ uint64_t x28; /* r12_fiq */
+ uint64_t x29; /* fp,sp_fiq */
+ uint64_t x30; /* lr_fiq */
+ uint64_t pc64; /* ELR_EL2 */
+ uint32_t cpsr; /* SPSR_EL2 */
+    uint32_t spsr_el1;      /* spsr_svc */
+ /* AArch32 guests only */
+ uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt;
+ /* AArch64 guests only */
+ uint64_t sp_el0;
+ uint64_t sp_el1, elr_el1;
+
+ uint32_t sctlr, ttbcr;
+ uint64_t ttbr0, ttbr1;
+
+ uint32_t ifar, dfar;
+ uint32_t ifsr, dfsr;
+ uint32_t dacr;
+ uint64_t par;
+
+ uint64_t far;
+ uint64_t esr;
+
+ uint64_t mair0, mair1;
+ uint64_t tpidr_el0;
+ uint64_t tpidr_el1;
+ uint64_t tpidrro_el0;
+ uint64_t vbar;
+
+ /* Control Registers */
+ uint32_t actlr;
+ uint32_t cpacr;
+ uint32_t afsr0, afsr1;
+ uint32_t contextidr;
+ uint32_t teecr, teehbr; /* ThumbEE, 32-bit guests only */
+ uint32_t joscr, jmcr;
+ /* CP 15 */
+ uint32_t csselr;
+
+    uint64_t pause_flags;   /* fixed-width type for a stable record layout */
+};
+
+DECLARE_HVM_SAVE_TYPE(VCPU, 4, struct hvm_hw_cpu);
+
+/*
+ * Largest type-code in use
+ */
+#define HVM_SAVE_CODE_MAX 4
+
#endif
/*