@@ -187,3 +187,75 @@ int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm, struct gzvm_enable_cap *cap
return ret;
}
+
+int gzvm_arch_vcpu_update_one_reg(struct gzvm_vcpu *vcpu, __u64 reg_id,
+ bool is_write, __u64 *data)
+{
+ struct arm_smccc_res res;
+ unsigned long a1;
+ int ret;
+
+ /* reg id follows KVM's encoding */
+ switch (reg_id & GZVM_REG_ARM_COPROC_MASK) {
+ case GZVM_REG_ARM_CORE:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ a1 = assemble_vm_vcpu_tuple(vcpu->gzvm->vm_id, vcpu->vcpuid);
+ if (!is_write) {
+ ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_GET_ONE_REG,
+ a1, reg_id, 0, 0, 0, 0, 0, &res);
+ if (ret == 0)
+ *data = res.a1;
+ } else {
+ ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_SET_ONE_REG,
+ a1, reg_id, *data, 0, 0, 0, 0, &res);
+ }
+
+ return ret;
+}
+
+int gzvm_arch_vcpu_run(struct gzvm_vcpu *vcpu, __u64 *exit_reason)
+{
+ struct arm_smccc_res res;
+ unsigned long a1;
+ int ret;
+
+ a1 = assemble_vm_vcpu_tuple(vcpu->gzvm->vm_id, vcpu->vcpuid);
+ ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_RUN, a1, 0, 0, 0, 0, 0,
+ 0, &res);
+ *exit_reason = res.a1;
+ return ret;
+}
+
+int gzvm_arch_destroy_vcpu(gzvm_id_t vm_id, int vcpuid)
+{
+ struct arm_smccc_res res;
+ unsigned long a1;
+
+ a1 = assemble_vm_vcpu_tuple(vm_id, vcpuid);
+ gzvm_hypcall_wrapper(MT_HVC_GZVM_DESTROY_VCPU, a1, 0, 0, 0, 0, 0, 0,
+ &res);
+
+ return 0;
+}
+
+/**
+ * gzvm_arch_create_vcpu() - Call SMC to GenieZone hypervisor to create a vcpu
+ * @vm_id: vm id of the guest
+ * @vcpuid: vcpu id of the vcpu to create
+ * @run: Virtual address of the vcpu->run shared pages
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+int gzvm_arch_create_vcpu(gzvm_id_t vm_id, int vcpuid, void *run)
+{
+ struct arm_smccc_res res;
+ unsigned long a1, a2;
+ int ret;
+
+ a1 = assemble_vm_vcpu_tuple(vm_id, vcpuid);
+ a2 = (__u64)virt_to_phys(run);
+ ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_CREATE_VCPU, a1, a2, 0, 0, 0, 0,
+ 0, &res);
+
+ return ret;
+}
@@ -47,4 +47,28 @@ enum {
#define MT_HVC_GZVM_PROBE GZVM_HCALL_ID(GZVM_FUNC_PROBE)
#define MT_HVC_GZVM_ENABLE_CAP GZVM_HCALL_ID(GZVM_FUNC_ENABLE_CAP)
+static inline gzvm_id_t get_vmid_from_tuple(unsigned int tuple)
+{
+ return (gzvm_id_t)(tuple >> 16);
+}
+
+static inline gzvm_vcpu_id_t get_vcpuid_from_tuple(unsigned int tuple)
+{
+ return (gzvm_vcpu_id_t)(tuple & 0xffff);
+}
+
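+/*
+ * The (vm_id, vcpu_id) pair is packed into a single 32-bit hypercall
+ * argument: vm_id occupies bits [31:16] and vcpu_id bits [15:0], e.g.
+ * (vm_id 1, vcpu_id 2) is assembled as 0x00010002.
+ */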
+static inline unsigned int
+assemble_vm_vcpu_tuple(gzvm_id_t vmid, gzvm_vcpu_id_t vcpuid)
+{
+ return ((unsigned int)vmid << 16 | vcpuid);
+}
+
+static inline void
+disassemble_vm_vcpu_tuple(unsigned int tuple, gzvm_id_t *vmid,
+ gzvm_vcpu_id_t *vcpuid)
+{
+ *vmid = get_vmid_from_tuple(tuple);
+ *vcpuid = get_vcpuid_from_tuple(tuple);
+}
+
#endif /* __GZVM_ARCH_H__ */
@@ -15,4 +15,33 @@
#define GZVM_CAP_ARM_PVM_SET_PVMFW_IPA 0
#define GZVM_CAP_ARM_PVM_GET_PVMFW_SIZE 1
+/*
+ * Architecture-specific registers are to be defined in arch headers and
+ * ORed with the arch identifier.
+ */
+#define GZVM_REG_ARM 0x4000000000000000ULL
+#define GZVM_REG_ARM64 0x6000000000000000ULL
+
+#define GZVM_REG_SIZE_SHIFT 52
+#define GZVM_REG_SIZE_MASK 0x00f0000000000000ULL
+#define GZVM_REG_SIZE_U8 0x0000000000000000ULL
+#define GZVM_REG_SIZE_U16 0x0010000000000000ULL
+#define GZVM_REG_SIZE_U32 0x0020000000000000ULL
+#define GZVM_REG_SIZE_U64 0x0030000000000000ULL
+#define GZVM_REG_SIZE_U128 0x0040000000000000ULL
+#define GZVM_REG_SIZE_U256 0x0050000000000000ULL
+#define GZVM_REG_SIZE_U512 0x0060000000000000ULL
+#define GZVM_REG_SIZE_U1024 0x0070000000000000ULL
+#define GZVM_REG_SIZE_U2048 0x0080000000000000ULL
+
+#define GZVM_REG_ARCH_MASK 0xff00000000000000ULL
+
+/* If you need to interpret the index values, here is the key: */
+#define GZVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
+#define GZVM_REG_ARM_COPROC_SHIFT 16
+
+/* Normal registers are mapped as coprocessor 16. */
+#define GZVM_REG_ARM_CORE (0x0010 << GZVM_REG_ARM_COPROC_SHIFT)
+#define GZVM_REG_ARM_CORE_REG(name) (offsetof(struct gzvm_regs, name) / sizeof(__u32))
+
#endif /* __GZVM_ARCH_H__ */
@@ -6,5 +6,6 @@
GZVM_DIR ?= ../../../drivers/virt/geniezone
-gzvm-y := $(GZVM_DIR)/gzvm_main.o $(GZVM_DIR)/gzvm_vm.o
+gzvm-y := $(GZVM_DIR)/gzvm_main.o $(GZVM_DIR)/gzvm_vm.o \
+ $(GZVM_DIR)/gzvm_vcpu.o
new file mode 100644
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ */
+
+#include <asm/sysreg.h>
+#include <linux/anon_inodes.h>
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gzvm_drv.h>
+
+/* maximum size needed for holding an integer */
+#define ITOA_MAX_LEN 12
+
+static long gzvm_vcpu_update_one_reg(struct gzvm_vcpu *vcpu, void * __user argp,
+ bool is_write)
+{
+ struct gzvm_one_reg reg;
+ void __user *reg_addr;
+ u64 data = 0;
+ u64 reg_size;
+ long ret;
+
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ return -EFAULT;
+
+ reg_addr = (void __user *)reg.addr;
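+ /* the size field of reg.id encodes log2 of the register size in bytes */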
+ reg_size = (reg.id & GZVM_REG_SIZE_MASK) >> GZVM_REG_SIZE_SHIFT;
+ reg_size = BIT(reg_size);
+
+ /* data is a u64 on the stack; reject register sizes we cannot hold */
+ if (reg_size > sizeof(data))
+ return -EINVAL;
+
+ if (is_write) {
+ if (copy_from_user(&data, reg_addr, reg_size))
+ return -EFAULT;
+ }
+
+ ret = gzvm_arch_vcpu_update_one_reg(vcpu, reg.id, is_write, &data);
+
+ if (ret)
+ return ret;
+
+ if (!is_write) {
+ if (copy_to_user(reg_addr, &data, reg_size))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * gzvm_vcpu_run() - Handle vcpu run ioctl, entry point to guest and exit
+ * point from guest
+ * @vcpu: Pointer to the vcpu to run
+ * @argp: Pointer to struct gzvm_vcpu_run in userspace
+ *
+ * Return: 0 on success, negative errno on error
+ */
+static long gzvm_vcpu_run(struct gzvm_vcpu *vcpu, void * __user argp)
+{
+ bool need_userspace = false;
+ u64 exit_reason;
+
+ if (copy_from_user(vcpu->run, argp, sizeof(struct gzvm_vcpu_run)))
+ return -EFAULT;
+
+ if (vcpu->run->immediate_exit == 1)
+ return -EINTR;
+
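+ /*
+ * Keep re-entering the guest until the exit has to be handled by
+ * userspace (e.g. MMIO) or a signal is pending for the current task.
+ */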
+ while (!need_userspace && !signal_pending(current)) {
+ gzvm_arch_vcpu_run(vcpu, &exit_reason);
+
+ switch (exit_reason) {
+ case GZVM_EXIT_MMIO:
+ need_userspace = true;
+ break;
+ /*
+ * It is GenieZone's responsibility to fill the corresponding
+ * data structure before returning these exit reasons.
+ */
+ case GZVM_EXIT_HYPERCALL:
+ fallthrough;
+ case GZVM_EXIT_EXCEPTION:
+ fallthrough;
+ case GZVM_EXIT_DEBUG:
+ fallthrough;
+ case GZVM_EXIT_FAIL_ENTRY:
+ fallthrough;
+ case GZVM_EXIT_INTERNAL_ERROR:
+ fallthrough;
+ case GZVM_EXIT_SYSTEM_EVENT:
+ fallthrough;
+ case GZVM_EXIT_SHUTDOWN:
+ need_userspace = true;
+ break;
+ case GZVM_EXIT_IRQ:
+ break;
+ case GZVM_EXIT_UNKNOWN:
+ fallthrough;
+ default:
+ dev_err(&gzvm_debug_dev->dev, "vcpu unknown exit\n");
+ need_userspace = true;
+ goto out;
+ }
+ }
+
+out:
+ if (copy_to_user(argp, vcpu->run, sizeof(struct gzvm_vcpu_run)))
+ return -EFAULT;
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ return 0;
+}
+
+static long gzvm_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ int ret = -ENOTTY;
+ void __user *argp = (void __user *)arg;
+ struct gzvm_vcpu *vcpu = filp->private_data;
+
+ switch (ioctl) {
+ case GZVM_RUN:
+ ret = gzvm_vcpu_run(vcpu, argp);
+ break;
+ case GZVM_GET_ONE_REG:
+ /* is_write */
+ ret = gzvm_vcpu_update_one_reg(vcpu, argp, false);
+ break;
+ case GZVM_SET_ONE_REG:
+ /* is_write */
+ ret = gzvm_vcpu_update_one_reg(vcpu, argp, true);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct file_operations gzvm_vcpu_fops = {
+ .unlocked_ioctl = gzvm_vcpu_ioctl,
+ .llseek = noop_llseek,
+};
+
+/* caller must hold the vm lock */
+void gzvm_destroy_vcpu(struct gzvm_vcpu *vcpu)
+{
+ if (!vcpu)
+ return;
+
+ gzvm_arch_destroy_vcpu(vcpu->gzvm->vm_id, vcpu->vcpuid);
+ /* clean guest's data */
+ memset(vcpu->run, 0, GZVM_VCPU_RUN_MAP_SIZE);
+ free_pages_exact(vcpu->run, GZVM_VCPU_RUN_MAP_SIZE);
+ kfree(vcpu);
+}
+
+/**
+ * gzvm_destroy_vcpus() - Destroy all vcpus, caller has to hold the vm lock
+ *
+ * @gzvm: vm struct that owns the vcpus
+ */
+void gzvm_destroy_vcpus(struct gzvm *gzvm)
+{
+ int i;
+
+ for (i = 0; i < GZVM_MAX_VCPUS; i++) {
+ gzvm_destroy_vcpu(gzvm->vcpus[i]);
+ gzvm->vcpus[i] = NULL;
+ }
+}
+
+/* create_vcpu_fd() - Allocates an inode for the vcpu. */
+static int create_vcpu_fd(struct gzvm_vcpu *vcpu)
+{
+ /* strlen("gzvm-vcpu:") + maximum decimal digits of vcpuid + '\0' */
+ char name[10 + ITOA_MAX_LEN + 1];
+
+ snprintf(name, sizeof(name), "gzvm-vcpu:%d", vcpu->vcpuid);
+ return anon_inode_getfd(name, &gzvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
+}
+
+/**
+ * gzvm_vm_ioctl_create_vcpu() - Handle GZVM_CREATE_VCPU for a vm fd
+ * @gzvm: Pointer to the vm that owns the new vcpu
+ * @cpuid: vcpu id requested by userspace (the raw ioctl argument)
+ *
+ * Return: Fd of the vcpu on success, negative errno if an error occurs
+ */
+int gzvm_vm_ioctl_create_vcpu(struct gzvm *gzvm, u32 cpuid)
+{
+ struct gzvm_vcpu *vcpu;
+ int ret;
+
+ if (cpuid >= GZVM_MAX_VCPUS)
+ return -EINVAL;
+
+ vcpu = kzalloc(sizeof(*vcpu), GFP_KERNEL);
+ if (!vcpu)
+ return -ENOMEM;
+
+ /*
+ * Allocate 2 pages of memory shared between the driver and the
+ * GenieZone hypervisor:
+ *
+ * |------ page 0 ------|------ page 1 ------|
+ * | gzvm_vcpu_run |....| hwstate |..........|
+ */
+ vcpu->run = alloc_pages_exact(GZVM_VCPU_RUN_MAP_SIZE,
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ if (!vcpu->run) {
+ ret = -ENOMEM;
+ goto free_vcpu;
+ }
+ vcpu->vcpuid = cpuid;
+ vcpu->gzvm = gzvm;
+ mutex_init(&vcpu->lock);
+
+ ret = gzvm_arch_create_vcpu(gzvm->vm_id, vcpu->vcpuid, vcpu->run);
+ if (ret < 0)
+ goto free_vcpu_run;
+
+ ret = create_vcpu_fd(vcpu);
+ if (ret < 0)
+ goto free_vcpu_run;
+ gzvm->vcpus[cpuid] = vcpu;
+
+ return ret;
+
+free_vcpu_run:
+ free_pages_exact(vcpu->run, GZVM_VCPU_RUN_MAP_SIZE);
+free_vcpu:
+ kfree(vcpu);
+ return ret;
+}
@@ -9,6 +9,7 @@
#include <linux/kvm_host.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gzvm_drv.h>
@@ -59,9 +60,10 @@ static int fill_constituents(struct mem_region_addr_range *consti,
int *consti_cnt, int max_nr_consti, gfn_t gfn,
u32 total_pages, struct gzvm_memslot *slot)
{
- int i, nr_pages;
hfn_t pfn, prev_pfn;
gfn_t gfn_end;
+ int nr_pages = 1;
+ int i = 0;
if (unlikely(total_pages == 0))
return -EINVAL;
@@ -74,8 +76,6 @@ static int fill_constituents(struct mem_region_addr_range *consti,
consti[0].pg_cnt = 1;
gfn++;
prev_pfn = pfn;
- i = 0;
- nr_pages = 1;
while (i < max_nr_consti && gfn < gfn_end) {
if (gzvm_gfn_to_pfn_memslot(slot, gfn, &pfn) != 0)
return -EFAULT;
@@ -92,10 +92,10 @@ static int fill_constituents(struct mem_region_addr_range *consti,
gfn++;
nr_pages++;
}
- if (i == max_nr_consti)
- *consti_cnt = i;
- else
- *consti_cnt = (i + 1);
+
+ if (i != max_nr_consti)
+ i++;
+ *consti_cnt = i;
return nr_pages;
}
@@ -105,9 +105,9 @@ static int
register_memslot_addr_range(struct gzvm *gzvm, struct gzvm_memslot *memslot)
{
struct gzvm_memory_region_ranges *region;
- u32 buf_size;
- int max_nr_consti, remain_pages;
gfn_t gfn, gfn_end;
+ int max_nr_consti, remain_pages;
+ u32 buf_size;
buf_size = PAGE_SIZE * 2;
region = alloc_pages_exact(buf_size, GFP_KERNEL);
@@ -206,6 +206,10 @@ static long gzvm_vm_ioctl(struct file *filp, unsigned int ioctl,
ret = gzvm_dev_ioctl_check_extension(gzvm, arg);
break;
}
+ case GZVM_CREATE_VCPU: {
+ ret = gzvm_vm_ioctl_create_vcpu(gzvm, arg);
+ break;
+ }
case GZVM_SET_USER_MEMORY_REGION: {
struct gzvm_userspace_memory_region userspace_mem;
@@ -242,6 +246,8 @@ static void gzvm_destroy_vm(struct gzvm *gzvm)
gzvm_arch_destroy_vm(gzvm->vm_id);
+ gzvm_destroy_vcpus(gzvm);
+
mutex_lock(&gzvm_list_lock);
list_del(&gzvm->vm_list);
mutex_unlock(&gzvm_list_lock);
@@ -275,8 +281,10 @@ static struct gzvm *gzvm_create_vm(unsigned long vm_type)
return ERR_PTR(-ENOMEM);
ret = gzvm_arch_create_vm();
- if (ret < 0)
- goto err;
+ if (ret < 0) {
+ kfree(gzvm);
+ return ERR_PTR(ret);
+ }
gzvm->vm_id = ret;
gzvm->mm = current->mm;
@@ -289,10 +297,6 @@ static struct gzvm *gzvm_create_vm(unsigned long vm_type)
pr_info("VM-%u is created\n", gzvm->vm_id);
return gzvm;
-
-err:
- kfree(gzvm);
- return ERR_PTR(ret);
}
/**
@@ -306,18 +310,14 @@ int gzvm_dev_ioctl_create_vm(unsigned long vm_type)
int ret;
gzvm = gzvm_create_vm(vm_type);
- if (IS_ERR(gzvm)) {
- ret = PTR_ERR(gzvm);
- goto error;
- }
+ if (IS_ERR(gzvm))
+ return PTR_ERR(gzvm);
ret = anon_inode_getfd("gzvm-vm", &gzvm_vm_fops, gzvm,
O_RDWR | O_CLOEXEC);
- if (ret < 0)
- goto error;
-
-error:
- return ret;
+ return ret;
}
void destroy_all_vm(void)
@@ -26,18 +26,15 @@
#define ERR_NOT_IMPLEMENTED (-27)
#define ERR_FAULT (-40)
-static inline gzvm_id_t get_vmid_from_tuple(unsigned int tuple)
-{
- return (gzvm_id_t)(tuple >> 16);
-}
-
-/**
+/*
* The following data structures are for data transferring between driver and
* hypervisor, and they're aligned with hypervisor definitions
*/
#define GZVM_MAX_VCPUS 8
#define GZVM_MAX_MEM_REGION 10
+#define GZVM_VCPU_RUN_MAP_SIZE (PAGE_SIZE * 2)
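+/* two pages shared with the hypervisor: gzvm_vcpu_run followed by hw state */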
+
/* struct mem_region_addr_range - Identical to ffa memory constituent */
struct mem_region_addr_range {
/* the base IPA of the constituent memory region, aligned to 4 kiB */
@@ -65,7 +62,16 @@ struct gzvm_memslot {
u32 slot_id;
};
+struct gzvm_vcpu {
+ struct gzvm *gzvm;
+ int vcpuid;
+ /* lock of vcpu */
+ struct mutex lock;
+ struct gzvm_vcpu_run *run;
+};
+
struct gzvm {
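+ /* vcpus created for this vm, indexed by vcpu id */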
+ struct gzvm_vcpu *vcpus[GZVM_MAX_VCPUS];
/* userspace tied to this vm */
struct mm_struct *mm;
struct gzvm_memslot memslot[GZVM_MAX_MEM_REGION];
@@ -82,6 +88,8 @@ int gz_err_to_errno(unsigned long err);
void destroy_all_vm(void);
+void gzvm_destroy_vcpus(struct gzvm *gzvm);
+
/* arch-dependant functions */
int gzvm_arch_probe(void);
int gzvm_arch_set_memregion(gzvm_id_t vm_id, size_t buf_size,
@@ -92,6 +100,12 @@ int gzvm_arch_destroy_vm(gzvm_id_t vm_id);
int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
struct gzvm_enable_cap *cap,
void __user *argp);
+int gzvm_vm_ioctl_create_vcpu(struct gzvm *gzvm, u32 cpuid);
+int gzvm_arch_vcpu_update_one_reg(struct gzvm_vcpu *vcpu, __u64 reg_id,
+ bool is_write, __u64 *data);
+int gzvm_arch_create_vcpu(gzvm_id_t vm_id, int vcpuid, void *run);
+int gzvm_arch_vcpu_run(struct gzvm_vcpu *vcpu, __u64 *exit_reason);
+int gzvm_arch_destroy_vcpu(gzvm_id_t vm_id, int vcpuid);
extern struct platform_device *gzvm_debug_dev;
@@ -3,6 +3,12 @@
* Copyright (c) 2023 MediaTek Inc.
*/
+/**
+ * DOC: UAPI of GenieZone Hypervisor
+ *
+ * This file declares the common data structures shared among userspace,
+ * the kernel, and the GenieZone hypervisor.
+ */
#ifndef __GZVM_H__
#define __GZVM_H__
@@ -16,11 +22,6 @@
#endif
-/**
- * DOC: This file declares common data structure shared between userspace,
- * kernel space, and GZ.
- */
-
typedef __u16 gzvm_id_t;
typedef __u16 gzvm_vcpu_id_t;
@@ -44,26 +45,30 @@ struct gzvm_memory_region {
#define GZVM_SET_MEMORY_REGION _IOW(GZVM_IOC_MAGIC, 0x40, \
struct gzvm_memory_region)
-/*
- * GZVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
- * a vcpu fd.
+/*
+ * GZVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
+ * a vcpu fd.
*/
#define GZVM_CREATE_VCPU _IO(GZVM_IOC_MAGIC, 0x41)
/* for GZVM_SET_USER_MEMORY_REGION */
struct gzvm_userspace_memory_region {
__u32 slot;
__u32 flags;
__u64 guest_phys_addr;
- __u64 memory_size; /* bytes */
- __u64 userspace_addr; /* start of the userspace allocated memory */
+ /* size of the region in bytes */
+ __u64 memory_size;
+ /* start of the userspace allocated memory */
+ __u64 userspace_addr;
};
#define GZVM_SET_USER_MEMORY_REGION _IOW(GZVM_IOC_MAGIC, 0x46, \
struct gzvm_userspace_memory_region)
-/* for GZVM_IRQ_LINE */
-/* GZVM_IRQ_LINE irq field index values */
+/* for GZVM_IRQ_LINE, irq field index values */
#define GZVM_IRQ_VCPU2_SHIFT 28
#define GZVM_IRQ_VCPU2_MASK 0xf
#define GZVM_IRQ_TYPE_SHIFT 24
@@ -85,15 +90,108 @@ struct gzvm_userspace_memory_region {
/* ioctls for vcpu fds */
#define GZVM_RUN _IO(GZVM_IOC_MAGIC, 0x80)
+/* VM exit reason */
+enum {
+ GZVM_EXIT_UNKNOWN = 0x92920000,
+ GZVM_EXIT_MMIO,
+ GZVM_EXIT_HYPERCALL,
+ GZVM_EXIT_IRQ,
+ GZVM_EXIT_EXCEPTION,
+ GZVM_EXIT_DEBUG,
+ GZVM_EXIT_FAIL_ENTRY,
+ GZVM_EXIT_INTERNAL_ERROR,
+ GZVM_EXIT_SYSTEM_EVENT,
+ GZVM_EXIT_SHUTDOWN,
+};
+
+/**
+ * struct gzvm_vcpu_run - Same purpose as kvm_run: this struct is
+ *                        shared among userspace, the kernel and the
+ *                        GenieZone hypervisor
+ *
+ * Keep the layout identical across the 3 modules.
+ */
+struct gzvm_vcpu_run {
+ /* to userspace */
+ __u32 exit_reason;
+ __u8 immediate_exit;
+ __u8 padding1[3];
+ /* union structure of collection of guest exit reason */
+ union {
+ /* GZVM_EXIT_MMIO */
+ struct {
+ /* from FAR_EL2 */
+ __u64 phys_addr;
+ __u8 data[8];
+ /* access size, from ESR_EL2 */
+ __u64 size;
+ /* from ESR_EL2 */
+ __u32 reg_nr;
+ /* from ESR_EL2 */
+ __u8 is_write;
+ } mmio;
+ /* GZVM_EXIT_FAIL_ENTRY */
+ struct {
+ __u64 hardware_entry_failure_reason;
+ __u32 cpu;
+ } fail_entry;
+ /* GZVM_EXIT_EXCEPTION */
+ struct {
+ __u32 exception;
+ __u32 error_code;
+ } exception;
+ /* GZVM_EXIT_HYPERCALL */
+ struct {
+ __u64 args[8]; /* in-out */
+ } hypercall;
+ /* GZVM_EXIT_INTERNAL_ERROR */
+ struct {
+ __u32 suberror;
+ __u32 ndata;
+ __u64 data[16];
+ } internal;
+ /* GZVM_EXIT_SYSTEM_EVENT */
+ struct {
+#define GZVM_SYSTEM_EVENT_SHUTDOWN 1
+#define GZVM_SYSTEM_EVENT_RESET 2
+#define GZVM_SYSTEM_EVENT_CRASH 3
+#define GZVM_SYSTEM_EVENT_WAKEUP 4
+#define GZVM_SYSTEM_EVENT_SUSPEND 5
+#define GZVM_SYSTEM_EVENT_SEV_TERM 6
+#define GZVM_SYSTEM_EVENT_S2IDLE 7
+ __u32 type;
+ __u32 ndata;
+ __u64 data[16];
+ } system_event;
+ /* Fix the size of the union. */
+ char padding[256];
+ };
+};
+
/* for GZVM_ENABLE_CAP */
struct gzvm_enable_cap {
- /* in */
- __u64 cap;
- /* we have total 5 (8 - 3) registers can be used for additional args */
- __u64 args[5];
+ /* in */
+ __u64 cap;
+ /*
+ * we have a total of 5 (8 - 3) registers that can be used for
+ * additional args
+ */
+ __u64 args[5];
};
#define GZVM_ENABLE_CAP _IOW(GZVM_IOC_MAGIC, 0xa3, \
struct gzvm_enable_cap)
+/* for GZVM_GET/SET_ONE_REG */
+struct gzvm_one_reg {
+ __u64 id;
+ __u64 addr;
+};
+
+#define GZVM_GET_ONE_REG _IOW(GZVM_IOC_MAGIC, 0xab, \
+ struct gzvm_one_reg)
+#define GZVM_SET_ONE_REG _IOW(GZVM_IOC_MAGIC, 0xac, \
+ struct gzvm_one_reg)
+
+#define GZVM_REG_GENERIC 0x0000000000000000ULL
#endif /* __GZVM_H__ */
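
For reference, a minimal userspace sketch (not part of the patch itself) of how the one-reg encoding and ioctls above are meant to be combined. It assumes the UAPI header installs as <linux/gzvm.h>, that struct gzvm_regs mirrors KVM's struct kvm_regs (i.e. it contains a struct user_pt_regs "regs" member), and that vcpu_fd came from GZVM_CREATE_VCPU; read_guest_pc() is a hypothetical helper:

#include <err.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/gzvm.h>	/* assumed install path of this UAPI header */

/* Read the guest program counter through GZVM_GET_ONE_REG. */
static uint64_t read_guest_pc(int vcpu_fd)
{
	uint64_t pc = 0;
	struct gzvm_one_reg reg = {
		/* arm64 | 64-bit | core register | index of regs.pc */
		.id = GZVM_REG_ARM64 | GZVM_REG_SIZE_U64 | GZVM_REG_ARM_CORE |
		      GZVM_REG_ARM_CORE_REG(regs.pc),
		.addr = (uint64_t)(unsigned long)&pc,
	};

	if (ioctl(vcpu_fd, GZVM_GET_ONE_REG, &reg) < 0)
		err(1, "GZVM_GET_ONE_REG");
	return pc;
}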