@@ -10,6 +10,7 @@
#include <linux/types.h>
#define GUNYAH_VMID_INVAL U16_MAX
+#define GUNYAH_MEM_HANDLE_INVAL U32_MAX
struct gunyah_rm;
@@ -58,6 +59,12 @@ struct gunyah_rm_vm_status_payload {
__le16 app_status;
} __packed;
+/* RPC Calls */
+int gunyah_rm_mem_share(struct gunyah_rm *rm,
+ struct gunyah_rm_mem_parcel *parcel);
+int gunyah_rm_mem_reclaim(struct gunyah_rm *rm,
+ struct gunyah_rm_mem_parcel *parcel);
+
int gunyah_rm_alloc_vmid(struct gunyah_rm *rm, u16 vmid);
int gunyah_rm_dealloc_vmid(struct gunyah_rm *rm, u16 vmid);
int gunyah_rm_vm_reset(struct gunyah_rm *rm, u16 vmid);
@@ -99,6 +106,8 @@ struct gunyah_rm_hyp_resources {
int gunyah_rm_get_hyp_resources(struct gunyah_rm *rm, u16 vmid,
struct gunyah_rm_hyp_resources **resources);
+int gunyah_rm_get_vmid(struct gunyah_rm *rm, u16 *vmid);
+
struct gunyah_resource *
gunyah_rm_alloc_resource(struct gunyah_rm *rm,
struct gunyah_rm_hyp_resource *hyp_resource);
@@ -5,6 +5,12 @@
#include "rsc_mgr.h"
+/* Message IDs: Memory Management */
+#define GUNYAH_RM_RPC_MEM_LEND 0x51000012
+#define GUNYAH_RM_RPC_MEM_SHARE 0x51000013
+#define GUNYAH_RM_RPC_MEM_RECLAIM 0x51000015
+#define GUNYAH_RM_RPC_MEM_APPEND 0x51000018
+
/* Message IDs: VM Management */
/* clang-format off */
#define GUNYAH_RM_RPC_VM_ALLOC_VMID 0x56000001
@@ -15,6 +21,7 @@
#define GUNYAH_RM_RPC_VM_CONFIG_IMAGE 0x56000009
#define GUNYAH_RM_RPC_VM_INIT 0x5600000B
#define GUNYAH_RM_RPC_VM_GET_HYP_RESOURCES 0x56000020
+#define GUNYAH_RM_RPC_VM_GET_VMID 0x56000024
/* clang-format on */
struct gunyah_rm_vm_common_vmid_req {
@@ -22,6 +29,48 @@ struct gunyah_rm_vm_common_vmid_req {
__le16 _padding;
} __packed;
+/* Call: MEM_LEND, MEM_SHARE */
+/* Maximum gunyah_rm_mem_entry records carried by a single RM message */
+#define GUNYAH_RM_MAX_MEM_ENTRIES 512
+
+/* Request flag: more memory entries will follow via MEM_APPEND */
+#define GUNYAH_MEM_SHARE_REQ_FLAGS_APPEND BIT(1)
+
+/* Fixed header preceding the ACL, mem-entry and attribute sections */
+struct gunyah_rm_mem_share_req_header {
+ u8 mem_type; /* enum gunyah_rm_mem_type: normal (DDR) or IO */
+ u8 _padding0;
+ u8 flags; /* GUNYAH_MEM_SHARE_REQ_FLAGS_* */
+ u8 _padding1;
+ __le32 label; /* client-chosen identifier for the parcel */
+} __packed;
+
+/* Variable-length ACL section: which VMs get what access to the parcel */
+struct gunyah_rm_mem_share_req_acl_section {
+ __le32 n_entries;
+ struct gunyah_rm_mem_acl_entry entries[];
+} __packed;
+
+/* Variable-length section listing the memory regions being lent/shared */
+struct gunyah_rm_mem_share_req_mem_section {
+ __le16 n_entries;
+ __le16 _padding;
+ struct gunyah_rm_mem_entry entries[];
+} __packed;
+
+/* Call: MEM_RELEASE (same request layout is used for MEM_RECLAIM) */
+struct gunyah_rm_mem_release_req {
+ __le32 mem_handle; /* handle returned by MEM_LEND/MEM_SHARE */
+ u8 flags; /* currently not used */
+ u8 _padding0;
+ __le16 _padding1;
+} __packed;
+
+/* Call: MEM_APPEND */
+/* Request flag: this is the final MEM_APPEND for the parcel */
+#define GUNYAH_MEM_APPEND_REQ_FLAGS_END BIT(0)
+
+/* Fixed header preceding the mem-entry section of a MEM_APPEND request */
+struct gunyah_rm_mem_append_req_header {
+ __le32 mem_handle; /* handle returned by MEM_LEND/MEM_SHARE */
+ u8 flags; /* GUNYAH_MEM_APPEND_REQ_FLAGS_* */
+ u8 _padding0;
+ __le16 _padding1;
+} __packed;
+
/* Call: VM_ALLOC */
struct gunyah_rm_vm_alloc_vmid_resp {
__le16 vmid;
@@ -66,6 +115,159 @@ static int gunyah_rm_common_vmid_call(struct gunyah_rm *rm, u32 message_id,
NULL, NULL);
}
+/**
+ * gunyah_rm_mem_append() - Append more memory entries to an existing parcel
+ * @rm: Handle to a Gunyah resource manager
+ * @mem_handle: Handle returned by the initial MEM_LEND/MEM_SHARE call
+ * @entries: Array of memory regions to append to the parcel
+ * @n_entries: Number of entries in @entries
+ *
+ * Entries are sent in chunks of at most GUNYAH_RM_MAX_MEM_ENTRIES; the last
+ * chunk carries GUNYAH_MEM_APPEND_REQ_FLAGS_END to close the parcel.
+ *
+ * Return: 0 on success, negative errno if allocation or any RPC call fails.
+ */
+static int gunyah_rm_mem_append(struct gunyah_rm *rm, u32 mem_handle,
+ struct gunyah_rm_mem_entry *entries,
+ size_t n_entries)
+{
+ struct gunyah_rm_mem_append_req_header *req __free(kfree) = NULL;
+ struct gunyah_rm_mem_share_req_mem_section *mem;
+ int ret = 0;
+ size_t n;
+
+ /* Allocate once at the maximum chunk size and reuse for every chunk */
+ req = kzalloc(sizeof(*req) + struct_size(mem, entries, GUNYAH_RM_MAX_MEM_ENTRIES),
+ GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->mem_handle = cpu_to_le32(mem_handle);
+ mem = (void *)(req + 1); /* mem section immediately follows the header */
+
+ while (n_entries) {
+ req->flags = 0;
+ if (n_entries > GUNYAH_RM_MAX_MEM_ENTRIES) {
+ n = GUNYAH_RM_MAX_MEM_ENTRIES;
+ } else {
+ /* Final chunk: tell RM the parcel is complete */
+ req->flags |= GUNYAH_MEM_APPEND_REQ_FLAGS_END;
+ n = n_entries;
+ }
+
+ mem->n_entries = cpu_to_le16(n);
+ memcpy(mem->entries, entries, sizeof(*entries) * n);
+
+ /* Message length covers only the entries actually sent */
+ ret = gunyah_rm_call(rm, GUNYAH_RM_RPC_MEM_APPEND, req,
+ sizeof(*req) + struct_size(mem, entries, n),
+ NULL, NULL);
+ if (ret)
+ break;
+
+ entries += n;
+ n_entries -= n;
+ }
+
+ return ret;
+}
+
+/**
+ * gunyah_rm_mem_share() - Share memory with other virtual machines.
+ * @rm: Handle to a Gunyah resource manager
+ * @p: Information about the memory to be shared. On success, @p->mem_handle
+ *     is filled with the handle RM assigned to the parcel; on input it must
+ *     be GUNYAH_MEM_HANDLE_INVAL.
+ *
+ * Sharing keeps Linux's access to the memory while the memory parcel is shared.
+ * When @p has exactly one ACL entry, the MEM_LEND RPC is issued instead of
+ * MEM_SHARE. Parcels with more than GUNYAH_RM_MAX_MEM_ENTRIES regions are
+ * completed with follow-up MEM_APPEND calls.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int gunyah_rm_mem_share(struct gunyah_rm *rm, struct gunyah_rm_mem_parcel *p)
+{
+ u32 message_id = p->n_acl_entries == 1 ? GUNYAH_RM_RPC_MEM_LEND :
+ GUNYAH_RM_RPC_MEM_SHARE;
+ size_t msg_size, initial_mem_entries = p->n_mem_entries, resp_size;
+ struct gunyah_rm_mem_share_req_acl_section *acl;
+ struct gunyah_rm_mem_share_req_mem_section *mem;
+ struct gunyah_rm_mem_share_req_header *req_header;
+ size_t acl_size, mem_size;
+ u32 *attr_section;
+ bool need_append = false;
+ __le32 *resp;
+ void *msg;
+ int ret;
+
+ /* A mem_handle other than INVAL means the parcel was already shared */
+ if (!p->acl_entries || !p->n_acl_entries || !p->mem_entries ||
+ !p->n_mem_entries || p->n_acl_entries > U8_MAX ||
+ p->mem_handle != GUNYAH_MEM_HANDLE_INVAL)
+ return -EINVAL;
+
+ /* Entries beyond the first message are sent later with MEM_APPEND */
+ if (initial_mem_entries > GUNYAH_RM_MAX_MEM_ENTRIES) {
+ initial_mem_entries = GUNYAH_RM_MAX_MEM_ENTRIES;
+ need_append = true;
+ }
+
+ acl_size = struct_size(acl, entries, p->n_acl_entries);
+ mem_size = struct_size(mem, entries, initial_mem_entries);
+
+ /* The format of the message goes:
+ * request header
+ * ACL entries (which VMs get what kind of access to this memory parcel)
+ * Memory entries (list of memory regions to share)
+ * Memory attributes (currently unused, we'll hard-code the size to 0)
+ */
+ msg_size = sizeof(struct gunyah_rm_mem_share_req_header) + acl_size +
+ mem_size +
+ sizeof(u32); /* for memory attributes, currently unused */
+
+ msg = kzalloc(msg_size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ /* Lay the four sections out back-to-back in the message buffer */
+ req_header = msg;
+ acl = (void *)req_header + sizeof(*req_header);
+ mem = (void *)acl + acl_size;
+ attr_section = (void *)mem + mem_size;
+
+ req_header->mem_type = p->mem_type;
+ if (need_append)
+ req_header->flags |= GUNYAH_MEM_SHARE_REQ_FLAGS_APPEND;
+ req_header->label = cpu_to_le32(p->label);
+
+ acl->n_entries = cpu_to_le32(p->n_acl_entries);
+ memcpy(acl->entries, p->acl_entries,
+ flex_array_size(acl, entries, p->n_acl_entries));
+
+ mem->n_entries = cpu_to_le16(initial_mem_entries);
+ memcpy(mem->entries, p->mem_entries,
+ flex_array_size(mem, entries, initial_mem_entries));
+
+ /* Set n_entries for memory attribute section to 0 */
+ *attr_section = 0;
+
+ ret = gunyah_rm_call(rm, message_id, msg, msg_size, (void **)&resp,
+ &resp_size);
+ kfree(msg);
+
+ if (ret)
+ return ret;
+
+ /* RM's reply is the 32-bit handle identifying the new parcel */
+ p->mem_handle = le32_to_cpu(*resp);
+ kfree(resp);
+
+ if (need_append) {
+ ret = gunyah_rm_mem_append(
+ rm, p->mem_handle, &p->mem_entries[initial_mem_entries],
+ p->n_mem_entries - initial_mem_entries);
+ if (ret) {
+ /* Best-effort unwind: reclaim the partially-built parcel */
+ gunyah_rm_mem_reclaim(rm, p);
+ p->mem_handle = GUNYAH_MEM_HANDLE_INVAL;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * gunyah_rm_mem_reclaim() - Reclaim a memory parcel
+ * @rm: Handle to a Gunyah resource manager
+ * @parcel: Information about the memory to be reclaimed; only
+ *          @parcel->mem_handle is consumed.
+ *
+ * RM maps the associated memory back into the stage-2 page tables of the owner VM.
+ *
+ * Return: result of the MEM_RECLAIM RPC call (0 on success).
+ */
+int gunyah_rm_mem_reclaim(struct gunyah_rm *rm,
+ struct gunyah_rm_mem_parcel *parcel)
+{
+ struct gunyah_rm_mem_release_req req = {
+ .mem_handle = cpu_to_le32(parcel->mem_handle),
+ };
+
+ return gunyah_rm_call(rm, GUNYAH_RM_RPC_MEM_RECLAIM, &req, sizeof(req),
+ NULL, NULL);
+}
+
/**
* gunyah_rm_alloc_vmid() - Allocate a new VM in Gunyah. Returns the VM identifier.
* @rm: Handle to a Gunyah resource manager
@@ -236,3 +438,32 @@ int gunyah_rm_get_hyp_resources(struct gunyah_rm *rm, u16 vmid,
*resources = resp;
return 0;
}
+
+/**
+ * gunyah_rm_get_vmid() - Retrieve VMID of this virtual machine
+ * @rm: Handle to a Gunyah resource manager
+ * @vmid: Filled with the VMID of this VM
+ *
+ * The VMID is cached after the first successful call; later calls return the
+ * cached value without issuing an RPC.
+ *
+ * Return: 0 on success, negative errno if the RPC call fails.
+ */
+int gunyah_rm_get_vmid(struct gunyah_rm *rm, u16 *vmid)
+{
+ static u16 cached_vmid = GUNYAH_VMID_INVAL; /* NOTE(review): cache is global, assumes one RM instance — confirm */
+ size_t resp_size;
+ __le32 *resp;
+ int ret;
+
+ if (cached_vmid != GUNYAH_VMID_INVAL) {
+ *vmid = cached_vmid;
+ return 0;
+ }
+
+ ret = gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_GET_VMID, NULL, 0,
+ (void **)&resp, &resp_size);
+ if (ret)
+ return ret;
+
+ /* RM replies with a 32-bit value; the VMID is the low 16 bits */
+ *vmid = cached_vmid = lower_16_bits(le32_to_cpu(*resp));
+ kfree(resp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gunyah_rm_get_vmid);
@@ -156,6 +156,49 @@ int gunyah_vm_add_resource_ticket(struct gunyah_vm *ghvm,
void gunyah_vm_remove_resource_ticket(struct gunyah_vm *ghvm,
struct gunyah_vm_resource_ticket *ticket);
+/* Access permissions for a memory parcel ACL entry */
+#define GUNYAH_RM_ACL_X BIT(0) /* execute */
+#define GUNYAH_RM_ACL_W BIT(1) /* write */
+#define GUNYAH_RM_ACL_R BIT(2) /* read */
+
+/* One access-control entry: the access @vmid is granted to the parcel */
+struct gunyah_rm_mem_acl_entry {
+ __le16 vmid;
+ u8 perms; /* bitmask of GUNYAH_RM_ACL_* */
+ u8 reserved;
+} __packed;
+
+/* One physically contiguous region contributing to a memory parcel */
+struct gunyah_rm_mem_entry {
+ __le64 phys_addr;
+ __le64 size;
+} __packed;
+
+/* Kind of memory backing a parcel, as reported to RM in the request header */
+enum gunyah_rm_mem_type {
+ GUNYAH_RM_MEM_TYPE_NORMAL = 0, /* normal (DDR) memory */
+ GUNYAH_RM_MEM_TYPE_IO = 1, /* IO/device memory */
+};
+
+/**
+ * struct gunyah_rm_mem_parcel - Info about memory to be lent/shared/donated/reclaimed
+ * @mem_type: The type of memory: normal (DDR) or IO
+ * @label: A client-specified identifier which can be used by the other VMs to identify the purpose
+ *         of the memory parcel.
+ * @n_acl_entries: Count of the number of entries in the @acl_entries array.
+ * @acl_entries: An array of access control entries. Each entry specifies a VM and what access
+ *               is allowed for the memory parcel.
+ * @n_mem_entries: Count of the number of entries in the @mem_entries array.
+ * @mem_entries: An array of regions to be associated with the memory parcel. Addresses should be
+ *               (intermediate) physical addresses from Linux's perspective.
+ * @mem_handle: On success, filled with memory handle that RM allocates for this memory parcel.
+ *              Must be GUNYAH_MEM_HANDLE_INVAL when passed to gunyah_rm_mem_share().
+ */
+struct gunyah_rm_mem_parcel {
+ enum gunyah_rm_mem_type mem_type;
+ u32 label;
+ size_t n_acl_entries;
+ struct gunyah_rm_mem_acl_entry *acl_entries;
+ size_t n_mem_entries;
+ struct gunyah_rm_mem_entry *mem_entries;
+ u32 mem_handle;
+};
+
/******************************************************************************/
/* Common arch-independent definitions for Gunyah hypercalls */
#define GUNYAH_CAPID_INVAL U64_MAX
In a Gunyah hypervisor system using the Gunyah Resource Manager, the "standard" unit of donating, lending and sharing memory is called a memory parcel (memparcel). A memparcel is an abstraction used by the resource manager for securely managing the donation, lending and sharing of memory, which may be physically and virtually fragmented, without dealing directly with physical memory addresses. Memparcels are created and managed through the RM RPC functions for lending, sharing and reclaiming memory from VMs. When creating a new VM, the initial VM memory containing the VM image and the VM's device tree blob must be provided as a memparcel. The memparcel must be created using the RM RPC for lending and mapping the memory to the VM. Signed-off-by: Elliot Berman <quic_eberman@quicinc.com> --- drivers/virt/gunyah/rsc_mgr.h | 9 ++ drivers/virt/gunyah/rsc_mgr_rpc.c | 231 ++++++++++++++++++++++++++++++++++++++ include/linux/gunyah.h | 43 +++++++ 3 files changed, 283 insertions(+)