@@ -22,6 +22,23 @@
#include <virglrenderer.h>

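+/*
+ * virgl wrapper around the common simple resource; the wrapper is
+ * recovered from the embedded base with container_of().
+ */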
+struct virtio_gpu_virgl_resource {
+ struct virtio_gpu_simple_resource base;
+};
+
+static struct virtio_gpu_virgl_resource *
+virtio_gpu_virgl_find_resource(VirtIOGPU *g, uint32_t resource_id)
+{
+ struct virtio_gpu_simple_resource *res;
+
+ res = virtio_gpu_find_resource(g, resource_id);
+ if (!res) {
+ return NULL;
+ }
+
+ return container_of(res, struct virtio_gpu_virgl_resource, base);
+}
+
#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
static void *
virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
@@ -35,11 +52,34 @@ static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
{
struct virtio_gpu_resource_create_2d c2d;
struct virgl_renderer_resource_create_args args;
+ struct virtio_gpu_virgl_resource *res;

VIRTIO_GPU_FILL_CMD(c2d);
trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
c2d.width, c2d.height);

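+ /* resource_id 0 is reserved for "no resource" and may not be created */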
+ if (c2d.resource_id == 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = virtio_gpu_virgl_find_resource(g, c2d.resource_id);
+ if (res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+ __func__, c2d.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
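+ /* allocate the wrapper and put it on reslist so that the common
+  * virtio-gpu code can find this resource by its ID */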
+ res = g_new0(struct virtio_gpu_virgl_resource, 1);
+ res->base.width = c2d.width;
+ res->base.height = c2d.height;
+ res->base.format = c2d.format;
+ res->base.resource_id = c2d.resource_id;
+ QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
+
args.handle = c2d.resource_id;
args.target = 2;
args.format = c2d.format;
@@ -59,11 +99,34 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
{
struct virtio_gpu_resource_create_3d c3d;
struct virgl_renderer_resource_create_args args;
+ struct virtio_gpu_virgl_resource *res;

VIRTIO_GPU_FILL_CMD(c3d);
trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
c3d.width, c3d.height, c3d.depth);

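+ /* resource_id 0 is reserved for "no resource" and may not be created */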
+ if (c3d.resource_id == 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = virtio_gpu_virgl_find_resource(g, c3d.resource_id);
+ if (res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+ __func__, c3d.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
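+ /* allocate and register the wrapper, as in the 2D path */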
+ res = g_new0(struct virtio_gpu_virgl_resource, 1);
+ res->base.width = c3d.width;
+ res->base.height = c3d.height;
+ res->base.format = c3d.format;
+ res->base.resource_id = c3d.resource_id;
+ QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
+
args.handle = c3d.resource_id;
args.target = c3d.target;
args.format = c3d.format;
@@ -82,12 +145,21 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_resource_unref unref;
+ struct virtio_gpu_virgl_resource *res;
struct iovec *res_iovs = NULL;
int num_iovs = 0;

VIRTIO_GPU_FILL_CMD(unref);
trace_virtio_gpu_cmd_res_unref(unref.resource_id);

+ res = virtio_gpu_virgl_find_resource(g, unref.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+ __func__, unref.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
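+ /* detach and free guest-mapped pages before the final unref */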
virgl_renderer_resource_detach_iov(unref.resource_id,
&res_iovs,
&num_iovs);
@@ -95,6 +167,10 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}
virgl_renderer_resource_unref(unref.resource_id);
+
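+ /* unlink from the common reslist and free the wrapper */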
+ QTAILQ_REMOVE(&g->reslist, &res->base, next);
+
+ g_free(res);
}

static void virgl_cmd_context_create(VirtIOGPU *g,