[virtio-magma] make virtio-magma its own device

This change moves virtio-magma into its own device instead of a sub-
device of virtio-wayland. It also adds support for enough of the full
magma API to run stock Vulkan, and extends virtio-wayland to offer
file descriptors for existing vfds that virtio-magma can use to wrap
exported buffers. This allows for zero-copy swap chain rendering,
e.g. using the VK_KHR_wayland_surface Vulkan extension.

Test: ran virtmagma_unit_tests in biscotti_guest
      ran virtmagma_vulkan_unit_tests in biscotti_guest
      ran wayland_swapchain_test in biscotti_guest
Change-Id: I1933e45d486d4d74c512620492d37ecdac7e4049
diff --git a/arch/x86/configs/biscotti_defconfig b/arch/x86/configs/biscotti_defconfig
index 8e2c1fe..dbf3c09 100644
--- a/arch/x86/configs/biscotti_defconfig
+++ b/arch/x86/configs/biscotti_defconfig
@@ -334,6 +334,7 @@
 CONFIG_VIRTIO_MMIO=y
 CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
 CONFIG_VIRTIO_WL=y
+CONFIG_VIRTIO_MAGMA=y
 CONFIG_STAGING=y
 CONFIG_POWERCAP=y
 CONFIG_INTEL_RAPL=y
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index a9c3cbe..e71de1f 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -91,4 +91,12 @@
 
 	 If unsure, say 'N'.
 
+config VIRTIO_MAGMA
+	bool "Virtio Magma driver"
+	depends on VIRTIO
+	---help---
+	 This driver supports proxying of a magma device from host to guest.
+
+	 If unsure, say 'N'.
+
 endif # VIRTIO_MENU
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index a74ec94..a0f07695 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -6,4 +6,5 @@
 virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
 obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
-obj-$(CONFIG_VIRTIO_WL) += virtio_wl.o virtio_magma.o
+obj-$(CONFIG_VIRTIO_WL) += virtio_wl.o
+obj-$(CONFIG_VIRTIO_MAGMA) += virtio_magma.o
diff --git a/drivers/virtio/virtio_magma.c b/drivers/virtio/virtio_magma.c
index 98ba709..cd6da57 100644
--- a/drivers/virtio/virtio_magma.c
+++ b/drivers/virtio/virtio_magma.c
@@ -2,101 +2,1295 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <linux/anon_inodes.h>
+#include <linux/cdev.h>
+#include <linux/compat.h>
+#include <linux/hashtable.h>
+#include <linux/module.h>
 #include <linux/uaccess.h>
+#include <linux/virtio.h>
 #include <linux/virtio_magma.h>
 #include <linux/vmalloc.h>
 
-#include "virtio_magma.h"
-
 #define VQ_DESCRIPTOR_SIZE PAGE_SIZE
-#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
+#define CONNECTIONS_HASHTABLE_BITS 4
+#define CONNECTION_OBJECTS_HASHTABLE_BITS 12
+#define NOTIFICATION_MAX_BYTES 65536
+#define COMMAND_OK(command, request_ptr, response_ptr)                         \
+	((command)->request_size >= sizeof(*(request_ptr)) &&                  \
+	 (command)->response_size >= sizeof(*(response_ptr)))
+#define WAYLAND_DEVICE_PATH "/dev/wl0"
+#define MESSAGE_CACHE_OBJECT_SIZE 64
 
-struct virtmagma_info_private
-{
-	struct mutex vq_locks[VIRTMAGMA_QUEUE_COUNT];
-	struct virtqueue *vqs[VIRTMAGMA_QUEUE_COUNT];
-	struct work_struct in_vq_work;
+struct virtmagma_info {
+	dev_t dev_num;
+	struct device *dev;
+	struct class *class;
+	struct cdev cdev;
+
+	struct mutex vq_out_lock;
+	struct virtqueue *vq_out;
 	struct work_struct out_vq_work;
 	wait_queue_head_t out_waitq;
+
+	struct mutex instances_lock;
+	struct idr instances;
 };
 
-static int vq_return_inbuf_locked(struct virtqueue *vq, void *buffer)
+enum virtmagma_connection_object_type {
+	MAGMA_BUFFER,
+	MAGMA_SEMAPHORE,
+	MAGMA_CONTEXT
+};
+
+static const char *virtmagma_connection_object_type_string(
+	enum virtmagma_connection_object_type type)
+{
+	switch (type) {
+	case MAGMA_BUFFER:
+		return "MAGMA_BUFFER";
+	case MAGMA_SEMAPHORE:
+		return "MAGMA_SEMAPHORE";
+	case MAGMA_CONTEXT:
+		return "MAGMA_CONTEXT";
+	default:
+		return "[UNKNOWN]";
+	}
+}
+
+struct virtmagma_buffer {
+	size_t size_requested;
+	size_t size_allocated;
+	bool is_command_buffer;
+};
+
+struct virtmagma_semaphore {
+	uint8_t dummy;
+};
+
+struct virtmagma_context {
+	uint8_t dummy;
+};
+
+struct virtmagma_connection;
+struct virtmagma_connection_object {
+	struct virtmagma_connection *parent_connection;
+	enum virtmagma_connection_object_type type;
+	uint64_t host_value;
+	union {
+		struct virtmagma_buffer buffer;
+		struct virtmagma_semaphore semaphore;
+		struct virtmagma_context context;
+	};
+	struct hlist_node node;
+};
+
+struct virtmagma_connection {
+	struct virtmagma_instance *parent_instance;
+	uint64_t host_value;
+	DECLARE_HASHTABLE(objects, CONNECTION_OBJECTS_HASHTABLE_BITS);
+	struct hlist_node node;
+};
+
+struct virtmagma_instance {
+	struct virtmagma_info *vi;
+	int id;
+	DECLARE_HASHTABLE(connections, CONNECTIONS_HASHTABLE_BITS);
+	int mmfd;
+	struct {
+		struct virtmagma_buffer *buffer;
+		u64 phys_addr;
+		bool pending;
+	} mmap_params;
+	struct {
+		pid_t pid;
+		pid_t tgid;
+		char comm[TASK_COMM_LEN];
+	} creator;
+	struct kmem_cache *msg_cache;
+	void *wayland_device_private_data;
+};
+
+struct virtmagma_virtio_command {
+	void *request_ptr;
+	size_t request_size;
+	void *response_ptr;
+	size_t response_size;
+};
+
+static void virtmagma_cache_ctor(void *p)
+{
+	memset(p, 0, MESSAGE_CACHE_OBJECT_SIZE);
+}
+
+static int vq_out_send_sync(struct virtmagma_info *vi,
+			    struct virtmagma_virtio_command *command)
 {
 	int ret;
-	struct scatterlist sg[1];
+	DECLARE_COMPLETION_ONSTACK(finish_completion);
+	struct scatterlist sg_out;
+	struct scatterlist sg_in;
+	struct scatterlist *sgs[] = { &sg_out, &sg_in };
+	init_completion(&finish_completion);
+	sg_init_one(&sg_out, command->request_ptr, command->request_size);
+	sg_init_one(&sg_in, command->response_ptr, command->response_size);
 
-	sg_init_one(sg, buffer, VQ_DESCRIPTOR_SIZE);
+	mutex_lock(&vi->vq_out_lock);
+	while ((ret = virtqueue_add_sgs(vi->vq_out, sgs, 1, 1,
+					&finish_completion, GFP_KERNEL)) ==
+	       -ENOSPC) {
+		mutex_unlock(&vi->vq_out_lock);
+		if (!wait_event_timeout(vi->out_waitq, vi->vq_out->num_free > 0,
+					HZ))
+			return -EBUSY;
+		mutex_lock(&vi->vq_out_lock);
+	}
+	if (!ret)
+		virtqueue_kick(vi->vq_out);
+	mutex_unlock(&vi->vq_out_lock);
 
-	ret = virtqueue_add_inbuf(vq, sg, 1, buffer, GFP_KERNEL);
-	if (ret) {
-		pr_warn("virtmagma: failed to give inbuf to host: %d\n", ret);
+	wait_for_completion(&finish_completion);
+
+	return ret;
+}
+
+/* Verify that a virtio command's response matches its expected response.
+   Note that a match indicates only that the proxying of the magma command
+   has succeeded, not necessarily that the magma command itself did. */
+static int virtmagma_check_expected_response_type(void *request, void *response)
+{
+	struct virtio_magma_ctrl_hdr *request_hdr = request;
+	struct virtio_magma_ctrl_hdr *response_hdr = response;
+	if (virtio_magma_expected_response_type(request_hdr->type) !=
+	    response_hdr->type) {
+		pr_warn("virtmagma: unexpected virtio response %s (%d) to request %s (%d)",
+			virtio_magma_ctrl_type_string(response_hdr->type),
+			response_hdr->type,
+			virtio_magma_ctrl_type_string(request_hdr->type),
+			request_hdr->type);
+		return -EIO;
+	}
+	return 0;
+}
+
+static struct virtmagma_connection *
+	get_connection(struct virtmagma_instance *instance, uint64_t id)
+{
+	struct virtmagma_connection *connection = NULL;
+	hash_for_each_possible (instance->connections, connection, node, id) {
+		if (connection->host_value == id)
+			break;
+	}
+	if (!connection) {
+		pr_warn("virtmagma: invalid connection id %lld", id);
+	}
+	return connection;
+}
+
+static struct virtmagma_connection_object *
+	get_connection_object(struct virtmagma_connection *connection,
+			      uint64_t id,
+			      enum virtmagma_connection_object_type type)
+{
+	struct virtmagma_connection_object *object = NULL;
+	hash_for_each_possible (connection->objects, object, node, id) {
+		if (object->type == type && object->host_value == id)
+			break;
+	}
+	if (!object) {
+		pr_warn("virtmagma: invalid %s object id %lld",
+			virtmagma_connection_object_type_string(type), id);
+	}
+	return object;
+}
+
+static int control_type(void *p)
+{
+	return ((struct virtio_magma_ctrl_hdr *)p)->type;
+}
+
+static int release_buffer(struct virtmagma_buffer *buffer)
+{
+	int ret;
+	struct virtio_magma_release_buffer_ctrl *request;
+	struct virtio_magma_release_buffer_resp *response;
+	struct virtmagma_virtio_command command;
+	struct virtmagma_connection_object *object = container_of(
+		buffer, struct virtmagma_connection_object, buffer);
+	BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE);
+	BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE);
+
+	request = kmem_cache_alloc(
+		object->parent_connection->parent_instance->msg_cache,
+		GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+
+	response = kmem_cache_alloc(
+		object->parent_connection->parent_instance->msg_cache,
+		GFP_KERNEL);
+	if (!response) {
+		ret = -ENOMEM;
+		goto free_request;
+	}
+
+	request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_BUFFER;
+	request->connection = object->parent_connection->host_value;
+	request->buffer = object->host_value;
+
+	command.request_ptr = request;
+	command.request_size = sizeof(*request);
+	command.response_ptr = response;
+	command.response_size = sizeof(*response);
+
+	ret = vq_out_send_sync(object->parent_connection->parent_instance->vi,
+			       &command);
+	if (ret)
+		goto free_response;
+
+	if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_BUFFER)
+		ret = -EIO;
+
+free_response:
+	kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+			response);
+
+free_request:
+	kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+			request);
+
+	return ret;
+}
+
+static int release_command_buffer(struct virtmagma_buffer *buffer)
+{
+	int ret;
+	struct virtio_magma_release_command_buffer_ctrl *request;
+	struct virtio_magma_release_command_buffer_resp *response;
+	struct virtmagma_virtio_command command;
+	struct virtmagma_connection_object *object = container_of(
+		buffer, struct virtmagma_connection_object, buffer);
+	BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE);
+	BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE);
+
+	request = kmem_cache_alloc(
+		object->parent_connection->parent_instance->msg_cache,
+		GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+
+	response = kmem_cache_alloc(
+		object->parent_connection->parent_instance->msg_cache,
+		GFP_KERNEL);
+	if (!response) {
+		ret = -ENOMEM;
+		goto free_request;
+	}
+
+	request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER;
+	request->connection = object->parent_connection->host_value;
+	request->command_buffer = object->host_value;
+
+	command.request_ptr = request;
+	command.request_size = sizeof(*request);
+	command.response_ptr = response;
+	command.response_size = sizeof(*response);
+
+	ret = vq_out_send_sync(object->parent_connection->parent_instance->vi,
+			       &command);
+	if (ret)
+		goto free_response;
+
+	if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER)
+		ret = -EIO;
+
+free_response:
+	kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+			response);
+
+free_request:
+	kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+			request);
+
+	return ret;
+}
+
+static int release_semaphore(struct virtmagma_semaphore *semaphore)
+{
+	int ret;
+	struct virtio_magma_release_semaphore_ctrl *request;
+	struct virtio_magma_release_semaphore_resp *response;
+	struct virtmagma_virtio_command command;
+	struct virtmagma_connection_object *object = container_of(
+		semaphore, struct virtmagma_connection_object, semaphore);
+	BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE);
+	BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE);
+
+	request = kmem_cache_alloc(
+		object->parent_connection->parent_instance->msg_cache,
+		GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+
+	response = kmem_cache_alloc(
+		object->parent_connection->parent_instance->msg_cache,
+		GFP_KERNEL);
+	if (!response) {
+		ret = -ENOMEM;
+		goto free_request;
+	}
+
+	request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE;
+	request->connection = object->parent_connection->host_value;
+	request->semaphore = object->host_value;
+
+	command.request_ptr = request;
+	command.request_size = sizeof(*request);
+	command.response_ptr = response;
+	command.response_size = sizeof(*response);
+
+	ret = vq_out_send_sync(object->parent_connection->parent_instance->vi,
+			       &command);
+	if (ret)
+		goto free_response;
+
+	if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE)
+		ret = -EIO;
+
+free_response:
+	kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+			response);
+
+free_request:
+	kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+			request);
+
+	return ret;
+}
+
+static int release_context(struct virtmagma_context *context)
+{
+	int ret;
+	struct virtio_magma_release_context_ctrl *request;
+	struct virtio_magma_release_context_resp *response;
+	struct virtmagma_virtio_command command;
+	struct virtmagma_connection_object *object = container_of(
+		context, struct virtmagma_connection_object, context);
+	BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE);
+	BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE);
+
+	request = kmem_cache_alloc(
+		object->parent_connection->parent_instance->msg_cache,
+		GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+
+	response = kmem_cache_alloc(
+		object->parent_connection->parent_instance->msg_cache,
+		GFP_KERNEL);
+	if (!response) {
+		ret = -ENOMEM;
+		goto free_request;
+	}
+
+	request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_CONTEXT;
+	request->connection = object->parent_connection->host_value;
+	request->context_id = object->host_value;
+
+	command.request_ptr = request;
+	command.request_size = sizeof(*request);
+	command.response_ptr = response;
+	command.response_size = sizeof(*response);
+
+	ret = vq_out_send_sync(object->parent_connection->parent_instance->vi,
+			       &command);
+	if (ret)
+		goto free_response;
+
+	if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_CONTEXT)
+		ret = -EIO;
+
+free_response:
+	kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+			response);
+
+free_request:
+	kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+			request);
+
+	return ret;
+}
+
+static int release_connection(struct virtmagma_connection *connection)
+{
+	int ret;
+	int bkt;
+	struct virtmagma_connection_object *object;
+	struct virtio_magma_release_connection_ctrl *request;
+	struct virtio_magma_release_connection_resp *response;
+	struct virtmagma_virtio_command command;
+	uint64_t leaked_buffers = 0;
+	uint64_t leaked_command_buffers = 0;
+	uint64_t leaked_semaphores = 0;
+	uint64_t leaked_contexts = 0;
+
+	/* first, release any child objects */
+
+	hash_for_each (connection->objects, bkt, object, node) {
+		switch (object->type) {
+		case MAGMA_BUFFER:
+			if (object->buffer.is_command_buffer) {
+				release_command_buffer(&object->buffer);
+				++leaked_command_buffers;
+			} else {
+				release_buffer(&object->buffer);
+				++leaked_buffers;
+			}
+			break;
+		case MAGMA_CONTEXT:
+			release_context(&object->context);
+			++leaked_contexts;
+			break;
+		case MAGMA_SEMAPHORE:
+			release_semaphore(&object->semaphore);
+			++leaked_semaphores;
+			break;
+		default:
+			pr_err("virtmagma: unknown connection object (%d)",
+			       object->type);
+			break;
+		}
+	}
+	if (leaked_buffers || leaked_command_buffers || leaked_semaphores ||
+	    leaked_contexts) {
+		pr_info("virtmagma: connection %lld from command %s closed with leaked objects:\n",
+			connection->host_value,
+			connection->parent_instance->creator.comm);
+		pr_cont("virtmagma: buffers: %lld\n", leaked_buffers);
+		pr_cont("virtmagma: command buffers: %lld\n",
+			leaked_command_buffers);
+		pr_cont("virtmagma: semaphores: %lld\n", leaked_semaphores);
+		pr_cont("virtmagma: contexts: %lld\n", leaked_contexts);
+	}
+
+	/* now release the connection */
+
+	BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE);
+	BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE);
+
+	request = kmem_cache_alloc(connection->parent_instance->msg_cache,
+				   GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+
+	response = kmem_cache_alloc(connection->parent_instance->msg_cache,
+				    GFP_KERNEL);
+	if (!response) {
+		ret = -ENOMEM;
+		goto free_request;
+	}
+
+	request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_CONNECTION;
+	request->connection = connection->host_value;
+
+	command.request_ptr = request;
+	command.request_size = sizeof(*request);
+	command.response_ptr = response;
+	command.response_size = sizeof(*response);
+
+	ret = vq_out_send_sync(connection->parent_instance->vi, &command);
+	if (ret)
+		goto free_response;
+
+	if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_CONNECTION)
+		ret = -EIO;
+
+free_response:
+	kmem_cache_free(connection->parent_instance->msg_cache, response);
+
+free_request:
+	kmem_cache_free(connection->parent_instance->msg_cache, request);
+
+	return ret;
+}
+
+static int destroy_connection(struct virtmagma_connection *connection)
+{
+	int bkt;
+	struct virtmagma_connection_object *object;
+	hash_for_each (connection->objects, bkt, object, node) {
+		hash_del(&object->node);
+		kfree(object);
+	}
+	hash_del(&connection->node);
+	kfree(connection);
+	return 0;
+}
+
+static int destroy_instance(int id, void *p, void *data)
+{
+	struct virtmagma_instance *instance = p;
+	struct virtmagma_connection *connection;
+	int bkt;
+	uint64_t leaked_connections = 0;
+
+	instance = p;
+
+	hash_for_each (instance->connections, bkt, connection, node) {
+		++leaked_connections;
+	}
+	if (leaked_connections) {
+		pr_info("virtmagma: command %s exited with %lld leaked connections",
+			instance->creator.comm, leaked_connections);
+	}
+	hash_for_each (instance->connections, bkt, connection, node) {
+		release_connection(connection);
+		destroy_connection(connection);
+	}
+
+	kmem_cache_destroy(instance->msg_cache);
+
+	kfree(instance);
+	return 0;
+}
+
+static int virtmagma_mmfd_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct virtmagma_instance *instance = filp->private_data;
+	unsigned long vm_size = vma->vm_end - vma->vm_start;
+	size_t max_map_size;
+	int ret;
+
+	if (!instance)
+		return -ENODEV;
+
+	if (!instance->mmap_params.pending) {
+		pr_warn("virtmagma: user called mmap on the mmfd without first submitting a magma_map ioctl");
+		return -EINVAL;
+	}
+
+	instance->mmap_params.pending = false;
+
+	if (instance->mmap_params.buffer->is_command_buffer)
+		max_map_size = instance->mmap_params.buffer->size_requested;
+	else
+		max_map_size = instance->mmap_params.buffer->size_allocated;
+	max_map_size = PAGE_ALIGN(max_map_size);
+
+	if (vm_size > max_map_size) {
+		pr_warn("virtmagma: user tried to mmap with a size (%ld) larger than the buffer's size (%ld)",
+			vm_size, max_map_size);
+		return -EINVAL;
+	}
+
+	ret = io_remap_pfn_range(vma, vma->vm_start,
+				 instance->mmap_params.phys_addr / PAGE_SIZE,
+				 vm_size, vma->vm_page_prot);
+
+	if (ret)
+		return ret;
+
+	vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+	return ret;
+}
+
+static const struct file_operations virtmagma_mmfd_fops = {
+	.mmap = virtmagma_mmfd_mmap,
+};
+
+static int create_instance(struct virtmagma_info *vi,
+			   struct virtmagma_instance **instance_out)
+{
+	int ret;
+	struct file *filp;
+	struct virtmagma_instance *instance;
+
+	*instance_out = NULL;
+
+	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+	if (!instance)
+		return -ENOMEM;
+	instance->vi = vi;
+
+	mutex_lock(&vi->instances_lock);
+	ret = idr_alloc(&vi->instances, instance, 1, -1, GFP_KERNEL);
+	mutex_unlock(&vi->instances_lock);
+	if (ret <= 0) {
+		ret = -ENOMEM;
+		goto free_instance;
+	}
+	instance->id = ret;
+
+	ret = anon_inode_getfd("[virtmagma_mmfd]", &virtmagma_mmfd_fops,
+			       instance, O_RDWR);
+	if (ret < 0) {
+		goto free_instance;
+	}
+	instance->mmfd = ret;
+
+	hash_init(instance->connections);
+	instance->creator.pid = current->pid;
+	instance->creator.tgid = current->tgid;
+	memcpy(instance->creator.comm, current->comm,
+	       sizeof(instance->creator.comm));
+	instance->creator.comm[sizeof(instance->creator.comm) - 1] = 0;
+
+	filp = filp_open(WAYLAND_DEVICE_PATH, O_RDWR, 0);
+	if (filp) {
+		instance->wayland_device_private_data = filp->private_data;
+	} else {
+		pr_warn("virtmagma: failed to open wayland device at %s\n",
+			WAYLAND_DEVICE_PATH);
+		pr_cont("virtmagma: magma_export will not be available\n");
+	}
+	filp_close(filp, 0);
+
+	instance->msg_cache =
+		kmem_cache_create("virtmagma_cache", MESSAGE_CACHE_OBJECT_SIZE,
+				  MESSAGE_CACHE_OBJECT_SIZE, 0,
+				  virtmagma_cache_ctor);
+	if (!instance->msg_cache) {
+		pr_err("virtmagma: failed to create message cache");
+		return -ENOMEM;
+	}
+
+	*instance_out = instance;
+
+	return 0;
+
+free_instance:
+	kfree(instance);
+
+	return ret;
+}
+
+static int virtmagma_command_magma_create_connection(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection *connection;
+	struct virtio_magma_create_connection_ctrl *request =
+		command->request_ptr;
+	struct virtio_magma_create_connection_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	/* pass on magma errors without creating a connection object */
+	if (response->result_return) {
+		pr_warn("virtmagma: magma_create_connection returned %d",
+			(int32_t)response->result_return);
+		return 0; /* the ioctl is still successful */
+	}
+
+	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
+	if (!connection)
+		return -ENOMEM;
+
+	connection->parent_instance = instance;
+	connection->host_value = response->connection_out;
+	hash_init(connection->objects);
+
+	hash_add(instance->connections, &connection->node,
+		 connection->host_value);
+
+	return 0;
+}
+
+static int virtmagma_command_magma_release_connection(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection *connection = NULL;
+	struct virtio_magma_release_connection_ctrl *request =
+		command->request_ptr;
+	struct virtio_magma_release_connection_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	ret = release_connection(connection);
+	if (ret)
+		return ret;
+
+	return destroy_connection(connection);
+}
+
+static int virtmagma_command_magma_create_context(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection *connection;
+	struct virtmagma_connection_object *object;
+	struct virtio_magma_create_context_ctrl *request = command->request_ptr;
+	struct virtio_magma_create_context_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	/* magma_create_context does not return errors */
+
+	object = kzalloc(sizeof(*object), GFP_KERNEL);
+	if (!object)
+		return -ENOMEM;
+
+	object->parent_connection = connection;
+	object->host_value = response->context_id_out;
+	object->type = MAGMA_CONTEXT;
+
+	hash_add(connection->objects, &object->node, object->host_value);
+
+	return 0;
+}
+
+static int virtmagma_command_magma_release_context(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection *connection = NULL;
+	struct virtmagma_connection_object *object = NULL;
+	struct virtio_magma_release_context_ctrl *request =
+		command->request_ptr;
+	struct virtio_magma_release_context_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	object = get_connection_object(connection, request->context_id,
+				       MAGMA_CONTEXT);
+	if (!object)
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	hash_del(&object->node);
+	kfree(object);
+
+	return 0;
+}
+
+static int virtmagma_command_magma_create_buffer(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection *connection;
+	struct virtmagma_connection_object *object;
+	struct virtio_magma_create_buffer_ctrl *request = command->request_ptr;
+	struct virtio_magma_create_buffer_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	/* pass on magma errors without creating a buffer object */
+	if (response->result_return) {
+		pr_warn("virtmagma: magma_create_buffer returned %d",
+			(int32_t)response->result_return);
+		return 0; /* the ioctl is still successful */
+	}
+
+	object = kzalloc(sizeof(*object), GFP_KERNEL);
+	if (!object)
+		return -ENOMEM;
+
+	object->parent_connection = connection;
+	object->host_value = response->buffer_out;
+	object->type = MAGMA_BUFFER;
+	object->buffer.size_requested = request->size;
+	object->buffer.size_allocated = response->size_out;
+	object->buffer.is_command_buffer = false;
+
+	hash_add(connection->objects, &object->node, object->host_value);
+
+	return 0;
+}
+
+static int virtmagma_command_magma_release_buffer(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection *connection = NULL;
+	struct virtmagma_connection_object *object = NULL;
+	struct virtio_magma_release_buffer_ctrl *request = command->request_ptr;
+	struct virtio_magma_release_buffer_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	object = get_connection_object(connection, request->buffer,
+				       MAGMA_BUFFER);
+	if (!object)
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	hash_del(&object->node);
+	kfree(object);
+
+	return 0;
+}
+
+static int virtmagma_command_magma_create_command_buffer(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection *connection;
+	struct virtmagma_connection_object *object;
+	struct virtio_magma_create_command_buffer_ctrl *request =
+		command->request_ptr;
+	struct virtio_magma_create_command_buffer_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	/* pass on magma errors without creating a command_buffer object */
+	if (response->result_return) {
+		pr_warn("virtmagma: magma_create_command_buffer returned %d",
+			(int32_t)response->result_return);
+		return 0; /* the ioctl is still successful */
+	}
+
+	object = kzalloc(sizeof(*object), GFP_KERNEL);
+	if (!object)
+		return -ENOMEM;
+
+	object->parent_connection = connection;
+	object->host_value = response->buffer_out;
+	object->type = MAGMA_BUFFER;
+	object->buffer.size_requested = request->size;
+	object->buffer.is_command_buffer = true;
+
+	hash_add(connection->objects, &object->node, object->host_value);
+
+	return 0;
+}
+
+static int virtmagma_command_magma_release_command_buffer(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection *connection = NULL;
+	struct virtmagma_connection_object *object = NULL;
+	struct virtio_magma_release_command_buffer_ctrl *request =
+		command->request_ptr;
+	struct virtio_magma_release_command_buffer_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	object = get_connection_object(connection, request->command_buffer,
+				       MAGMA_BUFFER);
+	if (!object)
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	hash_del(&object->node);
+	kfree(object);
+
+	return 0;
+}
+
+static int virtmagma_command_magma_submit_command_buffer(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection *connection = NULL;
+	struct virtmagma_connection_object *object = NULL;
+	struct virtio_magma_submit_command_buffer_ctrl *request =
+		command->request_ptr;
+	struct virtio_magma_submit_command_buffer_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	object = get_connection_object(connection, request->command_buffer,
+				       MAGMA_BUFFER);
+	if (!object)
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	/* command buffers are implicitly freed on submit */
+
+	hash_del(&object->node);
+	kfree(object);
+
+	return 0;
+}
+
+/* Handle VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE: validate the guest-side
+   connection handle, forward the request to the host, and on success record
+   the returned semaphore handle in the connection's object table so later
+   release/wait calls can validate it. */
+static int virtmagma_command_magma_create_semaphore(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection *connection;
+	struct virtmagma_connection_object *object;
+	struct virtio_magma_create_semaphore_ctrl *request =
+		command->request_ptr;
+	struct virtio_magma_create_semaphore_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	/* reject handles that do not belong to this instance */
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	/* pass on magma errors without creating a semaphore object */
+	if (response->result_return) {
+		pr_warn("virtmagma: magma_create_semaphore returned %d",
+			(int32_t)response->result_return);
+		return 0; /* the ioctl is still successful */
+	}
+
+	/* NOTE(review): if this allocation fails the host-side semaphore was
+	   already created and is never released — confirm acceptable. */
+	object = kzalloc(sizeof(*object), GFP_KERNEL);
+	if (!object)
+		return -ENOMEM;
+
+	object->parent_connection = connection;
+	object->host_value = response->semaphore_out;
+	object->type = MAGMA_SEMAPHORE;
+
+	/* keyed by the host handle for lookup in get_connection_object() */
+	hash_add(connection->objects, &object->node, object->host_value);
+
+	return 0;
+}
+
+/* Handle VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE: validate the connection and
+   semaphore handles, forward the release to the host, and drop the guest's
+   tracking object only after the host confirms the expected response. */
+static int virtmagma_command_magma_release_semaphore(
+	struct virtmagma_instance *instance,
+	struct virtmagma_connection *connection = NULL;
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection_object *object = NULL;
+	struct virtio_magma_release_semaphore_ctrl *request =
+		command->request_ptr;
+	struct virtio_magma_release_semaphore_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	/* only handles previously recorded as semaphores may be released */
+	object = get_connection_object(connection, request->semaphore,
+				       MAGMA_SEMAPHORE);
+	if (!object)
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	/* host has released the semaphore; drop the guest-side record */
+	hash_del(&object->node);
+	kfree(object);
+
+	return 0;
+}
+
+/* Handle VIRTIO_MAGMA_CMD_MAP: forward the map request to the host, then
+   stash the returned physical address on the instance so the user's next
+   mmap() on the mmfd can complete the mapping.  The required mmap length is
+   appended to the response for the user. */
+static int virtmagma_command_magma_map(struct virtmagma_instance *instance,
+				       struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtmagma_connection *connection;
+	struct virtmagma_connection_object *object;
+	struct virtio_magma_map_ctrl *request = command->request_ptr;
+	/* this ioctl has a size_t output parameter appended */
+	struct {
+		struct virtio_magma_map_resp virtio_response;
+		size_t size_to_mmap_out;
+	} *response = command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	object = get_connection_object(connection, request->buffer,
+				       MAGMA_BUFFER);
+	if (!object)
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	/* store parameters for subsequent mmap call */
+
+	/* NOTE(review): mmap_params is written without a lock; concurrent
+	   magma_map ioctls on the same fd could clobber each other — confirm
+	   callers serialize per instance. */
+	instance->mmap_params.buffer = &object->buffer;
+	instance->mmap_params.phys_addr = response->virtio_response.addr_out;
+	instance->mmap_params.pending = true;
+
+	/* user must use the returned size in its subsequent mmap call */
+
+	response->size_to_mmap_out = object->buffer.size_requested;
+
+	return 0;
+}
+
+/* Handle VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES.  The wire format appends the
+   user's semaphore-handle array directly after the control struct, so the
+   request buffer is reallocated large enough to carry both before sending.
+   The replacement buffer's ownership passes back to the ioctl dispatcher,
+   which frees any request_ptr that differs from its own allocation. */
+static int virtmagma_command_magma_wait_semaphores(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	size_t semaphores_size;
+	struct virtio_magma_wait_semaphores_ctrl *request =
+		command->request_ptr;
+	struct virtio_magma_wait_semaphores_resp *response =
+		command->response_ptr;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	/* count is user-controlled: reject values whose byte size would
+	   overflow the allocation below */
+	if (request->count > (SIZE_MAX - sizeof(*request)) / sizeof(uint64_t))
+		return -EINVAL;
+
+	/* reallocate request buffer with enough space for the semaphores */
+	semaphores_size = request->count * sizeof(uint64_t);
+	command->request_size = sizeof(*request) + semaphores_size;
+	command->request_ptr = kzalloc(command->request_size, GFP_KERNEL);
+	if (!command->request_ptr)
+		return -ENOMEM;
+
+	memcpy(command->request_ptr, request, sizeof(*request));
+	/* copy_from_user returns the number of bytes NOT copied, which is
+	   not an errno; convert failure to -EFAULT instead of returning a
+	   positive byte count to the caller */
+	if (copy_from_user((char *)command->request_ptr + sizeof(*request),
+			   (void *)request->semaphores, semaphores_size))
+		return -EFAULT;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	return virtmagma_check_expected_response_type(request, response);
+}
+
+/* Handle VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL: enlarge the response
+   buffer so the host can append up to NOTIFICATION_MAX_BYTES of payload
+   after the response struct, then copy that payload to the user's buffer.
+   The original response buffer and the replacement are both freed by the
+   ioctl dispatcher. */
+static int virtmagma_command_magma_read_notification_channel(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtio_magma_read_notification_channel_ctrl *request =
+		command->request_ptr;
+	struct virtio_magma_read_notification_channel_resp *response =
+		command->response_ptr;
+
+	/* pointers must be validated before the response buffer is swapped */
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	/* reallocate response buffer with additional space for notification data.
+	   note that the size is not modified, as we only want the response struct
+	   itself to be copied back to the user by our caller */
+
+	command->response_ptr = response =
+		kzalloc(sizeof(*response) + NOTIFICATION_MAX_BYTES, GFP_KERNEL);
+	if (!command->response_ptr)
+		return -ENOMEM;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	/* pass on magma errors without writing to the buffer */
+	if (response->result_return) {
+		pr_warn("virtmagma: magma_read_notification_channel returned %d",
+			(int32_t)response->result_return);
+		return 0; /* the ioctl is still successful */
+	}
+
+	/* never write more than the user said their buffer can hold */
+	if (response->buffer_size_out > request->buffer_size) {
+		pr_err("virtmagma: magma_read_notification_channel returned buffer_size_out (%lld) larger than buffer_size (%lld)",
+		       response->buffer_size_out, request->buffer_size);
+		return -EIO;
+	}
+
+	/* NOTE(review): copy_to_user's positive "bytes not copied" count is
+	   returned as the ioctl result here (file-wide convention) — confirm
+	   userspace treats any non-zero return as failure. */
+	return copy_to_user((void *)request->buffer,
+			    (char *)command->response_ptr + sizeof(*response),
+			    response->buffer_size_out);
+}
+
+#if IS_ENABLED(CONFIG_VIRTIO_WL)
+/* use the implementation in the virtio_wl module */
+extern int virtwl_create_fd_for_vfd(void *filp_private_data, uint32_t vfd_id);
+#else
+#define virtwl_create_fd_for_vfd(a, b) (-ENODEV)
+#endif
+
+static int
+	virtmagma_command_magma_export(struct virtmagma_instance *instance,
+				       struct virtmagma_virtio_command *command)
+{
+	int ret;
+	struct virtio_magma_export_ctrl *request = command->request_ptr;
+	struct virtio_magma_export_resp *response = command->response_ptr;
+
+	if (!instance->wayland_device_private_data)
+		return -ENODEV;
+
+	if (!COMMAND_OK(command, request, response))
+		return -EINVAL;
+
+	ret = vq_out_send_sync(instance->vi, command);
+	if (ret)
+		return ret;
+
+	ret = virtmagma_check_expected_response_type(request, response);
+	if (ret)
+		return ret;
+
+	/* pass on magma errors without creating a vfd */
+	if (response->result_return) {
+		pr_warn("virtmagma: magma_export returned %d",
+			(int32_t)response->result_return);
+		return 0; /* the ioctl is still successful */
+	}
+
+	ret = virtwl_create_fd_for_vfd(instance->wayland_device_private_data,
+				       response->buffer_handle_out);
+	if (ret < 0) {
+		pr_err("virtmagma: failed to get vfd creation info for vfd id %lld",
+		       response->buffer_handle_out);
 		return ret;
 	}
 
-	return 0;
-}
-
-static int vq_queue_out(struct virtmagma_info_private *vip,
-			struct scatterlist *out_sg,
-			struct scatterlist *in_sg,
-			struct completion *finish_completion,
-			bool nonblock)
-{
-	struct virtqueue *vq = vip->vqs[VIRTMAGMA_VQ_OUT];
-	struct mutex *vq_lock = &vip->vq_locks[VIRTMAGMA_VQ_OUT];
-	struct scatterlist *sgs[] = { out_sg, in_sg };
-	int ret = 0;
-
-	mutex_lock(vq_lock);
-	while ((ret = virtqueue_add_sgs(vq, sgs, 1, 1, finish_completion,
-					GFP_KERNEL)) == -ENOSPC) {
-		mutex_unlock(vq_lock);
-		if (nonblock)
-			return -EAGAIN;
-		if (!wait_event_timeout(vip->out_waitq, vq->num_free > 0, HZ))
-			return -EBUSY;
-		mutex_lock(vq_lock);
-	}
-	if (!ret)
-		virtqueue_kick(vq);
-	mutex_unlock(vq_lock);
-
-	if (!nonblock)
-		wait_for_completion(finish_completion);
-
-	return ret;
-}
-
-static int vq_fill_locked(struct virtqueue *vq)
-{
-	void *buffer;
-	int ret = 0;
-
-	while (vq->num_free > 0) {
-		buffer = kmalloc(VQ_DESCRIPTOR_SIZE, GFP_KERNEL);
-		if (!buffer) {
-			ret = -ENOMEM;
-			goto clear_queue;
-		}
-
-		ret = vq_return_inbuf_locked(vq, buffer);
-		if (ret)
-			goto clear_queue;
-	}
+	response->buffer_handle_out = ret;
 
 	return 0;
-
-clear_queue:
-	while ((buffer = virtqueue_detach_unused_buf(vq)))
-		kfree(buffer);
-	return ret;
 }
 
-static int virtmagma_ioctl_handshake(void __user *ptr)
+static int virtmagma_ioctl_handshake(struct file *filp, void __user *ptr)
 {
 	struct virtmagma_ioctl_args_handshake ioctl_args;
-	int ret;
-	ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args));
+	int ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args));
 	if (ret)
 		return ret;
 	if (ioctl_args.handshake_inout != VIRTMAGMA_HANDSHAKE_SEND)
@@ -106,406 +1300,400 @@
 	return copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args));
 }
 
-static void* map_driver(unsigned long long pfn_base, size_t size) {
-	size_t num_pages;
-	struct page **pages;
-	size_t page_index;
-	void *ret;
-
-	num_pages = PAGE_COUNT(size);
-
-	pages = kzalloc(sizeof(pages[0]) * num_pages, GFP_KERNEL);
-	if (!pages)
-		return NULL;
-
-	for (page_index = 0; page_index < num_pages; ++page_index)
-		pages[page_index] = pfn_to_page(pfn_base + page_index);
-
-	/* TODO(MA-520): there should be a faster way to do this if pages are all contiguous in physmem */
-	ret = vm_map_ram(pages, num_pages, 0, PAGE_KERNEL);
-	kfree(pages);
-	return ret;
-}
-
-static void unmap_driver(void *addr, size_t size) {
-	vm_unmap_ram(addr, PAGE_COUNT(size));
-}
-
-static int write_driver_file(const char* path, const void* data, size_t size) {
-	struct file *driver_file;
-	ssize_t bytes_written;
-	loff_t offset;
-	int ret;
-
-	driver_file = filp_open(path, O_CLOEXEC | O_CREAT | O_RDWR, 0555);
-	if (!driver_file)
-		return -EFAULT;
-
-	offset = 0;
-	bytes_written = kernel_write(driver_file, data, size, &offset);
-	if (bytes_written != size) {
-		ret = -EFAULT;
-		goto close_file;
-	}
-
-	ret = 0;
-
-close_file:
-	filp_close(driver_file, NULL);
-
-	return ret;
-}
-
-#define DRIVER_PATH "/libvulkan_magma.so"
-
-static int virtmagma_ioctl_get_driver(struct virtmagma_info* vi, void __user *ptr)
+static int virtmagma_ioctl_get_mmfd(struct file *filp, void __user *ptr)
 {
-	struct virtmagma_ioctl_args_get_driver ioctl_args;
-	struct virtio_magma_get_driver *virtio_ctrl;
-	struct virtio_magma_get_driver_resp *virtio_resp;
-	struct completion finish_completion;
-	struct scatterlist out_sg;
-	struct scatterlist in_sg;
-	unsigned long long driver_pfn_base;
-	size_t driver_size;
-	void *driver_data;
-	int ret;
-
-	ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args));
-	if (ret)
-		return -EFAULT;
-
-	virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL);
-	if (!virtio_ctrl)
-		return -ENOMEM;
-	virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL);
-	if (!virtio_resp) {
-		ret = -ENOMEM;
-		goto free_virtio_ctrl;
-	}
-
-	virtio_ctrl->hdr.type = VIRTIO_MAGMA_CMD_GET_DRIVER;
-	virtio_ctrl->page_size = cpu_to_le32(PAGE_SIZE);
-
-	init_completion(&finish_completion);
-
-	sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl));
-	sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp));
-
-	ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */);
-	if (ret)
-		goto free_virtio_resp;
-
-	if (virtio_resp->hdr.type != VIRTIO_MAGMA_RESP_GET_DRIVER) {
-		ret = -EFAULT;
-		goto free_virtio_resp;
-	}
-
-	driver_pfn_base = le64_to_cpu(virtio_resp->pfn);
-	driver_size = le64_to_cpu(virtio_resp->size);
-	driver_data = map_driver(driver_pfn_base, driver_size);
-	if (!driver_data) {
-		ret = -EFAULT;
-		goto free_virtio_resp;
-	}
-
-	ret = write_driver_file(DRIVER_PATH, driver_data, driver_size);
-	if (ret)
-		goto free_driver_alloc;
-
-	ioctl_args.unused = 0;
-	ret = copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args));
-
-free_driver_alloc:
-	unmap_driver(driver_data, driver_size);
-
-free_virtio_resp:
-	kfree(virtio_resp);
-
-free_virtio_ctrl:
-	kfree(virtio_ctrl);
-
-	return ret;
-}
-
-static int virtmagma_ioctl_query(struct virtmagma_info* vi, void __user *ptr)
-{
-	struct virtmagma_ioctl_args_query ioctl_args;
-	struct virtio_magma_query *virtio_ctrl;
-	struct virtio_magma_query_resp *virtio_resp;
-	struct completion finish_completion;
-	struct scatterlist out_sg;
-	struct scatterlist in_sg;
-	int ret;
-
-	ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args));
-	if (ret)
-		return -EFAULT;
-
-	virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL);
-	if (!virtio_ctrl)
-		return -ENOMEM;
-	virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL);
-	if (!virtio_resp) {
-		ret = -ENOMEM;
-		goto free_virtio_ctrl;
-	}
-
-	virtio_ctrl->hdr.type = VIRTIO_MAGMA_CMD_QUERY;
-	virtio_ctrl->field_id = cpu_to_le64(ioctl_args.id);
-
-	init_completion(&finish_completion);
-
-	sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl));
-	sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp));
-
-	ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */);
-	if (ret)
-		goto free_virtio_resp;
-
-	if (virtio_resp->hdr.type != VIRTIO_MAGMA_RESP_QUERY) {
-		ret = -EFAULT;
-		goto free_virtio_resp;
-	}
-	ioctl_args.value_out = le64_to_cpu(virtio_resp->field_value_out);
-	ioctl_args.status_return = le32_to_cpu(virtio_resp->status_return);
-	ret = copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args));
-
-free_virtio_resp:
-	kfree(virtio_resp);
-
-free_virtio_ctrl:
-	kfree(virtio_ctrl);
-
-	return ret;
-}
-
-static int virtmagma_ioctl_create_connection(struct virtmagma_info* vi, void __user *ptr)
-{
-	struct virtmagma_ioctl_args_create_connection ioctl_args;
-	struct virtio_magma_create_connection *virtio_ctrl;
-	struct virtio_magma_create_connection_resp *virtio_resp;
-	struct completion finish_completion;
-	struct scatterlist out_sg;
-	struct scatterlist in_sg;
-	int ret;
-
-	ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args));
-	if (ret)
-		return -EFAULT;
-
-	virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL);
-	if (!virtio_ctrl)
-		return -ENOMEM;
-	virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL);
-	if (!virtio_resp) {
-		ret = -ENOMEM;
-		goto free_virtio_ctrl;
-	}
-
-	virtio_ctrl->hdr.type = VIRTIO_MAGMA_CMD_CREATE_CONNECTION;
-
-	init_completion(&finish_completion);
-
-	sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl));
-	sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp));
-
-	ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */);
-	if (ret)
-		goto free_virtio_resp;
-
-	if (virtio_resp->hdr.type != VIRTIO_MAGMA_RESP_CREATE_CONNECTION) {
-		ret = -EFAULT;
-		goto free_virtio_resp;
-	}
-
-	ioctl_args.connection_return = virtio_resp->connection_return;
-	ret = copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args));
-
-free_virtio_resp:
-	kfree(virtio_resp);
-
-free_virtio_ctrl:
-	kfree(virtio_ctrl);
-
-	return ret;
-}
-
-static int virtmagma_ioctl_release_connection(struct virtmagma_info* vi, void __user *ptr)
-{
-	struct virtmagma_ioctl_args_release_connection ioctl_args;
-	struct virtio_magma_release_connection *virtio_ctrl;
-	struct virtio_magma_release_connection_resp *virtio_resp;
-	struct completion finish_completion;
-	struct scatterlist out_sg;
-	struct scatterlist in_sg;
-	int ret;
-
-	ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args));
-	if (ret)
-		return -EFAULT;
-
-	virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL);
-	if (!virtio_ctrl)
-		return -ENOMEM;
-	virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL);
-	if (!virtio_resp) {
-		ret = -ENOMEM;
-		goto free_virtio_ctrl;
-	}
-
-	virtio_ctrl->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_CONNECTION;
-	virtio_ctrl->connection = ioctl_args.connection;
-
-	init_completion(&finish_completion);
-
-	sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl));
-	sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp));
-
-	ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */);
-	if (ret)
-		goto free_virtio_resp;
-
-	if (virtio_resp->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_CONNECTION)
-		ret = -EFAULT;
-
-free_virtio_resp:
-	kfree(virtio_resp);
-
-free_virtio_ctrl:
-	kfree(virtio_ctrl);
-
-	return ret;
-}
-
-static int virtmagma_probe(struct virtmagma_info* vi, struct virtio_device *vdev)
-{
-	return 0;
-}
-
-static void virtmagma_remove(struct virtmagma_info* vi, struct virtio_device *vdev)
-{
-}
-
-static long virtmagma_ioctl(struct virtmagma_info* vi, unsigned int cmd, void __user *ptr)
-{
-	/* if vi->private is NULL, magma has not been initialized */
-	if (!vi->private)
+	struct virtmagma_ioctl_args_get_mmfd ioctl_args;
+	struct virtmagma_instance *instance = filp->private_data;
+	if (!instance)
 		return -ENODEV;
-	switch (cmd) {
-	case VIRTMAGMA_IOCTL_HANDSHAKE:
-		return virtmagma_ioctl_handshake(ptr);
-	case VIRTMAGMA_IOCTL_GET_DRIVER:
-		return virtmagma_ioctl_get_driver(vi, ptr);
-	case VIRTMAGMA_IOCTL_QUERY:
-		return virtmagma_ioctl_query(vi, ptr);
-	case VIRTMAGMA_IOCTL_CREATE_CONNECTION:
-		return virtmagma_ioctl_create_connection(vi, ptr);
-	case VIRTMAGMA_IOCTL_RELEASE_CONNECTION:
-		return virtmagma_ioctl_release_connection(vi, ptr);
-	default:
+	ioctl_args.fd_out = instance->mmfd;
+	return copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args));
+}
+
+/* Dispatch a single VIRTMAGMA_IOCTL_MAGMA_COMMAND: copy the wire-format
+   request in from userspace, route it to the matching per-command handler
+   (or pass it straight through to the host), then copy the response back.
+   Messages at or under MESSAGE_CACHE_OBJECT_SIZE come from the instance's
+   slab cache; larger ones fall back to kzalloc.
+
+   Fix: the cleanup labels previously had swapped bodies — free_response
+   freed the request and free_request freed the response — so the
+   "goto free_request" taken when the response allocation fails leaked the
+   request and passed NULL to kmem_cache_free(), which does not tolerate
+   NULL.  Each label now frees the buffer it is named for. */
+int virtmagma_ioctl_magma_command(struct file *filp, void __user *ptr)
+{
+	struct virtmagma_ioctl_args_magma_command ioctl_args;
+	struct virtmagma_virtio_command command;
+	void *request;
+	void *response;
+	int request_type;
+	int ret;
+	struct virtmagma_instance *instance = filp->private_data;
+	command.request_ptr = NULL;
+	command.response_ptr = NULL;
+
+	if (!instance)
+		return -ENODEV;
+
+	/* a magma_map must be completed by an mmap on the mmfd before any
+	   further command is accepted */
+	if (instance->mmap_params.pending) {
+		pr_warn("virtmagma: user failed to mmap on the mmfd after submitting a magma_map ioctl");
+		return -EINVAL;
+	}
+
+	/* copy in command arguments */
+
+	ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args));
+	if (ret)
+		return ret;
+
+	/* verify userspace-provided pointers are accessible */
+
+	ret = !access_ok(VERIFY_READ, (void *)ioctl_args.request_address,
+			 ioctl_args.request_size);
+	if (ret)
+		return -EFAULT;
+	ret = !access_ok(VERIFY_WRITE, (void *)ioctl_args.response_address,
+			 ioctl_args.response_size);
+	if (ret)
+		return -EFAULT;
+
+	/* allocate buffers and copy in userspace data */
+
+	if (ioctl_args.request_size <= MESSAGE_CACHE_OBJECT_SIZE)
+		request = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL);
+	else
+		request = kzalloc(ioctl_args.request_size, GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+	if (ioctl_args.response_size <= MESSAGE_CACHE_OBJECT_SIZE)
+		response = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL);
+	else
+		response = kzalloc(ioctl_args.response_size, GFP_KERNEL);
+	if (!response) {
+		ret = -ENOMEM;
+		goto free_request;
+	}
+	command.request_ptr = request;
+	command.response_ptr = response;
+	command.request_size = ioctl_args.request_size;
+	command.response_size = ioctl_args.response_size;
+
+	ret = copy_from_user(command.request_ptr,
+			     (void *)ioctl_args.request_address,
+			     ioctl_args.request_size);
+	if (ret)
+		goto free_response;
+
+	request_type = control_type(command.request_ptr);
+	switch (request_type) {
+	case VIRTIO_MAGMA_CMD_CREATE_CONNECTION:
+		ret = virtmagma_command_magma_create_connection(instance,
+								&command);
+		break;
+	case VIRTIO_MAGMA_CMD_RELEASE_CONNECTION:
+		ret = virtmagma_command_magma_release_connection(instance,
+								 &command);
+		break;
+	case VIRTIO_MAGMA_CMD_CREATE_CONTEXT:
+		ret = virtmagma_command_magma_create_context(instance,
+							     &command);
+		break;
+	case VIRTIO_MAGMA_CMD_RELEASE_CONTEXT:
+		ret = virtmagma_command_magma_release_context(instance,
+							      &command);
+		break;
+	case VIRTIO_MAGMA_CMD_CREATE_BUFFER:
+		ret = virtmagma_command_magma_create_buffer(instance, &command);
+		break;
+	case VIRTIO_MAGMA_CMD_RELEASE_BUFFER:
+		ret = virtmagma_command_magma_release_buffer(instance,
+							     &command);
+		break;
+	case VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER:
+		ret = virtmagma_command_magma_create_command_buffer(instance,
+								    &command);
+		break;
+	case VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER:
+		ret = virtmagma_command_magma_release_command_buffer(instance,
+								     &command);
+		break;
+	case VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER:
+		ret = virtmagma_command_magma_submit_command_buffer(instance,
+								    &command);
+		break;
+	case VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE:
+		ret = virtmagma_command_magma_create_semaphore(instance,
+							       &command);
+		break;
+	case VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE:
+		ret = virtmagma_command_magma_release_semaphore(instance,
+								&command);
+		break;
+	case VIRTIO_MAGMA_CMD_MAP:
+		ret = virtmagma_command_magma_map(instance, &command);
+		break;
+	case VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES:
+		ret = virtmagma_command_magma_wait_semaphores(instance,
+							      &command);
+		break;
+	case VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL:
+		ret = virtmagma_command_magma_read_notification_channel(
+			instance, &command);
+		break;
+	case VIRTIO_MAGMA_CMD_EXPORT:
+		ret = virtmagma_command_magma_export(instance, &command);
+		break;
+	/* pass-through handlers: no guest-side bookkeeping required */
+	case VIRTIO_MAGMA_CMD_QUERY:
+	case VIRTIO_MAGMA_CMD_GET_ERROR:
+	case VIRTIO_MAGMA_CMD_GET_BUFFER_ID:
+	case VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE:
+	case VIRTIO_MAGMA_CMD_CLEAN_CACHE:
+	case VIRTIO_MAGMA_CMD_SET_CACHE_POLICY:
+	case VIRTIO_MAGMA_CMD_GET_BUFFER_CACHE_POLICY:
+	case VIRTIO_MAGMA_CMD_GET_BUFFER_IS_MAPPABLE:
+	case VIRTIO_MAGMA_CMD_UNMAP:
+	case VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU:
+	case VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU:
+	case VIRTIO_MAGMA_CMD_COMMIT_BUFFER:
+	case VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID:
+	case VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE:
+	case VIRTIO_MAGMA_CMD_RESET_SEMAPHORE:
+	case VIRTIO_MAGMA_CMD_GET_NOTIFICATION_CHANNEL_HANDLE:
+	case VIRTIO_MAGMA_CMD_WAIT_NOTIFICATION_CHANNEL:
+		ret = vq_out_send_sync(instance->vi, &command);
+		if (!ret)
+			ret = virtmagma_check_expected_response_type(
+				command.request_ptr, command.response_ptr);
+		break;
+	default:
+		pr_warn("virtmagma: command %s (%d) not implemented",
+			virtio_magma_ctrl_type_string(request_type),
+			request_type);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret) {
+		pr_err("virtmagma: error handling command %s (%d)",
+		       virtio_magma_ctrl_type_string(request_type),
+		       request_type);
+		dump_stack();
+		goto free_response;
+	}
+
+	/* copy responses back to userspace */
+
+	ret = copy_to_user((void *)ioctl_args.response_address,
+			   command.response_ptr, ioctl_args.response_size);
+
+free_response:
+	if (ioctl_args.response_size <= MESSAGE_CACHE_OBJECT_SIZE)
+		kmem_cache_free(instance->msg_cache, response);
+	else
+		kfree(response);
+
+free_request:
+	if (ioctl_args.request_size <= MESSAGE_CACHE_OBJECT_SIZE)
+		kmem_cache_free(instance->msg_cache, request);
+	else
+		kfree(request);
+
+	/* Some of the handlers above may override the command members and hand
+	   off allocation ownership back to us. Free them now. */
+
+	if (command.request_ptr && command.request_ptr != request)
+		kfree(command.request_ptr);
+
+	if (command.response_ptr && command.response_ptr != response)
+		kfree(command.response_ptr);
+
+	return ret;
+}
+
+/* Character-device open: each open fd gets its own virtmagma instance
+   (connections, object tables, message cache, mmfd state), created by
+   create_instance() and torn down in virtmagma_release(). */
+static int virtmagma_open(struct inode *inodep, struct file *filp)
+{
+	int ret;
+	struct virtmagma_instance *instance;
+	struct virtmagma_info *vi =
+		container_of(inodep->i_cdev, struct virtmagma_info, cdev);
+
+	ret = create_instance(vi, &instance);
+	if (ret)
+		return ret;
+
+	/* stashed for the ioctl and release handlers */
+	filp->private_data = instance;
+
+	return 0;
+}
 
-static void vq_in_work_handler(struct work_struct *work)
+static int virtmagma_release(struct inode *inodep, struct file *filp)
 {
-	pr_warn("%s\n", __PRETTY_FUNCTION__);
+	struct virtmagma_instance *instance = filp->private_data;
+	return destroy_instance(instance->id, instance, instance->vi);
 }
 
 static void vq_out_work_handler(struct work_struct *work)
 {
-	struct virtmagma_info_private *vip = container_of(work, struct virtmagma_info_private,
-					      out_vq_work);
-	struct virtqueue *vq = vip->vqs[VIRTMAGMA_VQ_OUT];
-	struct mutex *vq_lock = &vip->vq_locks[VIRTMAGMA_VQ_OUT];
+	struct virtmagma_info *vi =
+		container_of(work, struct virtmagma_info, out_vq_work);
 	unsigned int len;
 	struct completion *finish_completion;
 	bool wake_waitq = false;
 
-	mutex_lock(vq_lock);
-	while ((finish_completion = virtqueue_get_buf(vq, &len)) != NULL) {
+	mutex_lock(&vi->vq_out_lock);
+	while ((finish_completion = virtqueue_get_buf(vi->vq_out, &len)) !=
+	       NULL) {
 		wake_waitq = true;
 		complete(finish_completion);
 	}
-	mutex_unlock(vq_lock);
+	mutex_unlock(&vi->vq_out_lock);
 
 	if (wake_waitq)
-		wake_up_interruptible_all(&vip->out_waitq);
-}
-
-static void vq_in_cb(struct virtqueue *vq)
-{
-	struct virtmagma_info *vi = vq->vdev->priv + magma_info_offset;
-	struct virtmagma_info_private *vip = vi->private;
-	schedule_work(&vip->in_vq_work);
+		wake_up_interruptible_all(&vi->out_waitq);
 }
 
 static void vq_out_cb(struct virtqueue *vq)
 {
-	struct virtmagma_info *vi = vq->vdev->priv + magma_info_offset;
-	struct virtmagma_info_private *vip = vi->private;
-	schedule_work(&vip->out_vq_work);
+	struct virtmagma_info *vi = vq->vdev->priv;
+	schedule_work(&vi->out_vq_work);
 }
 
-static void virtmagma_virtio_find_vqs_prepare(struct virtmagma_info* vi, vq_callback_t **vq_callbacks, const char **vq_names)
+static long virtmagma_ioctl_common(struct file *filp, unsigned int cmd,
+				   void __user *ptr)
 {
-	vq_callbacks[VIRTMAGMA_VQ_IN] = vq_in_cb;
-	vq_callbacks[VIRTMAGMA_VQ_OUT] = vq_out_cb;
-	vq_names[VIRTMAGMA_VQ_IN] = "magma_in";
-	vq_names[VIRTMAGMA_VQ_OUT] = "magma_out";
-}
-
-static void virtmagma_virtio_find_vqs_complete(struct virtmagma_info* vi, struct virtqueue **vqs)
-{
-	struct virtmagma_info_private *vip = vi->private;
-	memcpy(vip->vqs, vqs, sizeof(vip->vqs));
-	INIT_WORK(&vip->in_vq_work, vq_in_work_handler);
-	INIT_WORK(&vip->out_vq_work, vq_out_work_handler);
-	init_waitqueue_head(&vip->out_waitq);
-}
-
-static int virtmagma_device_ready_prepare(struct virtmagma_info* vi)
-{
-	struct virtmagma_info_private *vip = vi->private;
-	int ret;
-	ret = vq_fill_locked(vip->vqs[VIRTMAGMA_VQ_IN]);
-	if (ret) {
-		pr_warn("virtmagma: failed to fill in virtqueue: %d", ret);
-		return ret;
+	switch (cmd) {
+	case VIRTMAGMA_IOCTL_HANDSHAKE:
+		return virtmagma_ioctl_handshake(filp, ptr);
+	case VIRTMAGMA_IOCTL_GET_MMFD:
+		return virtmagma_ioctl_get_mmfd(filp, ptr);
+	case VIRTMAGMA_IOCTL_MAGMA_COMMAND:
+		return virtmagma_ioctl_magma_command(filp, ptr);
+	default:
+		return -ENOTTY;
 	}
-	return 0;
 }
 
-static void virtmagma_device_ready_complete(struct virtmagma_info* vi)
+static long virtmagma_ioctl(struct file *filp, unsigned int cmd,
+			    unsigned long arg)
 {
-	struct virtmagma_info_private *vip = vi->private;
-	virtqueue_kick(vip->vqs[VIRTMAGMA_VQ_IN]);
+	return virtmagma_ioctl_common(filp, cmd, (void __user *)arg);
 }
 
-int virtmagma_init(struct virtmagma_info *vi)
+#ifdef CONFIG_COMPAT
+static long virtmagma_ioctl_compat(struct file *filp, unsigned int cmd,
+				   unsigned long arg)
 {
-	int i;
-	struct virtmagma_info_private *vip;
-	vi->virtio_probe = virtmagma_probe;
-	vi->virtio_remove = virtmagma_remove;
-	vi->ioctl = virtmagma_ioctl;
-	vi->virtio_find_vqs_prepare = virtmagma_virtio_find_vqs_prepare;
-	vi->virtio_find_vqs_complete = virtmagma_virtio_find_vqs_complete;
-	vi->virtio_device_ready_prepare = virtmagma_device_ready_prepare;
-	vi->virtio_device_ready_complete = virtmagma_device_ready_complete;
-	vi->queue_count = VIRTMAGMA_QUEUE_COUNT;
+	return virtmagma_ioctl_common(filp, cmd, compat_ptr(arg));
+}
+#else
+#define virtmagma_ioctl_compat NULL
+#endif
 
-	vi->private = kzalloc(sizeof(*vip), GFP_KERNEL);
-	if (!vi->private)
+static const struct file_operations virtmagma_fops = {
+	.open = virtmagma_open,
+	.unlocked_ioctl = virtmagma_ioctl,
+	.compat_ioctl = virtmagma_ioctl_compat,
+	.release = virtmagma_release,
+};
+
+static int virtmagma_probe(struct virtio_device *vdev)
+{
+	int ret;
+	struct virtmagma_info *vi = NULL;
+	static const char *vq_out_name = "out";
+	vq_callback_t *callback = &vq_out_cb;
+
+	vi = kzalloc(sizeof(struct virtmagma_info), GFP_KERNEL);
+	if (!vi)
 		return -ENOMEM;
-	vip = vi->private;
 
-	for (i = 0; i < VIRTMAGMA_QUEUE_COUNT; i++)
-		mutex_init(&vip->vq_locks[i]);
+	vdev->priv = vi;
 
-	vi->enabled = true;
+	ret = alloc_chrdev_region(&vi->dev_num, 0, 1, "magma");
+	if (ret) {
+		/* do not clobber the real error with -ENOMEM, and log the
+		   magma (not wl) region — both were copy-paste mistakes */
+		pr_warn("virtmagma: failed to allocate magma chrdev region: %d\n",
+			ret);
+		goto free_vi;
+	}
+
+	vi->class = class_create(THIS_MODULE, "magma");
+	if (IS_ERR(vi->class)) {
+		ret = PTR_ERR(vi->class);
+		pr_warn("virtmagma: failed to create magma class: %d\n", ret);
+		goto unregister_region;
+	}
+
+	vi->dev = device_create(vi->class, NULL, vi->dev_num, vi, "magma%d", 0);
+	if (IS_ERR(vi->dev)) {
+		ret = PTR_ERR(vi->dev);
+		pr_warn("virtmagma: failed to create magma0 device: %d\n", ret);
+		goto destroy_class;
+	}
+
+	cdev_init(&vi->cdev, &virtmagma_fops);
+	ret = cdev_add(&vi->cdev, vi->dev_num, 1);
+	if (ret) {
+		pr_warn("virtmagma: failed to add virtio magma character device to system: %d\n",
+			ret);
+		goto destroy_device;
+	}
+
+	mutex_init(&vi->vq_out_lock);
+	mutex_init(&vi->instances_lock);
+	idr_init(&vi->instances);
+
+	ret = virtio_find_vqs(vdev, 1, &vi->vq_out, &callback, &vq_out_name,
+			      NULL);
+	if (ret) {
+		pr_warn("virtmagma: failed to find virtio magma out queue: %d\n",
+			ret);
+		goto del_cdev;
+	}
+
+	INIT_WORK(&vi->out_vq_work, vq_out_work_handler);
+	init_waitqueue_head(&vi->out_waitq);
+
+	virtio_device_ready(vdev);
 
 	return 0;
+
+del_cdev:
+	cdev_del(&vi->cdev);
+destroy_device:
+	put_device(vi->dev);
+destroy_class:
+	class_destroy(vi->class);
+unregister_region:
+	unregister_chrdev_region(vi->dev_num, 1); /* probe allocated 1 minor */
+free_vi:
+	kfree(vi);
+	return ret;
 }
+
+/* Device teardown: destroy all per-fd instances, then unwind everything
+   probe set up, in reverse order. */
+static void virtmagma_remove(struct virtio_device *vdev)
+{
+	struct virtmagma_info *vi = vdev->priv;
+
+	idr_for_each(&vi->instances, destroy_instance, vi);
+	mutex_destroy(&vi->instances_lock);
+	idr_destroy(&vi->instances);
+	cdev_del(&vi->cdev);
+	put_device(vi->dev);
+	class_destroy(vi->class);
+	/* probe allocated exactly one minor; a count of 0 released nothing */
+	unregister_chrdev_region(vi->dev_num, 1);
+	kfree(vi);
+}
+
+static void virtmagma_scan(struct virtio_device *vdev)
+{
+}
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_MAGMA, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct virtio_driver virtio_magma_driver = {
+	.driver.name = KBUILD_MODNAME,
+	.driver.owner = THIS_MODULE,
+	.id_table = id_table,
+	.probe = virtmagma_probe,
+	.remove = virtmagma_remove,
+	.scan = virtmagma_scan,
+};
+
+module_virtio_driver(virtio_magma_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio Magma driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_magma.h b/drivers/virtio/virtio_magma.h
deleted file mode 100644
index efef2b1..0000000
--- a/drivers/virtio/virtio_magma.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef _DRIVERS_VIRTIO_VIRTIO_MAGMA_H
-#define _DRIVERS_VIRTIO_VIRTIO_MAGMA_H
-
-#include <linux/fs.h>
-#include <linux/types.h>
-#include <linux/virtio.h>
-#include <linux/virtmagma.h>
-
-#define VIRTMAGMA_VQ_IN 0
-#define VIRTMAGMA_VQ_OUT 1
-#define VIRTMAGMA_QUEUE_COUNT 2
-
-struct virtmagma_info {
-	bool enabled;
-	int (*virtio_probe)(struct virtmagma_info* vi, struct virtio_device *vdev);
-	void (*virtio_remove)(struct virtmagma_info* vi, struct virtio_device *vdev);
-	long (*ioctl)(struct virtmagma_info* vi, unsigned int cmd, void __user *ptr);
-	void (*virtio_find_vqs_prepare)(struct virtmagma_info* vi, vq_callback_t **vq_callbacks, const char **vq_names);
-	void (*virtio_find_vqs_complete)(struct virtmagma_info* vi, struct virtqueue **vqs);
-	int (*virtio_device_ready_prepare)(struct virtmagma_info* vi);
-	void (*virtio_device_ready_complete)(struct virtmagma_info* vi);
-	uint32_t queue_count;
-	void *private;
-};
-
-int virtmagma_init(struct virtmagma_info *vi);
-
-extern const size_t magma_info_offset;
-
-#endif /* _DRIVERS_VIRTIO_VIRTIO_MAGMA_H */
diff --git a/drivers/virtio/virtio_wl.c b/drivers/virtio/virtio_wl.c
index 23e1f7e..3ee6225 100644
--- a/drivers/virtio/virtio_wl.c
+++ b/drivers/virtio/virtio_wl.c
@@ -60,8 +60,6 @@
 
 #include <uapi/linux/dma-buf.h>
 
-#include "virtio_magma.h"
-
 #define VFD_ILLEGAL_SIGN_BIT 0x80000000
 #define VFD_HOST_VFD_ID_BIT 0x40000000
 
@@ -104,11 +102,10 @@
 	struct mutex vfds_lock;
 	struct idr vfds;
 
-	struct virtmagma_info magma_info;
+	uint32_t vfd_wait_id;
+	struct completion vfd_wait_completion;
 };
 
-const size_t magma_info_offset = offsetof(struct virtwl_info, magma_info);
-
 static struct virtwl_vfd *virtwl_vfd_alloc(struct virtwl_info *vi);
 static void virtwl_vfd_free(struct virtwl_vfd *vfd);
 
@@ -240,6 +237,11 @@
 	vfd->pfn = new->pfn;
 	vfd->flags = new->flags;
 
+	if (vfd->id == vi->vfd_wait_id) {
+		vi->vfd_wait_id = 0;
+		complete(&vi->vfd_wait_completion);
+	}
+
 	return true; /* return the inbuf to vq */
 }
 
@@ -914,6 +916,36 @@
 	return 0;
 }
 
+/* externally visible function to create fds for existing vfds */
+int virtwl_create_fd_for_vfd(void *filp_private_data, uint32_t vfd_id)
+{
+	struct virtwl_info *vi = filp_private_data;
+	struct virtwl_vfd *vfd;
+
+	mutex_lock(&vi->vfds_lock);
+	vfd = idr_find(&vi->vfds, vfd_id);
+	mutex_unlock(&vi->vfds_lock);
+
+	if (!vfd) {
+		vi->vfd_wait_id = vfd_id;
+		reinit_completion(&vi->vfd_wait_completion);
+		/* The vfd may arrive between the lookup above and
+		 * reinit_completion(); then complete() is never signalled,
+		 * so re-check the idr even if this wait times out. */
+		wait_for_completion_timeout(&vi->vfd_wait_completion, HZ);
+		mutex_lock(&vi->vfds_lock);
+		vfd = idr_find(&vi->vfds, vfd_id);
+		mutex_unlock(&vi->vfds_lock);
+	}
+	if (!vfd) {
+		pr_warn("virtwl: request to create fd for non-existent vfd id %u\n",
+			vfd_id);
+		return -ENOENT;
+	}
+
+	return anon_inode_getfd("[virtwl_vfd]", &virtwl_vfd_fops, vfd,
+				virtwl_vfd_file_flags(vfd) | O_CLOEXEC);
+}
+
 static int virtwl_open(struct inode *inodep, struct file *filp)
 {
 	struct virtwl_info *vi = container_of(inodep->i_cdev,
@@ -1202,13 +1234,6 @@
 	if (filp->f_op == &virtwl_vfd_fops)
 		return virtwl_vfd_ioctl(filp, cmd, ptr);
 
-	if (_IOC_TYPE(cmd) == VIRTMAGMA_IOCTL_BASE) {
-		struct virtwl_info *vi = filp->private_data;
-		if (!vi->magma_info.enabled)
-			return -ENODEV; /* virtmagma not initialized */
-		return vi->magma_info.ioctl(&vi->magma_info, cmd, ptr);
-	}
-
 	switch (_IOC_NR(cmd)) {
 	case _IOC_NR(VIRTWL_IOCTL_NEW):
 		return virtwl_ioctl_new(filp, ptr, _IOC_SIZE(cmd));
@@ -1258,11 +1283,9 @@
 {
 	int i;
 	int ret;
-	unsigned queue_count;
 	struct virtwl_info *vi = NULL;
-	struct virtqueue *vqs[VIRTWL_MAX_QUEUES];
-	vq_callback_t *vq_callbacks[VIRTWL_MAX_QUEUES] = { vq_in_cb, vq_out_cb };
-	static const char *vq_names[VIRTWL_MAX_QUEUES] = { "in", "out" };
+	vq_callback_t *vq_callbacks[] = { vq_in_cb, vq_out_cb };
+	static const char * const vq_names[] = { "in", "out" };
 
 	vi = kzalloc(sizeof(struct virtwl_info), GFP_KERNEL);
 	if (!vi)
@@ -1304,39 +1327,13 @@
 	for (i = 0; i < VIRTWL_QUEUE_COUNT; i++)
 		mutex_init(&vi->vq_locks[i]);
 
-	queue_count = VIRTWL_QUEUE_COUNT;
-	if (vdev->features & (1ULL << VIRTIO_WL_F_MAGMA)) {
-		pr_info("virtwl: initializing virtmagma");
-		ret = virtmagma_init(&vi->magma_info);
-		if (ret) {
-			pr_warn("virtwl: failed to initialize virtmagma: %d\n", ret);
-			goto destroy_device;
-		}
-		queue_count += vi->magma_info.queue_count;
-	}
-
-	/*
-	 * virtio_find_vqs is a one-time operation, so child devices must
-	 * expose their respective arguments for the call to the parent.
-	 */
-	if (queue_count > VIRTWL_MAX_QUEUES) {
-		pr_warn("virtwl: too many queues requested by child device\n");
-		ret = -ENOMEM;
-		goto destroy_device;
-	}
-	if (vi->magma_info.enabled)
-		vi->magma_info.virtio_find_vqs_prepare(&vi->magma_info,
-			&vq_callbacks[VIRTWL_QUEUE_COUNT],
-			&vq_names[VIRTWL_QUEUE_COUNT]);
-	ret = virtio_find_vqs(vdev, queue_count, vqs, vq_callbacks, vq_names, NULL);
+	ret = virtio_find_vqs(vdev, VIRTWL_QUEUE_COUNT, vi->vqs, vq_callbacks,
+			      vq_names, NULL);
 	if (ret) {
 		pr_warn("virtwl: failed to find virtio wayland queues: %d\n",
 			ret);
 		goto del_cdev;
 	}
-	memcpy(vi->vqs, vqs, sizeof(vi->vqs));
-	if (vi->magma_info.enabled)
-		vi->magma_info.virtio_find_vqs_complete(&vi->magma_info, &vqs[VIRTWL_QUEUE_COUNT]);
 
 	INIT_WORK(&vi->in_vq_work, vq_in_work_handler);
 	INIT_WORK(&vi->out_vq_work, vq_out_work_handler);
@@ -1344,6 +1341,7 @@
 
 	mutex_init(&vi->vfds_lock);
 	idr_init(&vi->vfds);
+	init_completion(&vi->vfd_wait_completion);
 
 	/* lock is unneeded as we have unique ownership */
 	ret = vq_fill_locked(vi->vqs[VIRTWL_VQ_IN]);
@@ -1352,17 +1350,8 @@
 		goto del_cdev;
 	}
 
-	if (vi->magma_info.enabled) {
-		ret = vi->magma_info.virtio_device_ready_prepare(&vi->magma_info);
-		if (ret) {
-			pr_warn("virtwl: failed to prepare virtmagma for device-ready: %d", ret);
-			goto del_cdev;
-		}
-	}
 	virtio_device_ready(vdev);
 	virtqueue_kick(vi->vqs[VIRTWL_VQ_IN]);
-	if (vi->magma_info.enabled)
-		vi->magma_info.virtio_device_ready_complete(&vi->magma_info);
 
 	return 0;
 
@@ -1383,7 +1372,6 @@
 {
 	struct virtwl_info *vi = vdev->priv;
 
-	vi->magma_info.virtio_remove(&vi->magma_info, vdev);
 	cdev_del(&vi->cdev);
 	put_device(vi->dev);
 	class_destroy(vi->class);
@@ -1405,20 +1393,17 @@
 {
 }
 
-
 static struct virtio_device_id id_table[] = {
 	{ VIRTIO_ID_WL, VIRTIO_DEV_ANY_ID },
 	{ 0 },
 };
 
 static unsigned int features_legacy[] = {
-	VIRTIO_WL_F_TRANS_FLAGS,
-	VIRTIO_WL_F_MAGMA
+	VIRTIO_WL_F_TRANS_FLAGS
 };
 
 static unsigned int features[] = {
-	VIRTIO_WL_F_TRANS_FLAGS,
-	VIRTIO_WL_F_MAGMA
+	VIRTIO_WL_F_TRANS_FLAGS
 };
 
 static struct virtio_driver virtio_wl_driver = {
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 293872a..c5cf1b2 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -44,5 +44,6 @@
 #define VIRTIO_ID_VSOCK        19 /* virtio vsock transport */
 #define VIRTIO_ID_CRYPTO       20 /* virtio crypto */
 #define VIRTIO_ID_WL           30 /* virtio wayland */
+#define VIRTIO_ID_MAGMA        50 /* virtio magma */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_magma.h b/include/uapi/linux/virtio_magma.h
index d57435f..74f441d 100644
--- a/include/uapi/linux/virtio_magma.h
+++ b/include/uapi/linux/virtio_magma.h
@@ -1,6 +1,9 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
+/* Copyright 2018 The Fuchsia Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style license that can be
+   found in the LICENSE file. */
+
+/* NOTE: DO NOT EDIT THIS FILE! It is generated automatically by:
+     //garnet/lib/magma/include/virtio/virtio_magma.h.gen.py */
 
 #ifndef _LINUX_VIRTIO_MAGMA_H
 #define _LINUX_VIRTIO_MAGMA_H
@@ -9,485 +12,724 @@
 #include <linux/virtio_config.h>
 #include <linux/virtmagma.h>
 
-
-
+struct virtio_magma_config {
+	__le64 dummy;
+} __attribute((packed));
 
 enum virtio_magma_ctrl_type {
-    /* magma commands */
-    VIRTIO_MAGMA_CMD_GET_DRIVER = 0x0400,
-    VIRTIO_MAGMA_CMD_QUERY,
-    VIRTIO_MAGMA_CMD_CREATE_CONNECTION,
-    VIRTIO_MAGMA_CMD_RELEASE_CONNECTION,
-    VIRTIO_MAGMA_CMD_GET_ERROR,
-    VIRTIO_MAGMA_CMD_CREATE_CONTEXT,
-    VIRTIO_MAGMA_CMD_RELEASE_CONTEXT,
-    VIRTIO_MAGMA_CMD_CREATE_BUFFER,
-    VIRTIO_MAGMA_CMD_RELEASE_BUFFER,
-    VIRTIO_MAGMA_CMD_GET_BUFFER_ID,
-    VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE,
-    VIRTIO_MAGMA_CMD_CLEAN_CACHE,
-    VIRTIO_MAGMA_CMD_SET_CACHE_POLICY,
-    VIRTIO_MAGMA_CMD_MAP,
-    VIRTIO_MAGMA_CMD_MAP_ALIGNED,
-    VIRTIO_MAGMA_CMD_MAP_SPECIFIC,
-    VIRTIO_MAGMA_CMD_UNMAP,
-    VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU,
-    VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU,
-    VIRTIO_MAGMA_CMD_COMMIT_BUFFER,
-    VIRTIO_MAGMA_CMD_EXPORT,
-    VIRTIO_MAGMA_CMD_IMPORT,
-    VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER,
-    VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER,
-    VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER,
-    VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS,
-    VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE,
-    VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE,
-    VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID,
-    VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE,
-    VIRTIO_MAGMA_CMD_RESET_SEMAPHORE,
-    VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES,
-    VIRTIO_MAGMA_CMD_EXPORT_SEMAPHORE,
-    VIRTIO_MAGMA_CMD_IMPORT_SEMAPHORE,
-    VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL,
-    /* magma success responses */
-    VIRTIO_MAGMA_RESP_GET_DRIVER = 0x1180,
-    VIRTIO_MAGMA_RESP_QUERY,
-    VIRTIO_MAGMA_RESP_CREATE_CONNECTION,
-    VIRTIO_MAGMA_RESP_RELEASE_CONNECTION,
-    VIRTIO_MAGMA_RESP_GET_ERROR,
-    VIRTIO_MAGMA_RESP_CREATE_CONTEXT,
-    VIRTIO_MAGMA_RESP_RELEASE_CONTEXT,
-    VIRTIO_MAGMA_RESP_CREATE_BUFFER,
-    VIRTIO_MAGMA_RESP_RELEASE_BUFFER,
-    VIRTIO_MAGMA_RESP_GET_BUFFER_ID,
-    VIRTIO_MAGMA_RESP_GET_BUFFER_SIZE,
-    VIRTIO_MAGMA_RESP_CLEAN_CACHE,
-    VIRTIO_MAGMA_RESP_SET_CACHE_POLICY,
-    VIRTIO_MAGMA_RESP_MAP,
-    VIRTIO_MAGMA_RESP_MAP_ALIGNED,
-    VIRTIO_MAGMA_RESP_MAP_SPECIFIC,
-    VIRTIO_MAGMA_RESP_UNMAP,
-    VIRTIO_MAGMA_RESP_MAP_BUFFER_GPU,
-    VIRTIO_MAGMA_RESP_UNMAP_BUFFER_GPU,
-    VIRTIO_MAGMA_RESP_COMMIT_BUFFER,
-    VIRTIO_MAGMA_RESP_EXPORT,
-    VIRTIO_MAGMA_RESP_IMPORT,
-    VIRTIO_MAGMA_RESP_CREATE_COMMAND_BUFFER,
-    VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER,
-    VIRTIO_MAGMA_RESP_SUBMIT_COMMAND_BUFFER,
-    VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS,
-    VIRTIO_MAGMA_RESP_CREATE_SEMAPHORE,
-    VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE,
-    VIRTIO_MAGMA_RESP_GET_SEMAPHORE_ID,
-    VIRTIO_MAGMA_RESP_SIGNAL_SEMAPHORE,
-    VIRTIO_MAGMA_RESP_RESET_SEMAPHORE,
-    VIRTIO_MAGMA_RESP_WAIT_SEMAPHORES,
-    VIRTIO_MAGMA_RESP_EXPORT_SEMAPHORE,
-    VIRTIO_MAGMA_RESP_IMPORT_SEMAPHORE,
-    VIRTIO_MAGMA_RESP_READ_NOTIFICATION_CHANNEL,
-    /* magma error responses */
-    VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED = 0x1280,
-    VIRTIO_MAGMA_RESP_ERR_INTERNAL,
-    VIRTIO_MAGMA_RESP_ERR_HOST_DISCONNECTED,
-    VIRTIO_MAGMA_RESP_ERR_OUT_OF_MEMORY,
-    VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND,
-    VIRTIO_MAGMA_RESP_ERR_INVALID_ARGUMENT,
-};
+	/* magma commands */
+	VIRTIO_MAGMA_CMD_QUERY = 0x0400,
+	VIRTIO_MAGMA_CMD_CREATE_CONNECTION,
+	VIRTIO_MAGMA_CMD_RELEASE_CONNECTION,
+	VIRTIO_MAGMA_CMD_GET_ERROR,
+	VIRTIO_MAGMA_CMD_CREATE_CONTEXT,
+	VIRTIO_MAGMA_CMD_RELEASE_CONTEXT,
+	VIRTIO_MAGMA_CMD_CREATE_BUFFER,
+	VIRTIO_MAGMA_CMD_RELEASE_BUFFER,
+	VIRTIO_MAGMA_CMD_DUPLICATE_HANDLE,
+	VIRTIO_MAGMA_CMD_RELEASE_BUFFER_HANDLE,
+	VIRTIO_MAGMA_CMD_GET_BUFFER_ID,
+	VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE,
+	VIRTIO_MAGMA_CMD_CLEAN_CACHE,
+	VIRTIO_MAGMA_CMD_SET_CACHE_POLICY,
+	VIRTIO_MAGMA_CMD_GET_BUFFER_CACHE_POLICY,
+	VIRTIO_MAGMA_CMD_GET_BUFFER_IS_MAPPABLE,
+	VIRTIO_MAGMA_CMD_SET_BUFFER_MAPPING_ADDRESS_RANGE,
+	VIRTIO_MAGMA_CMD_MAP,
+	VIRTIO_MAGMA_CMD_MAP_ALIGNED,
+	VIRTIO_MAGMA_CMD_MAP_SPECIFIC,
+	VIRTIO_MAGMA_CMD_UNMAP,
+	VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU,
+	VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU,
+	VIRTIO_MAGMA_CMD_COMMIT_BUFFER,
+	VIRTIO_MAGMA_CMD_EXPORT,
+	VIRTIO_MAGMA_CMD_IMPORT,
+	VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER,
+	VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER,
+	VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER,
+	VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS,
+	VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS2,
+	VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE,
+	VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE,
+	VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID,
+	VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE,
+	VIRTIO_MAGMA_CMD_RESET_SEMAPHORE,
+	VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES,
+	VIRTIO_MAGMA_CMD_EXPORT_SEMAPHORE,
+	VIRTIO_MAGMA_CMD_IMPORT_SEMAPHORE,
+	VIRTIO_MAGMA_CMD_GET_NOTIFICATION_CHANNEL_HANDLE,
+	VIRTIO_MAGMA_CMD_WAIT_NOTIFICATION_CHANNEL,
+	VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL,
+	/* magma success responses */
+	VIRTIO_MAGMA_RESP_QUERY = 0x1180,
+	VIRTIO_MAGMA_RESP_CREATE_CONNECTION,
+	VIRTIO_MAGMA_RESP_RELEASE_CONNECTION,
+	VIRTIO_MAGMA_RESP_GET_ERROR,
+	VIRTIO_MAGMA_RESP_CREATE_CONTEXT,
+	VIRTIO_MAGMA_RESP_RELEASE_CONTEXT,
+	VIRTIO_MAGMA_RESP_CREATE_BUFFER,
+	VIRTIO_MAGMA_RESP_RELEASE_BUFFER,
+	VIRTIO_MAGMA_RESP_DUPLICATE_HANDLE,
+	VIRTIO_MAGMA_RESP_RELEASE_BUFFER_HANDLE,
+	VIRTIO_MAGMA_RESP_GET_BUFFER_ID,
+	VIRTIO_MAGMA_RESP_GET_BUFFER_SIZE,
+	VIRTIO_MAGMA_RESP_CLEAN_CACHE,
+	VIRTIO_MAGMA_RESP_SET_CACHE_POLICY,
+	VIRTIO_MAGMA_RESP_GET_BUFFER_CACHE_POLICY,
+	VIRTIO_MAGMA_RESP_GET_BUFFER_IS_MAPPABLE,
+	VIRTIO_MAGMA_RESP_SET_BUFFER_MAPPING_ADDRESS_RANGE,
+	VIRTIO_MAGMA_RESP_MAP,
+	VIRTIO_MAGMA_RESP_MAP_ALIGNED,
+	VIRTIO_MAGMA_RESP_MAP_SPECIFIC,
+	VIRTIO_MAGMA_RESP_UNMAP,
+	VIRTIO_MAGMA_RESP_MAP_BUFFER_GPU,
+	VIRTIO_MAGMA_RESP_UNMAP_BUFFER_GPU,
+	VIRTIO_MAGMA_RESP_COMMIT_BUFFER,
+	VIRTIO_MAGMA_RESP_EXPORT,
+	VIRTIO_MAGMA_RESP_IMPORT,
+	VIRTIO_MAGMA_RESP_CREATE_COMMAND_BUFFER,
+	VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER,
+	VIRTIO_MAGMA_RESP_SUBMIT_COMMAND_BUFFER,
+	VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS,
+	VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS2,
+	VIRTIO_MAGMA_RESP_CREATE_SEMAPHORE,
+	VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE,
+	VIRTIO_MAGMA_RESP_GET_SEMAPHORE_ID,
+	VIRTIO_MAGMA_RESP_SIGNAL_SEMAPHORE,
+	VIRTIO_MAGMA_RESP_RESET_SEMAPHORE,
+	VIRTIO_MAGMA_RESP_WAIT_SEMAPHORES,
+	VIRTIO_MAGMA_RESP_EXPORT_SEMAPHORE,
+	VIRTIO_MAGMA_RESP_IMPORT_SEMAPHORE,
+	VIRTIO_MAGMA_RESP_GET_NOTIFICATION_CHANNEL_HANDLE,
+	VIRTIO_MAGMA_RESP_WAIT_NOTIFICATION_CHANNEL,
+	VIRTIO_MAGMA_RESP_READ_NOTIFICATION_CHANNEL,
+	/* magma error responses */
+	VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED = 0x1280,
+	VIRTIO_MAGMA_RESP_ERR_INTERNAL,
+	VIRTIO_MAGMA_RESP_ERR_HOST_DISCONNECTED,
+	VIRTIO_MAGMA_RESP_ERR_OUT_OF_MEMORY,
+	VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND,
+	VIRTIO_MAGMA_RESP_ERR_INVALID_ARGUMENT,
+} __attribute((packed));
+
+/* Human-readable name for a control type; covers every enum value,
+ * including all error responses. Must be static inline: a header
+ * function declared plain `inline` has no external definition in C99
+ * and can fail to link when not inlined. */
+static inline const char *virtio_magma_ctrl_type_string(enum virtio_magma_ctrl_type type) {
+	switch (type) {
+		case VIRTIO_MAGMA_CMD_QUERY: return "VIRTIO_MAGMA_CMD_QUERY";
+		case VIRTIO_MAGMA_RESP_QUERY: return "VIRTIO_MAGMA_RESP_QUERY";
+		case VIRTIO_MAGMA_CMD_CREATE_CONNECTION: return "VIRTIO_MAGMA_CMD_CREATE_CONNECTION";
+		case VIRTIO_MAGMA_RESP_CREATE_CONNECTION: return "VIRTIO_MAGMA_RESP_CREATE_CONNECTION";
+		case VIRTIO_MAGMA_CMD_RELEASE_CONNECTION: return "VIRTIO_MAGMA_CMD_RELEASE_CONNECTION";
+		case VIRTIO_MAGMA_RESP_RELEASE_CONNECTION: return "VIRTIO_MAGMA_RESP_RELEASE_CONNECTION";
+		case VIRTIO_MAGMA_CMD_GET_ERROR: return "VIRTIO_MAGMA_CMD_GET_ERROR";
+		case VIRTIO_MAGMA_RESP_GET_ERROR: return "VIRTIO_MAGMA_RESP_GET_ERROR";
+		case VIRTIO_MAGMA_CMD_CREATE_CONTEXT: return "VIRTIO_MAGMA_CMD_CREATE_CONTEXT";
+		case VIRTIO_MAGMA_RESP_CREATE_CONTEXT: return "VIRTIO_MAGMA_RESP_CREATE_CONTEXT";
+		case VIRTIO_MAGMA_CMD_RELEASE_CONTEXT: return "VIRTIO_MAGMA_CMD_RELEASE_CONTEXT";
+		case VIRTIO_MAGMA_RESP_RELEASE_CONTEXT: return "VIRTIO_MAGMA_RESP_RELEASE_CONTEXT";
+		case VIRTIO_MAGMA_CMD_CREATE_BUFFER: return "VIRTIO_MAGMA_CMD_CREATE_BUFFER";
+		case VIRTIO_MAGMA_RESP_CREATE_BUFFER: return "VIRTIO_MAGMA_RESP_CREATE_BUFFER";
+		case VIRTIO_MAGMA_CMD_RELEASE_BUFFER: return "VIRTIO_MAGMA_CMD_RELEASE_BUFFER";
+		case VIRTIO_MAGMA_RESP_RELEASE_BUFFER: return "VIRTIO_MAGMA_RESP_RELEASE_BUFFER";
+		case VIRTIO_MAGMA_CMD_DUPLICATE_HANDLE: return "VIRTIO_MAGMA_CMD_DUPLICATE_HANDLE";
+		case VIRTIO_MAGMA_RESP_DUPLICATE_HANDLE: return "VIRTIO_MAGMA_RESP_DUPLICATE_HANDLE";
+		case VIRTIO_MAGMA_CMD_RELEASE_BUFFER_HANDLE: return "VIRTIO_MAGMA_CMD_RELEASE_BUFFER_HANDLE";
+		case VIRTIO_MAGMA_RESP_RELEASE_BUFFER_HANDLE: return "VIRTIO_MAGMA_RESP_RELEASE_BUFFER_HANDLE";
+		case VIRTIO_MAGMA_CMD_GET_BUFFER_ID: return "VIRTIO_MAGMA_CMD_GET_BUFFER_ID";
+		case VIRTIO_MAGMA_RESP_GET_BUFFER_ID: return "VIRTIO_MAGMA_RESP_GET_BUFFER_ID";
+		case VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE: return "VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE";
+		case VIRTIO_MAGMA_RESP_GET_BUFFER_SIZE: return "VIRTIO_MAGMA_RESP_GET_BUFFER_SIZE";
+		case VIRTIO_MAGMA_CMD_CLEAN_CACHE: return "VIRTIO_MAGMA_CMD_CLEAN_CACHE";
+		case VIRTIO_MAGMA_RESP_CLEAN_CACHE: return "VIRTIO_MAGMA_RESP_CLEAN_CACHE";
+		case VIRTIO_MAGMA_CMD_SET_CACHE_POLICY: return "VIRTIO_MAGMA_CMD_SET_CACHE_POLICY";
+		case VIRTIO_MAGMA_RESP_SET_CACHE_POLICY: return "VIRTIO_MAGMA_RESP_SET_CACHE_POLICY";
+		case VIRTIO_MAGMA_CMD_GET_BUFFER_CACHE_POLICY: return "VIRTIO_MAGMA_CMD_GET_BUFFER_CACHE_POLICY";
+		case VIRTIO_MAGMA_RESP_GET_BUFFER_CACHE_POLICY: return "VIRTIO_MAGMA_RESP_GET_BUFFER_CACHE_POLICY";
+		case VIRTIO_MAGMA_CMD_GET_BUFFER_IS_MAPPABLE: return "VIRTIO_MAGMA_CMD_GET_BUFFER_IS_MAPPABLE";
+		case VIRTIO_MAGMA_RESP_GET_BUFFER_IS_MAPPABLE: return "VIRTIO_MAGMA_RESP_GET_BUFFER_IS_MAPPABLE";
+		case VIRTIO_MAGMA_CMD_SET_BUFFER_MAPPING_ADDRESS_RANGE: return "VIRTIO_MAGMA_CMD_SET_BUFFER_MAPPING_ADDRESS_RANGE";
+		case VIRTIO_MAGMA_RESP_SET_BUFFER_MAPPING_ADDRESS_RANGE: return "VIRTIO_MAGMA_RESP_SET_BUFFER_MAPPING_ADDRESS_RANGE";
+		case VIRTIO_MAGMA_CMD_MAP: return "VIRTIO_MAGMA_CMD_MAP";
+		case VIRTIO_MAGMA_RESP_MAP: return "VIRTIO_MAGMA_RESP_MAP";
+		case VIRTIO_MAGMA_CMD_MAP_ALIGNED: return "VIRTIO_MAGMA_CMD_MAP_ALIGNED";
+		case VIRTIO_MAGMA_RESP_MAP_ALIGNED: return "VIRTIO_MAGMA_RESP_MAP_ALIGNED";
+		case VIRTIO_MAGMA_CMD_MAP_SPECIFIC: return "VIRTIO_MAGMA_CMD_MAP_SPECIFIC";
+		case VIRTIO_MAGMA_RESP_MAP_SPECIFIC: return "VIRTIO_MAGMA_RESP_MAP_SPECIFIC";
+		case VIRTIO_MAGMA_CMD_UNMAP: return "VIRTIO_MAGMA_CMD_UNMAP";
+		case VIRTIO_MAGMA_RESP_UNMAP: return "VIRTIO_MAGMA_RESP_UNMAP";
+		case VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU: return "VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU";
+		case VIRTIO_MAGMA_RESP_MAP_BUFFER_GPU: return "VIRTIO_MAGMA_RESP_MAP_BUFFER_GPU";
+		case VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU: return "VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU";
+		case VIRTIO_MAGMA_RESP_UNMAP_BUFFER_GPU: return "VIRTIO_MAGMA_RESP_UNMAP_BUFFER_GPU";
+		case VIRTIO_MAGMA_CMD_COMMIT_BUFFER: return "VIRTIO_MAGMA_CMD_COMMIT_BUFFER";
+		case VIRTIO_MAGMA_RESP_COMMIT_BUFFER: return "VIRTIO_MAGMA_RESP_COMMIT_BUFFER";
+		case VIRTIO_MAGMA_CMD_EXPORT: return "VIRTIO_MAGMA_CMD_EXPORT";
+		case VIRTIO_MAGMA_RESP_EXPORT: return "VIRTIO_MAGMA_RESP_EXPORT";
+		case VIRTIO_MAGMA_CMD_IMPORT: return "VIRTIO_MAGMA_CMD_IMPORT";
+		case VIRTIO_MAGMA_RESP_IMPORT: return "VIRTIO_MAGMA_RESP_IMPORT";
+		case VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER: return "VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER";
+		case VIRTIO_MAGMA_RESP_CREATE_COMMAND_BUFFER: return "VIRTIO_MAGMA_RESP_CREATE_COMMAND_BUFFER";
+		case VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER: return "VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER";
+		case VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER: return "VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER";
+		case VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER: return "VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER";
+		case VIRTIO_MAGMA_RESP_SUBMIT_COMMAND_BUFFER: return "VIRTIO_MAGMA_RESP_SUBMIT_COMMAND_BUFFER";
+		case VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS: return "VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS";
+		case VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS: return "VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS";
+		case VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS2: return "VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS2";
+		case VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS2: return "VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS2";
+		case VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE: return "VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE";
+		case VIRTIO_MAGMA_RESP_CREATE_SEMAPHORE: return "VIRTIO_MAGMA_RESP_CREATE_SEMAPHORE";
+		case VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE: return "VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE";
+		case VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE: return "VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE";
+		case VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID: return "VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID";
+		case VIRTIO_MAGMA_RESP_GET_SEMAPHORE_ID: return "VIRTIO_MAGMA_RESP_GET_SEMAPHORE_ID";
+		case VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE: return "VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE";
+		case VIRTIO_MAGMA_RESP_SIGNAL_SEMAPHORE: return "VIRTIO_MAGMA_RESP_SIGNAL_SEMAPHORE";
+		case VIRTIO_MAGMA_CMD_RESET_SEMAPHORE: return "VIRTIO_MAGMA_CMD_RESET_SEMAPHORE";
+		case VIRTIO_MAGMA_RESP_RESET_SEMAPHORE: return "VIRTIO_MAGMA_RESP_RESET_SEMAPHORE";
+		case VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES: return "VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES";
+		case VIRTIO_MAGMA_RESP_WAIT_SEMAPHORES: return "VIRTIO_MAGMA_RESP_WAIT_SEMAPHORES";
+		case VIRTIO_MAGMA_CMD_EXPORT_SEMAPHORE: return "VIRTIO_MAGMA_CMD_EXPORT_SEMAPHORE";
+		case VIRTIO_MAGMA_RESP_EXPORT_SEMAPHORE: return "VIRTIO_MAGMA_RESP_EXPORT_SEMAPHORE";
+		case VIRTIO_MAGMA_CMD_IMPORT_SEMAPHORE: return "VIRTIO_MAGMA_CMD_IMPORT_SEMAPHORE";
+		case VIRTIO_MAGMA_RESP_IMPORT_SEMAPHORE: return "VIRTIO_MAGMA_RESP_IMPORT_SEMAPHORE";
+		case VIRTIO_MAGMA_CMD_GET_NOTIFICATION_CHANNEL_HANDLE: return "VIRTIO_MAGMA_CMD_GET_NOTIFICATION_CHANNEL_HANDLE";
+		case VIRTIO_MAGMA_RESP_GET_NOTIFICATION_CHANNEL_HANDLE: return "VIRTIO_MAGMA_RESP_GET_NOTIFICATION_CHANNEL_HANDLE";
+		case VIRTIO_MAGMA_CMD_WAIT_NOTIFICATION_CHANNEL: return "VIRTIO_MAGMA_CMD_WAIT_NOTIFICATION_CHANNEL";
+		case VIRTIO_MAGMA_RESP_WAIT_NOTIFICATION_CHANNEL: return "VIRTIO_MAGMA_RESP_WAIT_NOTIFICATION_CHANNEL";
+		case VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL: return "VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL";
+		case VIRTIO_MAGMA_RESP_READ_NOTIFICATION_CHANNEL: return "VIRTIO_MAGMA_RESP_READ_NOTIFICATION_CHANNEL";
+		case VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED: return "VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED";
+		case VIRTIO_MAGMA_RESP_ERR_INTERNAL: return "VIRTIO_MAGMA_RESP_ERR_INTERNAL";
+		case VIRTIO_MAGMA_RESP_ERR_HOST_DISCONNECTED: return "VIRTIO_MAGMA_RESP_ERR_HOST_DISCONNECTED";
+		case VIRTIO_MAGMA_RESP_ERR_OUT_OF_MEMORY: return "VIRTIO_MAGMA_RESP_ERR_OUT_OF_MEMORY";
+		case VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND: return "VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND";
+		case VIRTIO_MAGMA_RESP_ERR_INVALID_ARGUMENT: return "VIRTIO_MAGMA_RESP_ERR_INVALID_ARGUMENT";
+		default: return "[invalid virtio_magma_ctrl_type]";
+	}
+}
+
+/* Maps a command to its expected success-response type; any non-command
+ * input yields VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND. static inline is
+ * required for a header-defined function: plain C99 `inline` emits no
+ * external definition and can fail to link when not inlined. */
+static inline enum virtio_magma_ctrl_type virtio_magma_expected_response_type(enum virtio_magma_ctrl_type type) {
+	switch (type) {
+		case VIRTIO_MAGMA_CMD_QUERY: return VIRTIO_MAGMA_RESP_QUERY;
+		case VIRTIO_MAGMA_CMD_CREATE_CONNECTION: return VIRTIO_MAGMA_RESP_CREATE_CONNECTION;
+		case VIRTIO_MAGMA_CMD_RELEASE_CONNECTION: return VIRTIO_MAGMA_RESP_RELEASE_CONNECTION;
+		case VIRTIO_MAGMA_CMD_GET_ERROR: return VIRTIO_MAGMA_RESP_GET_ERROR;
+		case VIRTIO_MAGMA_CMD_CREATE_CONTEXT: return VIRTIO_MAGMA_RESP_CREATE_CONTEXT;
+		case VIRTIO_MAGMA_CMD_RELEASE_CONTEXT: return VIRTIO_MAGMA_RESP_RELEASE_CONTEXT;
+		case VIRTIO_MAGMA_CMD_CREATE_BUFFER: return VIRTIO_MAGMA_RESP_CREATE_BUFFER;
+		case VIRTIO_MAGMA_CMD_RELEASE_BUFFER: return VIRTIO_MAGMA_RESP_RELEASE_BUFFER;
+		case VIRTIO_MAGMA_CMD_DUPLICATE_HANDLE: return VIRTIO_MAGMA_RESP_DUPLICATE_HANDLE;
+		case VIRTIO_MAGMA_CMD_RELEASE_BUFFER_HANDLE: return VIRTIO_MAGMA_RESP_RELEASE_BUFFER_HANDLE;
+		case VIRTIO_MAGMA_CMD_GET_BUFFER_ID: return VIRTIO_MAGMA_RESP_GET_BUFFER_ID;
+		case VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE: return VIRTIO_MAGMA_RESP_GET_BUFFER_SIZE;
+		case VIRTIO_MAGMA_CMD_CLEAN_CACHE: return VIRTIO_MAGMA_RESP_CLEAN_CACHE;
+		case VIRTIO_MAGMA_CMD_SET_CACHE_POLICY: return VIRTIO_MAGMA_RESP_SET_CACHE_POLICY;
+		case VIRTIO_MAGMA_CMD_GET_BUFFER_CACHE_POLICY: return VIRTIO_MAGMA_RESP_GET_BUFFER_CACHE_POLICY;
+		case VIRTIO_MAGMA_CMD_GET_BUFFER_IS_MAPPABLE: return VIRTIO_MAGMA_RESP_GET_BUFFER_IS_MAPPABLE;
+		case VIRTIO_MAGMA_CMD_SET_BUFFER_MAPPING_ADDRESS_RANGE: return VIRTIO_MAGMA_RESP_SET_BUFFER_MAPPING_ADDRESS_RANGE;
+		case VIRTIO_MAGMA_CMD_MAP: return VIRTIO_MAGMA_RESP_MAP;
+		case VIRTIO_MAGMA_CMD_MAP_ALIGNED: return VIRTIO_MAGMA_RESP_MAP_ALIGNED;
+		case VIRTIO_MAGMA_CMD_MAP_SPECIFIC: return VIRTIO_MAGMA_RESP_MAP_SPECIFIC;
+		case VIRTIO_MAGMA_CMD_UNMAP: return VIRTIO_MAGMA_RESP_UNMAP;
+		case VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU: return VIRTIO_MAGMA_RESP_MAP_BUFFER_GPU;
+		case VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU: return VIRTIO_MAGMA_RESP_UNMAP_BUFFER_GPU;
+		case VIRTIO_MAGMA_CMD_COMMIT_BUFFER: return VIRTIO_MAGMA_RESP_COMMIT_BUFFER;
+		case VIRTIO_MAGMA_CMD_EXPORT: return VIRTIO_MAGMA_RESP_EXPORT;
+		case VIRTIO_MAGMA_CMD_IMPORT: return VIRTIO_MAGMA_RESP_IMPORT;
+		case VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER: return VIRTIO_MAGMA_RESP_CREATE_COMMAND_BUFFER;
+		case VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER: return VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER;
+		case VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER: return VIRTIO_MAGMA_RESP_SUBMIT_COMMAND_BUFFER;
+		case VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS: return VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS;
+		case VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS2: return VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS2;
+		case VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE: return VIRTIO_MAGMA_RESP_CREATE_SEMAPHORE;
+		case VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE: return VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE;
+		case VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID: return VIRTIO_MAGMA_RESP_GET_SEMAPHORE_ID;
+		case VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE: return VIRTIO_MAGMA_RESP_SIGNAL_SEMAPHORE;
+		case VIRTIO_MAGMA_CMD_RESET_SEMAPHORE: return VIRTIO_MAGMA_RESP_RESET_SEMAPHORE;
+		case VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES: return VIRTIO_MAGMA_RESP_WAIT_SEMAPHORES;
+		case VIRTIO_MAGMA_CMD_EXPORT_SEMAPHORE: return VIRTIO_MAGMA_RESP_EXPORT_SEMAPHORE;
+		case VIRTIO_MAGMA_CMD_IMPORT_SEMAPHORE: return VIRTIO_MAGMA_RESP_IMPORT_SEMAPHORE;
+		case VIRTIO_MAGMA_CMD_GET_NOTIFICATION_CHANNEL_HANDLE: return VIRTIO_MAGMA_RESP_GET_NOTIFICATION_CHANNEL_HANDLE;
+		case VIRTIO_MAGMA_CMD_WAIT_NOTIFICATION_CHANNEL: return VIRTIO_MAGMA_RESP_WAIT_NOTIFICATION_CHANNEL;
+		case VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL: return VIRTIO_MAGMA_RESP_READ_NOTIFICATION_CHANNEL;
+		default: return VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND;
+	}
+}
 
 struct virtio_magma_ctrl_hdr {
-    __le32 type;
-    __le32 flags;
-};
+	__le32 type;
+	__le32 flags;
+} __attribute((packed));
 
-struct virtio_magma_get_driver {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le32 page_size;
-};
-
-struct virtio_magma_get_driver_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 pfn;
-    __le64 size;
-};
-
-struct virtio_magma_query {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 field_id;
-};
+struct virtio_magma_query_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le32 file_descriptor;
+	__le64 id;
+} __attribute((packed));
 
 struct virtio_magma_query_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 field_value_out;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 value_out;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_create_connection {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+struct virtio_magma_create_connection_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le32 file_descriptor;
+} __attribute((packed));
 
 struct virtio_magma_create_connection_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection_out;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_release_connection {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-};
+struct virtio_magma_release_connection_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+} __attribute((packed));
 
 struct virtio_magma_release_connection_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_get_error {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-};
+struct virtio_magma_get_error_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+} __attribute((packed));
 
 struct virtio_magma_get_error_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_create_context {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-};
+struct virtio_magma_create_context_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+} __attribute((packed));
 
 struct virtio_magma_create_context_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le32 context_id_out;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 context_id_out;
+} __attribute((packed));
 
-struct virtio_magma_release_context {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le32 context_id;
-};
+struct virtio_magma_release_context_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le32 context_id;
+} __attribute((packed));
 
 struct virtio_magma_release_context_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_create_buffer {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 size;
-};
+struct virtio_magma_create_buffer_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 size;
+} __attribute((packed));
 
 struct virtio_magma_create_buffer_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 size_out;
-    __le64 buffer_out;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 size_out;
+	__le64 buffer_out;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_release_buffer {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 buffer;
-};
+struct virtio_magma_release_buffer_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 buffer;
+} __attribute((packed));
 
 struct virtio_magma_release_buffer_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_get_buffer_id {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 buffer;
-};
+struct virtio_magma_duplicate_handle_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le32 buffer_handle;
+} __attribute((packed));
+
+struct virtio_magma_duplicate_handle_resp {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer_handle_out;
+	__le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_release_buffer_handle_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le32 buffer_handle;
+} __attribute((packed));
+
+struct virtio_magma_release_buffer_handle_resp {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_id_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer;
+} __attribute((packed));
 
 struct virtio_magma_get_buffer_id_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 id_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_get_buffer_size {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 buffer;
-};
+struct virtio_magma_get_buffer_size_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer;
+} __attribute((packed));
 
 struct virtio_magma_get_buffer_size_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 size_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_clean_cache {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 buffer;
-    __le64 offset;
-    __le64 size;
-    __le32 operation;
-};
+struct virtio_magma_clean_cache_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer;
+	__le64 offset;
+	__le64 size;
+	__le64 operation;
+} __attribute((packed));
 
 struct virtio_magma_clean_cache_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_set_cache_policy {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 buffer;
-    __le32 policy;
-};
+struct virtio_magma_set_cache_policy_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer;
+	__le64 policy;
+} __attribute((packed));
 
 struct virtio_magma_set_cache_policy_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_map {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 buffer;
-};
+struct virtio_magma_get_buffer_cache_policy_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_cache_policy_resp {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 cache_policy_out;
+	__le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_is_mappable_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer;
+	__le32 flags;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_is_mappable_resp {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 is_mappable_out;
+	__le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_set_buffer_mapping_address_range_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer;
+	__le32 handle;
+} __attribute((packed));
+
+struct virtio_magma_set_buffer_mapping_address_range_resp {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_map_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 buffer;
+} __attribute((packed));
 
 struct virtio_magma_map_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 addr_out;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 addr_out;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_map_aligned {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 buffer;
-    __le64 alignment;
-};
+struct virtio_magma_map_aligned_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 buffer;
+	__le64 alignment;
+} __attribute((packed));
 
 struct virtio_magma_map_aligned_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 addr_out;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 addr_out;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_map_specific {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 buffer;
-    __le64 addr;
-};
+struct virtio_magma_map_specific_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 buffer;
+	__le64 addr;
+	__le64 offset;
+	__le64 length;
+} __attribute((packed));
 
 struct virtio_magma_map_specific_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_unmap {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 buffer;
-};
+struct virtio_magma_unmap_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 buffer;
+} __attribute((packed));
 
 struct virtio_magma_unmap_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_map_buffer_gpu {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 buffer;
-    __le64 page_offset;
-    __le64 page_count;
-    __le64 gpu_va;
-    __le64 map_flags;
-};
+struct virtio_magma_map_buffer_gpu_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 buffer;
+	__le64 page_offset;
+	__le64 page_count;
+	__le64 gpu_va;
+	__le64 map_flags;
+} __attribute((packed));
 
 struct virtio_magma_map_buffer_gpu_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_unmap_buffer_gpu {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 buffer;
-    __le64 gpu_va;
-};
+struct virtio_magma_unmap_buffer_gpu_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 buffer;
+	__le64 gpu_va;
+} __attribute((packed));
 
 struct virtio_magma_unmap_buffer_gpu_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_commit_buffer {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 buffer;
-    __le64 page_offset;
-    __le64 page_count;
-};
+struct virtio_magma_commit_buffer_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 buffer;
+	__le64 page_offset;
+	__le64 page_count;
+} __attribute((packed));
 
 struct virtio_magma_commit_buffer_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_export {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 buffer;
-};
+struct virtio_magma_export_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 buffer;
+} __attribute((packed));
 
 struct virtio_magma_export_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le32 buffer_handle_out;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer_handle_out;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_import {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-};
+struct virtio_magma_import_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le32 buffer_handle;
+} __attribute((packed));
 
 struct virtio_magma_import_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 buffer_out;
-    __le32 buffer_handle;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer_out;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_create_command_buffer {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 size;
-};
+struct virtio_magma_create_command_buffer_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 size;
+} __attribute((packed));
 
 struct virtio_magma_create_command_buffer_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 buffer_out;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer_out;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_release_command_buffer {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 command_buffer;
-};
+struct virtio_magma_release_command_buffer_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 command_buffer;
+} __attribute((packed));
 
 struct virtio_magma_release_command_buffer_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_submit_command_buffer {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 command_buffer;
-    __le32 context_id;
-};
+struct virtio_magma_submit_command_buffer_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 command_buffer;
+	__le32 context_id;
+} __attribute((packed));
 
 struct virtio_magma_submit_command_buffer_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_execute_immediate_commands {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 command_count;
-    __le64 commands; // magma_system_inline_command_buffer[command_count]
-    __le32 context_id;
-};
+struct virtio_magma_execute_immediate_commands_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le32 context_id;
+	__le64 command_count;
+	__le64 command_buffers;
+} __attribute((packed));
 
 struct virtio_magma_execute_immediate_commands_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_create_semaphore {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-};
+struct virtio_magma_execute_immediate_commands2_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le32 context_id;
+	__le64 command_count;
+	__le64 command_buffers;
+} __attribute((packed));
+
+struct virtio_magma_execute_immediate_commands2_resp {
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_create_semaphore_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+} __attribute((packed));
 
 struct virtio_magma_create_semaphore_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 semaphore_out;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 semaphore_out;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_release_semaphore {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 semaphore;
-};
+struct virtio_magma_release_semaphore_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 semaphore;
+} __attribute((packed));
 
 struct virtio_magma_release_semaphore_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_get_semaphore_id {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 semaphore;
-};
+struct virtio_magma_get_semaphore_id_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 semaphore;
+} __attribute((packed));
 
 struct virtio_magma_get_semaphore_id_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 id_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_signal_semaphore {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 semaphore;
-};
+struct virtio_magma_signal_semaphore_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 semaphore;
+} __attribute((packed));
 
 struct virtio_magma_signal_semaphore_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_reset_semaphore {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 semaphore;
-};
+struct virtio_magma_reset_semaphore_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 semaphore;
+} __attribute((packed));
 
 struct virtio_magma_reset_semaphore_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
 
-struct virtio_magma_wait_semaphores {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 timeout_ms;
-    __le64 semaphores; // magma_semaphore_t[count]
-    __le32 count;
-    __le32 status_return;
-    char wait_all;
-};
+struct virtio_magma_wait_semaphores_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 semaphores;
+	__le32 count;
+	__le64 timeout_ms;
+	u8 wait_all;
+} __attribute((packed));
 
 struct virtio_magma_wait_semaphores_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_export_semaphore {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 semaphore;
-    __le64 connection;
-};
+struct virtio_magma_export_semaphore_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 semaphore;
+} __attribute((packed));
 
 struct virtio_magma_export_semaphore_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le32 semaphore_handle_out;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 semaphore_handle_out;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_import_semaphore {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le32 semaphore_handle;
-};
+struct virtio_magma_import_semaphore_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le32 semaphore_handle;
+} __attribute((packed));
 
 struct virtio_magma_import_semaphore_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 semaphore_out;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 semaphore_out;
+	__le64 result_return;
+} __attribute((packed));
 
-struct virtio_magma_read_notification_channel {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 connection;
-    __le64 buffer;
-    __le64 buffer_size;
-};
+struct virtio_magma_get_notification_channel_handle_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+} __attribute((packed));
+
+struct virtio_magma_get_notification_channel_handle_resp {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le32 result_return;
+} __attribute((packed));
+
+struct virtio_magma_wait_notification_channel_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 timeout_ns;
+} __attribute((packed));
+
+struct virtio_magma_wait_notification_channel_resp {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_read_notification_channel_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 buffer;
+	__le64 buffer_size;
+} __attribute((packed));
 
 struct virtio_magma_read_notification_channel_resp {
-    struct virtio_magma_ctrl_hdr hdr;
-    __le64 buffer_size_out;
-    __le32 status_return;
-};
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 buffer_size_out;
+	__le64 result_return;
+} __attribute((packed));
 
-
-#endif // _LINUX_VIRTIO_MAGMA_H
+#endif /* _LINUX_VIRTIO_MAGMA_H */
diff --git a/include/uapi/linux/virtmagma.h b/include/uapi/linux/virtmagma.h
index 68737be7..5da7a3a 100644
--- a/include/uapi/linux/virtmagma.h
+++ b/include/uapi/linux/virtmagma.h
@@ -28,260 +28,19 @@
 	__u32 version_out;
 };
 
-struct virtmagma_ioctl_args_get_driver {
-	__s32 unused;
+struct virtmagma_ioctl_args_get_mmfd {
+	__s32 fd_out;
 };
 
-struct virtmagma_ioctl_args_query {
-	__u64 id;
-	__u64 value_out;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_create_connection {
-	__s32 connection_return;
-};
-
-struct virtmagma_ioctl_args_release_connection {
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_get_error {
-	__s32 connection;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_create_context {
-	__s32 connection;
-	__u32 context_id_out;
-};
-
-struct virtmagma_ioctl_args_release_context {
-	__s32 connection;
-	__u32 context_id;
-};
-
-struct virtmagma_ioctl_args_create_buffer {
-	__u64 size;
-	__u64 size_out;
-	__u64 buffer_out;
-	__s32 connection;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_release_buffer {
-	__u64 buffer;
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_get_buffer_id {
-	__u64 buffer;
-	__u64 id_return;
-};
-
-struct virtmagma_ioctl_args_get_buffer_size {
-	__u64 buffer;
-	__u64 size_return;
-};
-
-struct virtmagma_ioctl_args_clean_cache {
-	__u64 buffer;
-	__u64 offset;
-	__u64 size;
-	__u32 operation;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_set_cache_policy {
-	__u64 buffer;
-	__u32 policy;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_map {
-	__u64 buffer;
-	__u64 addr_out;
-	__s32 connection;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_map_aligned {
-	__u64 buffer;
-	__u64 alignment;
-	__u64 addr_out;
-	__s32 connection;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_map_specific {
-	__u64 buffer;
-	__u64 addr;
-	__s32 connection;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_unmap {
-	__u64 buffer;
-	__s32 connection;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_map_buffer_gpu {
-	__u64 buffer;
-	__u64 page_offset;
-	__u64 page_count;
-	__u64 gpu_va;
-	__u64 map_flags;
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_unmap_buffer_gpu {
-	__u64 buffer;
-	__u64 gpu_va;
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_commit_buffer {
-	__u64 buffer;
-	__u64 page_offset;
-	__u64 page_count;
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_export {
-	__u64 buffer;
-	__u32 buffer_handle_out;
-	__u32 status_return;
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_import {
-	__u64 buffer_out;
-	__u32 buffer_handle;
-	__u32 status_return;
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_create_command_buffer {
-	__u64 size;
-	__u64 buffer_out;
-	__u32 status_return;
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_release_command_buffer {
-	__u64 command_buffer;
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_submit_command_buffer {
-	__u64 command_buffer;
-	__u32 context_id;
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_execute_immediate_commands {
-	__u64 command_count;
-	__u64 commands_addr; /* magma_system_inline_command_buffer[command_count] */
-	__u32 context_id;
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_create_semaphore {
-	__u64 semaphore_out;
-	__s32 connection;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_release_semaphore {
-	__u64 semaphore;
-	__s32 connection;
-};
-
-struct virtmagma_ioctl_args_get_semaphore_id {
-	__u64 semaphore;
-	__u64 id_return;
-};
-
-struct virtmagma_ioctl_args_signal_semaphore {
-	__u64 semaphore;
-};
-
-struct virtmagma_ioctl_args_reset_semaphore {
-	__u64 semaphore;
-};
-
-struct virtmagma_ioctl_args_wait_semaphores {
-	__u64 timeout_ms;
-	__u64 semaphores_addr; /* magma_semaphore_t[count] */
-	__u32 count;
-	__u32 status_return;
-	__u8 wait_all;
-};
-
-struct virtmagma_ioctl_args_export_semaphore {
-	__u64 semaphore;
-	__s32 connection;
-	__u32 semaphore_handle_out;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_import_semaphore {
-	__u64 semaphore_out;
-	__s32 connection;
-	__u32 semaphore_handle;
-	__u32 status_return;
-};
-
-struct virtmagma_ioctl_args_get_notification_channel_fd {
-	__s32 connection;
-	__s32 fd_return;
-};
-
-struct virtmagma_ioctl_args_read_notification_channel {
-	__u64 buffer;
-	__u64 buffer_size;
-	__u64 buffer_size_out;
-	__s32 connection;
-	__u32 status_return;
+struct virtmagma_ioctl_args_magma_command {
+	__u64 request_address;
+	__u64 request_size;
+	__u64 response_address;
+	__u64 response_size;
 };
 
 #define VIRTMAGMA_IOCTL_HANDSHAKE VIRTMAGMA_IOWR(0x00, struct virtmagma_ioctl_args_handshake)
-#define VIRTMAGMA_IOCTL_GET_DRIVER VIRTMAGMA_IOWR(0x01, struct virtmagma_ioctl_args_get_driver)
-#define VIRTMAGMA_IOCTL_QUERY VIRTMAGMA_IOWR(0x02, struct virtmagma_ioctl_args_query)
-#define VIRTMAGMA_IOCTL_CREATE_CONNECTION VIRTMAGMA_IOWR(0x03, struct virtmagma_ioctl_args_create_connection)
-#define VIRTMAGMA_IOCTL_RELEASE_CONNECTION VIRTMAGMA_IOWR(0x04, struct virtmagma_ioctl_args_release_connection)
-#define VIRTMAGMA_IOCTL_GET_ERROR VIRTMAGMA_IOWR(0x05, struct virtmagma_ioctl_args_get_error)
-#define VIRTMAGMA_IOCTL_CREATE_CONTEXT VIRTMAGMA_IOWR(0x06, struct virtmagma_ioctl_args_create_context)
-#define VIRTMAGMA_IOCTL_RELEASE_CONTEXT VIRTMAGMA_IOWR(0x07, struct virtmagma_ioctl_args_release_context)
-#define VIRTMAGMA_IOCTL_CREATE_BUFFER VIRTMAGMA_IOWR(0x08, struct virtmagma_ioctl_args_create_buffer)
-#define VIRTMAGMA_IOCTL_RELEASE_BUFFER VIRTMAGMA_IOWR(0x09, struct virtmagma_ioctl_args_release_buffer)
-#define VIRTMAGMA_IOCTL_GET_BUFFER_ID VIRTMAGMA_IOWR(0x0A, struct virtmagma_ioctl_args_get_buffer_id)
-#define VIRTMAGMA_IOCTL_GET_BUFFER_SIZE VIRTMAGMA_IOWR(0x0B, struct virtmagma_ioctl_args_get_buffer_size)
-#define VIRTMAGMA_IOCTL_CLEAN_CACHE VIRTMAGMA_IOWR(0x0C, struct virtmagma_ioctl_args_clean_cache)
-#define VIRTMAGMA_IOCTL_SET_CACHE_POLICY VIRTMAGMA_IOWR(0x0D, struct virtmagma_ioctl_args_set_cache_policy)
-#define VIRTMAGMA_IOCTL_MAP VIRTMAGMA_IOWR(0x0E, struct virtmagma_ioctl_args_map)
-#define VIRTMAGMA_IOCTL_MAP_ALIGNED VIRTMAGMA_IOWR(0x0F, struct virtmagma_ioctl_args_map_aligned)
-#define VIRTMAGMA_IOCTL_MAP_SPECIFIC VIRTMAGMA_IOWR(0x10, struct virtmagma_ioctl_args_map_specific)
-#define VIRTMAGMA_IOCTL_UNMAP VIRTMAGMA_IOWR(0x11, struct virtmagma_ioctl_args_unmap)
-#define VIRTMAGMA_IOCTL_MAP_BUFFER_GPU VIRTMAGMA_IOWR(0x12, struct virtmagma_ioctl_args_map_buffer_gpu)
-#define VIRTMAGMA_IOCTL_UNMAP_BUFFER_GPU VIRTMAGMA_IOWR(0x13, struct virtmagma_ioctl_args_unmap_buffer_gpu)
-#define VIRTMAGMA_IOCTL_COMMIT_BUFFER VIRTMAGMA_IOWR(0x14, struct virtmagma_ioctl_args_commit_buffer)
-#define VIRTMAGMA_IOCTL_EXPORT VIRTMAGMA_IOWR(0x15, struct virtmagma_ioctl_args_export)
-#define VIRTMAGMA_IOCTL_IMPORT VIRTMAGMA_IOWR(0x16, struct virtmagma_ioctl_args_import)
-#define VIRTMAGMA_IOCTL_CREATE_COMMAND_BUFFER VIRTMAGMA_IOWR(0x17, struct virtmagma_ioctl_args_create_command_buffer)
-#define VIRTMAGMA_IOCTL_RELEASE_COMMAND_BUFFER VIRTMAGMA_IOWR(0x18, struct virtmagma_ioctl_args_release_command_buffer)
-#define VIRTMAGMA_IOCTL_SUBMIT_COMMAND_BUFFER VIRTMAGMA_IOWR(0x19, struct virtmagma_ioctl_args_submit_command_buffer)
-#define VIRTMAGMA_IOCTL_EXECUTE_IMMEDIATE_COMMANDS VIRTMAGMA_IOWR(0x1A, struct virtmagma_ioctl_args_execute_immediate_commands)
-#define VIRTMAGMA_IOCTL_CREATE_SEMAPHORE VIRTMAGMA_IOWR(0x1B, struct virtmagma_ioctl_args_create_semaphore)
-#define VIRTMAGMA_IOCTL_RELEASE_SEMAPHORE VIRTMAGMA_IOWR(0x1C, struct virtmagma_ioctl_args_release_semaphore)
-#define VIRTMAGMA_IOCTL_GET_SEMAPHORE_ID VIRTMAGMA_IOWR(0x1D, struct virtmagma_ioctl_args_get_semaphore_id)
-#define VIRTMAGMA_IOCTL_SIGNAL_SEMAPHORE VIRTMAGMA_IOWR(0x1E, struct virtmagma_ioctl_args_signal_semaphore)
-#define VIRTMAGMA_IOCTL_RESET_SEMAPHORE VIRTMAGMA_IOWR(0x1F, struct virtmagma_ioctl_args_reset_semaphore)
-#define VIRTMAGMA_IOCTL_WAIT_SEMAPHORES VIRTMAGMA_IOWR(0x20, struct virtmagma_ioctl_args_wait_semaphores)
-#define VIRTMAGMA_IOCTL_EXPORT_SEMAPHORE VIRTMAGMA_IOWR(0x21, struct virtmagma_ioctl_args_export_semaphore)
-#define VIRTMAGMA_IOCTL_IMPORT_SEMAPHORE VIRTMAGMA_IOWR(0x22, struct virtmagma_ioctl_args_import_semaphore)
-#define VIRTMAGMA_IOCTL_GET_NOTIFICATION_CHANNEL_FD VIRTMAGMA_IOWR(0x23, struct virtmagma_ioctl_args_get_notification_channel_fd)
-#define VIRTMAGMA_IOCTL_READ_NOTIFICATION_CHANNEL VIRTMAGMA_IOWR(0x24, struct virtmagma_ioctl_args_read_notification_channel)
+#define VIRTMAGMA_IOCTL_GET_MMFD VIRTMAGMA_IOWR(0x01, struct virtmagma_ioctl_args_get_mmfd)
+#define VIRTMAGMA_IOCTL_MAGMA_COMMAND VIRTMAGMA_IOWR(0x02, struct virtmagma_ioctl_args_magma_command)
 
 #endif /* _LINUX_VIRTMAGMA_H */