[virtio-magma] add virtio-magma device

This change introduces a new virtio-magma sub-device parented to virtio-
wl. This will allow it to share virtual file descriptors with wayland
and enable zero-copy from the guest to a scenic output.

The initial implementation is simply a skeleton that supports magma_query
and magma_create_connection. It also pulls down the vulkan icd via a
temporary ioctl.

Test: ran linux_magma_tests over linux guest serial
Change-Id: I2427f7f21614b6a5450f56b00165dbd8e3b6f326
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index d867b97..a74ec94 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -6,4 +6,4 @@
 virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
 obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
-obj-$(CONFIG_VIRTIO_WL) += virtio_wl.o
+obj-$(CONFIG_VIRTIO_WL) += virtio_wl.o virtio_magma.o
diff --git a/drivers/virtio/virtio_magma.c b/drivers/virtio/virtio_magma.c
new file mode 100644
index 0000000..98ba709
--- /dev/null
+++ b/drivers/virtio/virtio_magma.c
@@ -0,0 +1,511 @@
+// Copyright 2019 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <linux/uaccess.h>
+#include <linux/virtio_magma.h>
+#include <linux/vmalloc.h>
+
+#include "virtio_magma.h"
+
+#define VQ_DESCRIPTOR_SIZE PAGE_SIZE
+#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
+
+/* Per-device virtmagma state, pointed to by virtmagma_info::private. */
+struct virtmagma_info_private
+{
+	/* one lock per virtqueue; serializes add/get on vqs[i] */
+	struct mutex vq_locks[VIRTMAGMA_QUEUE_COUNT];
+	struct virtqueue *vqs[VIRTMAGMA_QUEUE_COUNT];
+	/* bottom halves scheduled from the virtqueue interrupt callbacks */
+	struct work_struct in_vq_work;
+	struct work_struct out_vq_work;
+	/* woken by vq_out_work_handler when out-queue descriptors free up */
+	wait_queue_head_t out_waitq;
+};
+
+/* Hand one receive buffer back to the host. Caller holds the vq lock. */
+static int vq_return_inbuf_locked(struct virtqueue *vq, void *buffer)
+{
+	struct scatterlist sg;
+	int err;
+
+	sg_init_one(&sg, buffer, VQ_DESCRIPTOR_SIZE);
+
+	err = virtqueue_add_inbuf(vq, &sg, 1, buffer, GFP_KERNEL);
+	if (!err)
+		return 0;
+
+	pr_warn("virtmagma: failed to give inbuf to host: %d\n", err);
+	return err;
+}
+
+/*
+ * Queue a command (out_sg) and its response buffer (in_sg) on the out
+ * virtqueue.  @finish_completion doubles as the descriptor token and is
+ * completed by vq_out_work_handler() once the host has consumed the
+ * buffers.  Unless @nonblock, this waits for that completion before
+ * returning, so the response buffer is valid on a 0 return.
+ */
+static int vq_queue_out(struct virtmagma_info_private *vip,
+			struct scatterlist *out_sg,
+			struct scatterlist *in_sg,
+			struct completion *finish_completion,
+			bool nonblock)
+{
+	struct virtqueue *vq = vip->vqs[VIRTMAGMA_VQ_OUT];
+	struct mutex *vq_lock = &vip->vq_locks[VIRTMAGMA_VQ_OUT];
+	struct scatterlist *sgs[] = { out_sg, in_sg };
+	int ret = 0;
+
+	mutex_lock(vq_lock);
+	while ((ret = virtqueue_add_sgs(vq, sgs, 1, 1, finish_completion,
+					GFP_KERNEL)) == -ENOSPC) {
+		/* queue full: drop the lock while waiting for free space */
+		mutex_unlock(vq_lock);
+		if (nonblock)
+			return -EAGAIN;
+		/* out_waitq is woken when used buffers are reclaimed */
+		if (!wait_event_timeout(vip->out_waitq, vq->num_free > 0, HZ))
+			return -EBUSY;
+		mutex_lock(vq_lock);
+	}
+	if (!ret)
+		virtqueue_kick(vq);
+	mutex_unlock(vq_lock);
+
+	if (!nonblock)
+		wait_for_completion(finish_completion);
+
+	return ret;
+}
+
+/*
+ * Fill @vq with VQ_DESCRIPTOR_SIZE receive buffers until it is full.
+ * Caller holds the vq lock.  On failure every queued buffer is detached
+ * and freed, and a negative errno is returned.
+ */
+static int vq_fill_locked(struct virtqueue *vq)
+{
+	void *buffer;
+	int ret = 0;
+
+	while (vq->num_free > 0) {
+		buffer = kmalloc(VQ_DESCRIPTOR_SIZE, GFP_KERNEL);
+		if (!buffer) {
+			ret = -ENOMEM;
+			goto clear_queue;
+		}
+
+		ret = vq_return_inbuf_locked(vq, buffer);
+		if (ret) {
+			/* this buffer was never queued, so the detach loop
+			 * below cannot find it; free it here to avoid a leak */
+			kfree(buffer);
+			goto clear_queue;
+		}
+	}
+
+	return 0;
+
+clear_queue:
+	while ((buffer = virtqueue_detach_unused_buf(vq)))
+		kfree(buffer);
+	return ret;
+}
+
+/*
+ * VIRTMAGMA_IOCTL_HANDSHAKE: version/liveness exchange with userspace.
+ * Returns 0 on success, -EINVAL on a bad magic, -EFAULT on copy errors.
+ */
+static int virtmagma_ioctl_handshake(void __user *ptr)
+{
+	struct virtmagma_ioctl_args_handshake ioctl_args;
+
+	/* copy_{from,to}_user return the number of bytes NOT copied,
+	 * not an errno, so map any nonzero result to -EFAULT */
+	if (copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args)))
+		return -EFAULT;
+	if (ioctl_args.handshake_inout != VIRTMAGMA_HANDSHAKE_SEND)
+		return -EINVAL;
+	ioctl_args.handshake_inout = VIRTMAGMA_HANDSHAKE_RECV;
+	ioctl_args.version_out = VIRTMAGMA_VERSION;
+	if (copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args)))
+		return -EFAULT;
+	return 0;
+}
+
+/*
+ * Map @size bytes of the host-provided driver image, starting at guest
+ * pfn @pfn_base, into a contiguous kernel virtual mapping.  Returns
+ * NULL on failure; release with unmap_driver().
+ */
+static void* map_driver(unsigned long long pfn_base, size_t size) {
+	size_t num_pages;
+	struct page **pages;
+	size_t page_index;
+	void *ret;
+
+	num_pages = PAGE_COUNT(size);
+
+	/* kcalloc checks num_pages * sizeof(pages[0]) for overflow,
+	 * which matters since size comes from the host */
+	pages = kcalloc(num_pages, sizeof(pages[0]), GFP_KERNEL);
+	if (!pages)
+		return NULL;
+
+	for (page_index = 0; page_index < num_pages; ++page_index)
+		pages[page_index] = pfn_to_page(pfn_base + page_index);
+
+	/* TODO(MA-520): there should be a faster way to do this if pages are all contiguous in physmem */
+	ret = vm_map_ram(pages, num_pages, 0, PAGE_KERNEL);
+	kfree(pages);
+	return ret;
+}
+
+/* Release a mapping created by map_driver(); @size must match. */
+static void unmap_driver(void *addr, size_t size) {
+	vm_unmap_ram(addr, PAGE_COUNT(size));
+}
+
+/*
+ * Write @size bytes from @data to a newly created file at @path
+ * (mode 0555).  Returns 0 on success or a negative errno.
+ */
+static int write_driver_file(const char* path, const void* data, size_t size) {
+	struct file *driver_file;
+	ssize_t bytes_written;
+	loff_t offset;
+	int ret;
+
+	driver_file = filp_open(path, O_CLOEXEC | O_CREAT | O_RDWR, 0555);
+	/* filp_open returns an ERR_PTR on failure, never NULL */
+	if (IS_ERR(driver_file))
+		return PTR_ERR(driver_file);
+
+	offset = 0;
+	bytes_written = kernel_write(driver_file, data, size, &offset);
+	if (bytes_written < 0) {
+		ret = bytes_written;
+		goto close_file;
+	}
+	if ((size_t)bytes_written != size) {
+		/* short write */
+		ret = -EFAULT;
+		goto close_file;
+	}
+
+	ret = 0;
+
+close_file:
+	filp_close(driver_file, NULL);
+
+	return ret;
+}
+
+#define DRIVER_PATH "/libvulkan_magma.so"
+
+/*
+ * VIRTMAGMA_IOCTL_GET_DRIVER: fetch the vulkan ICD image from the host
+ * and write it to DRIVER_PATH in the guest filesystem.  Temporary
+ * mechanism (see commit message).  Returns 0 or a negative errno.
+ */
+static int virtmagma_ioctl_get_driver(struct virtmagma_info* vi, void __user *ptr)
+{
+	struct virtmagma_ioctl_args_get_driver ioctl_args;
+	struct virtio_magma_get_driver *virtio_ctrl;
+	struct virtio_magma_get_driver_resp *virtio_resp;
+	struct completion finish_completion;
+	struct scatterlist out_sg;
+	struct scatterlist in_sg;
+	unsigned long long driver_pfn_base;
+	size_t driver_size;
+	void *driver_data;
+	int ret;
+
+	if (copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args)))
+		return -EFAULT;
+
+	virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL);
+	if (!virtio_ctrl)
+		return -ENOMEM;
+	virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL);
+	if (!virtio_resp) {
+		ret = -ENOMEM;
+		goto free_virtio_ctrl;
+	}
+
+	/* all wire fields are little-endian */
+	virtio_ctrl->hdr.type = cpu_to_le32(VIRTIO_MAGMA_CMD_GET_DRIVER);
+	virtio_ctrl->page_size = cpu_to_le32(PAGE_SIZE);
+
+	init_completion(&finish_completion);
+
+	sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl));
+	sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp));
+
+	ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */);
+	if (ret)
+		goto free_virtio_resp;
+
+	if (le32_to_cpu(virtio_resp->hdr.type) != VIRTIO_MAGMA_RESP_GET_DRIVER) {
+		ret = -EFAULT;
+		goto free_virtio_resp;
+	}
+
+	driver_pfn_base = le64_to_cpu(virtio_resp->pfn);
+	driver_size = le64_to_cpu(virtio_resp->size);
+	driver_data = map_driver(driver_pfn_base, driver_size);
+	if (!driver_data) {
+		ret = -EFAULT;
+		goto free_virtio_resp;
+	}
+
+	ret = write_driver_file(DRIVER_PATH, driver_data, driver_size);
+	if (ret)
+		goto free_driver_alloc;
+
+	ioctl_args.unused = 0;
+	/* copy_to_user returns bytes-not-copied, not an errno */
+	ret = copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args)) ? -EFAULT : 0;
+
+free_driver_alloc:
+	unmap_driver(driver_data, driver_size);
+
+free_virtio_resp:
+	kfree(virtio_resp);
+
+free_virtio_ctrl:
+	kfree(virtio_ctrl);
+
+	return ret;
+}
+
+/*
+ * VIRTMAGMA_IOCTL_QUERY: forward a magma_query for @id to the host and
+ * return its value and status.  Returns 0 or a negative errno.
+ */
+static int virtmagma_ioctl_query(struct virtmagma_info* vi, void __user *ptr)
+{
+	struct virtmagma_ioctl_args_query ioctl_args;
+	struct virtio_magma_query *virtio_ctrl;
+	struct virtio_magma_query_resp *virtio_resp;
+	struct completion finish_completion;
+	struct scatterlist out_sg;
+	struct scatterlist in_sg;
+	int ret;
+
+	if (copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args)))
+		return -EFAULT;
+
+	virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL);
+	if (!virtio_ctrl)
+		return -ENOMEM;
+	virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL);
+	if (!virtio_resp) {
+		ret = -ENOMEM;
+		goto free_virtio_ctrl;
+	}
+
+	/* all wire fields are little-endian */
+	virtio_ctrl->hdr.type = cpu_to_le32(VIRTIO_MAGMA_CMD_QUERY);
+	virtio_ctrl->field_id = cpu_to_le64(ioctl_args.id);
+
+	init_completion(&finish_completion);
+
+	sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl));
+	sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp));
+
+	ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */);
+	if (ret)
+		goto free_virtio_resp;
+
+	if (le32_to_cpu(virtio_resp->hdr.type) != VIRTIO_MAGMA_RESP_QUERY) {
+		ret = -EFAULT;
+		goto free_virtio_resp;
+	}
+	ioctl_args.value_out = le64_to_cpu(virtio_resp->field_value_out);
+	ioctl_args.status_return = le32_to_cpu(virtio_resp->status_return);
+	/* copy_to_user returns bytes-not-copied, not an errno */
+	ret = copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args)) ? -EFAULT : 0;
+
+free_virtio_resp:
+	kfree(virtio_resp);
+
+free_virtio_ctrl:
+	kfree(virtio_ctrl);
+
+	return ret;
+}
+
+/*
+ * VIRTMAGMA_IOCTL_CREATE_CONNECTION: open a magma connection on the
+ * host and return its handle.  Returns 0 or a negative errno.
+ */
+static int virtmagma_ioctl_create_connection(struct virtmagma_info* vi, void __user *ptr)
+{
+	struct virtmagma_ioctl_args_create_connection ioctl_args;
+	struct virtio_magma_create_connection *virtio_ctrl;
+	struct virtio_magma_create_connection_resp *virtio_resp;
+	struct completion finish_completion;
+	struct scatterlist out_sg;
+	struct scatterlist in_sg;
+	int ret;
+
+	if (copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args)))
+		return -EFAULT;
+
+	virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL);
+	if (!virtio_ctrl)
+		return -ENOMEM;
+	virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL);
+	if (!virtio_resp) {
+		ret = -ENOMEM;
+		goto free_virtio_ctrl;
+	}
+
+	/* all wire fields are little-endian */
+	virtio_ctrl->hdr.type = cpu_to_le32(VIRTIO_MAGMA_CMD_CREATE_CONNECTION);
+
+	init_completion(&finish_completion);
+
+	sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl));
+	sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp));
+
+	ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */);
+	if (ret)
+		goto free_virtio_resp;
+
+	if (le32_to_cpu(virtio_resp->hdr.type) != VIRTIO_MAGMA_RESP_CREATE_CONNECTION) {
+		ret = -EFAULT;
+		goto free_virtio_resp;
+	}
+
+	/* byte-swap the wire value; the __s32 ioctl field truncates the
+	 * 64-bit host handle — NOTE(review): confirm handles fit in 31 bits */
+	ioctl_args.connection_return = le64_to_cpu(virtio_resp->connection_return);
+	/* copy_to_user returns bytes-not-copied, not an errno */
+	ret = copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args)) ? -EFAULT : 0;
+
+free_virtio_resp:
+	kfree(virtio_resp);
+
+free_virtio_ctrl:
+	kfree(virtio_ctrl);
+
+	return ret;
+}
+
+/*
+ * VIRTMAGMA_IOCTL_RELEASE_CONNECTION: close a magma connection on the
+ * host.  Returns 0 or a negative errno.
+ */
+static int virtmagma_ioctl_release_connection(struct virtmagma_info* vi, void __user *ptr)
+{
+	struct virtmagma_ioctl_args_release_connection ioctl_args;
+	struct virtio_magma_release_connection *virtio_ctrl;
+	struct virtio_magma_release_connection_resp *virtio_resp;
+	struct completion finish_completion;
+	struct scatterlist out_sg;
+	struct scatterlist in_sg;
+	int ret;
+
+	if (copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args)))
+		return -EFAULT;
+
+	virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL);
+	if (!virtio_ctrl)
+		return -ENOMEM;
+	virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL);
+	if (!virtio_resp) {
+		ret = -ENOMEM;
+		goto free_virtio_ctrl;
+	}
+
+	/* all wire fields are little-endian */
+	virtio_ctrl->hdr.type = cpu_to_le32(VIRTIO_MAGMA_CMD_RELEASE_CONNECTION);
+	virtio_ctrl->connection = cpu_to_le64(ioctl_args.connection);
+
+	init_completion(&finish_completion);
+
+	sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl));
+	sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp));
+
+	ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */);
+	if (ret)
+		goto free_virtio_resp;
+
+	if (le32_to_cpu(virtio_resp->hdr.type) != VIRTIO_MAGMA_RESP_RELEASE_CONNECTION)
+		ret = -EFAULT;
+
+free_virtio_resp:
+	kfree(virtio_resp);
+
+free_virtio_ctrl:
+	kfree(virtio_ctrl);
+
+	return ret;
+}
+
+/* Probe hook: nothing to do yet — vqs are set up by the parent. */
+static int virtmagma_probe(struct virtmagma_info* vi, struct virtio_device *vdev)
+{
+	return 0;
+}
+
+/*
+ * Remove hook: tear down state created by virtmagma_init().  Called
+ * unconditionally by the parent, so tolerate an uninitialized child.
+ */
+static void virtmagma_remove(struct virtmagma_info* vi, struct virtio_device *vdev)
+{
+	struct virtmagma_info_private *vip = vi->private;
+
+	if (!vip)
+		return;
+	/* make sure no deferred work still references vip before freeing */
+	cancel_work_sync(&vip->in_vq_work);
+	cancel_work_sync(&vip->out_vq_work);
+	kfree(vip);
+	vi->private = NULL;
+}
+
+/*
+ * Dispatch a virtmagma ioctl.  Called by the parent (virtio-wl) for
+ * any ioctl whose type matches VIRTMAGMA_IOCTL_BASE.
+ */
+static long virtmagma_ioctl(struct virtmagma_info* vi, unsigned int cmd, void __user *ptr)
+{
+	/* if vi->private is NULL, magma has not been initialized */
+	if (!vi->private)
+		return -ENODEV;
+	switch (cmd) {
+	case VIRTMAGMA_IOCTL_HANDSHAKE:
+		return virtmagma_ioctl_handshake(ptr);
+	case VIRTMAGMA_IOCTL_GET_DRIVER:
+		return virtmagma_ioctl_get_driver(vi, ptr);
+	case VIRTMAGMA_IOCTL_QUERY:
+		return virtmagma_ioctl_query(vi, ptr);
+	case VIRTMAGMA_IOCTL_CREATE_CONNECTION:
+		return virtmagma_ioctl_create_connection(vi, ptr);
+	case VIRTMAGMA_IOCTL_RELEASE_CONNECTION:
+		return virtmagma_ioctl_release_connection(vi, ptr);
+	default:
+		return -EINVAL;
+	}
+	/* unreachable "return 0" removed: every case above returns */
+}
+
+/* Placeholder: inbound queue traffic is not consumed yet. */
+static void vq_in_work_handler(struct work_struct *work)
+{
+	/* __func__ is the kernel-idiomatic (and standard C99) spelling */
+	pr_warn("%s\n", __func__);
+}
+
+/*
+ * Reclaim used descriptors from the out queue.  Each descriptor token
+ * is the completion passed to vq_queue_out(); completing it unblocks
+ * the corresponding ioctl.  Also wakes waiters stuck in vq_queue_out()
+ * waiting for free queue space.
+ */
+static void vq_out_work_handler(struct work_struct *work)
+{
+	struct virtmagma_info_private *vip = container_of(work, struct virtmagma_info_private,
+					      out_vq_work);
+	struct virtqueue *vq = vip->vqs[VIRTMAGMA_VQ_OUT];
+	struct mutex *vq_lock = &vip->vq_locks[VIRTMAGMA_VQ_OUT];
+	unsigned int len;
+	struct completion *finish_completion;
+	bool wake_waitq = false;
+
+	mutex_lock(vq_lock);
+	while ((finish_completion = virtqueue_get_buf(vq, &len)) != NULL) {
+		/* at least one descriptor was freed: space is available */
+		wake_waitq = true;
+		complete(finish_completion);
+	}
+	mutex_unlock(vq_lock);
+
+	if (wake_waitq)
+		wake_up_interruptible_all(&vip->out_waitq);
+}
+
+/* Interrupt-context callback for the in queue; punt to a workqueue. */
+static void vq_in_cb(struct virtqueue *vq)
+{
+	struct virtmagma_info *info = vq->vdev->priv + magma_info_offset;
+	struct virtmagma_info_private *priv = info->private;
+
+	schedule_work(&priv->in_vq_work);
+}
+
+/* Interrupt-context callback for the out queue; punt to a workqueue. */
+static void vq_out_cb(struct virtqueue *vq)
+{
+	struct virtmagma_info *info = vq->vdev->priv + magma_info_offset;
+	struct virtmagma_info_private *priv = info->private;
+
+	schedule_work(&priv->out_vq_work);
+}
+
+/* Publish this child's vq names and callbacks for the parent's
+ * one-shot virtio_find_vqs() call. */
+static void virtmagma_virtio_find_vqs_prepare(struct virtmagma_info* vi, vq_callback_t **vq_callbacks, const char **vq_names)
+{
+	vq_names[VIRTMAGMA_VQ_IN] = "magma_in";
+	vq_callbacks[VIRTMAGMA_VQ_IN] = vq_in_cb;
+	vq_names[VIRTMAGMA_VQ_OUT] = "magma_out";
+	vq_callbacks[VIRTMAGMA_VQ_OUT] = vq_out_cb;
+}
+
+/* Stash the virtqueues found by the parent and set up deferred work. */
+static void virtmagma_virtio_find_vqs_complete(struct virtmagma_info* vi, struct virtqueue **vqs)
+{
+	struct virtmagma_info_private *priv = vi->private;
+
+	memcpy(priv->vqs, vqs, sizeof(priv->vqs));
+	init_waitqueue_head(&priv->out_waitq);
+	INIT_WORK(&priv->in_vq_work, vq_in_work_handler);
+	INIT_WORK(&priv->out_vq_work, vq_out_work_handler);
+}
+
+/* Pre-populate the in queue with receive buffers before device-ready. */
+static int virtmagma_device_ready_prepare(struct virtmagma_info* vi)
+{
+	struct virtmagma_info_private *priv = vi->private;
+	int err = vq_fill_locked(priv->vqs[VIRTMAGMA_VQ_IN]);
+
+	if (err)
+		pr_warn("virtmagma: failed to fill in virtqueue: %d", err);
+	return err;
+}
+
+/* Kick the pre-filled in queue once the device has been marked ready. */
+static void virtmagma_device_ready_complete(struct virtmagma_info* vi)
+{
+	struct virtmagma_info_private *vip = vi->private;
+	virtqueue_kick(vip->vqs[VIRTMAGMA_VQ_IN]);
+}
+
+/*
+ * Wire up this child's hooks in @vi and allocate its private state.
+ * Returns 0 on success or -ENOMEM.
+ */
+int virtmagma_init(struct virtmagma_info *vi)
+{
+	struct virtmagma_info_private *priv;
+	int i;
+
+	vi->virtio_probe = virtmagma_probe;
+	vi->virtio_remove = virtmagma_remove;
+	vi->ioctl = virtmagma_ioctl;
+	vi->virtio_find_vqs_prepare = virtmagma_virtio_find_vqs_prepare;
+	vi->virtio_find_vqs_complete = virtmagma_virtio_find_vqs_complete;
+	vi->virtio_device_ready_prepare = virtmagma_device_ready_prepare;
+	vi->virtio_device_ready_complete = virtmagma_device_ready_complete;
+	vi->queue_count = VIRTMAGMA_QUEUE_COUNT;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	vi->private = priv;
+
+	for (i = 0; i < VIRTMAGMA_QUEUE_COUNT; ++i)
+		mutex_init(&priv->vq_locks[i]);
+
+	vi->enabled = true;
+
+	return 0;
+}
diff --git a/drivers/virtio/virtio_magma.h b/drivers/virtio/virtio_magma.h
new file mode 100644
index 0000000..efef2b1
--- /dev/null
+++ b/drivers/virtio/virtio_magma.h
@@ -0,0 +1,34 @@
+// Copyright 2019 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _DRIVERS_VIRTIO_VIRTIO_MAGMA_H
+#define _DRIVERS_VIRTIO_VIRTIO_MAGMA_H
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/virtio.h>
+#include <linux/virtmagma.h>
+
+#define VIRTMAGMA_VQ_IN 0
+#define VIRTMAGMA_VQ_OUT 1
+#define VIRTMAGMA_QUEUE_COUNT 2
+
+/*
+ * Hooks and state for the virtio-magma child device.  The parent
+ * (virtio-wl) invokes these at the matching points of its own probe,
+ * ioctl and remove paths.
+ */
+struct virtmagma_info {
+	/* set by virtmagma_init(); parent checks it before calling hooks */
+	bool enabled;
+	int (*virtio_probe)(struct virtmagma_info* vi, struct virtio_device *vdev);
+	void (*virtio_remove)(struct virtmagma_info* vi, struct virtio_device *vdev);
+	long (*ioctl)(struct virtmagma_info* vi, unsigned int cmd, void __user *ptr);
+	void (*virtio_find_vqs_prepare)(struct virtmagma_info* vi, vq_callback_t **vq_callbacks, const char **vq_names);
+	void (*virtio_find_vqs_complete)(struct virtmagma_info* vi, struct virtqueue **vqs);
+	int (*virtio_device_ready_prepare)(struct virtmagma_info* vi);
+	void (*virtio_device_ready_complete)(struct virtmagma_info* vi);
+	/* number of extra vqs this child asks the parent to allocate */
+	uint32_t queue_count;
+	/* opaque pointer to struct virtmagma_info_private */
+	void *private;
+};
+
+int virtmagma_init(struct virtmagma_info *vi);
+
+extern const size_t magma_info_offset;
+
+#endif /* _DRIVERS_VIRTIO_VIRTIO_MAGMA_H */
diff --git a/drivers/virtio/virtio_wl.c b/drivers/virtio/virtio_wl.c
index e4300ea..23e1f7e 100644
--- a/drivers/virtio/virtio_wl.c
+++ b/drivers/virtio/virtio_wl.c
@@ -60,6 +60,8 @@
 
 #include <uapi/linux/dma-buf.h>
 
+#include "virtio_magma.h"
+
 #define VFD_ILLEGAL_SIGN_BIT 0x80000000
 #define VFD_HOST_VFD_ID_BIT 0x40000000
 
@@ -101,8 +103,12 @@
 
 	struct mutex vfds_lock;
 	struct idr vfds;
+
+	struct virtmagma_info magma_info;
 };
 
+const size_t magma_info_offset = offsetof(struct virtwl_info, magma_info);
+
 static struct virtwl_vfd *virtwl_vfd_alloc(struct virtwl_info *vi);
 static void virtwl_vfd_free(struct virtwl_vfd *vfd);
 
@@ -1196,6 +1202,13 @@
 	if (filp->f_op == &virtwl_vfd_fops)
 		return virtwl_vfd_ioctl(filp, cmd, ptr);
 
+	if (_IOC_TYPE(cmd) == VIRTMAGMA_IOCTL_BASE) {
+		struct virtwl_info *vi = filp->private_data;
+		if (!vi->magma_info.enabled)
+			return -ENODEV; /* virtmagma not initialized */
+		return vi->magma_info.ioctl(&vi->magma_info, cmd, ptr);
+	}
+
 	switch (_IOC_NR(cmd)) {
 	case _IOC_NR(VIRTWL_IOCTL_NEW):
 		return virtwl_ioctl_new(filp, ptr, _IOC_SIZE(cmd));
@@ -1245,9 +1258,11 @@
 {
 	int i;
 	int ret;
+	unsigned queue_count;
 	struct virtwl_info *vi = NULL;
-	vq_callback_t *vq_callbacks[] = { vq_in_cb, vq_out_cb };
-	static const char * const vq_names[] = { "in", "out" };
+	struct virtqueue *vqs[VIRTWL_MAX_QUEUES];
+	vq_callback_t *vq_callbacks[VIRTWL_MAX_QUEUES] = { vq_in_cb, vq_out_cb };
+	static const char *vq_names[VIRTWL_MAX_QUEUES] = { "in", "out" };
 
 	vi = kzalloc(sizeof(struct virtwl_info), GFP_KERNEL);
 	if (!vi)
@@ -1289,13 +1304,39 @@
 	for (i = 0; i < VIRTWL_QUEUE_COUNT; i++)
 		mutex_init(&vi->vq_locks[i]);
 
-	ret = virtio_find_vqs(vdev, VIRTWL_QUEUE_COUNT, vi->vqs, vq_callbacks,
-			      vq_names, NULL);
+	queue_count = VIRTWL_QUEUE_COUNT;
+	if (vdev->features & (1ULL << VIRTIO_WL_F_MAGMA)) {
+		pr_info("virtwl: initializing virtmagma");
+		ret = virtmagma_init(&vi->magma_info);
+		if (ret) {
+			pr_warn("virtwl: failed to initialize virtmagma: %d\n", ret);
+			goto destroy_device;
+		}
+		queue_count += vi->magma_info.queue_count;
+	}
+
+	/*
+	 * virtio_find_vqs is a one-time operation, so child devices must
+	 * expose their respective arguments for the call to the parent.
+	 */
+	if (queue_count > VIRTWL_MAX_QUEUES) {
+		pr_warn("virtwl: too many queues requested by child device\n");
+		ret = -ENOMEM;
+		goto destroy_device;
+	}
+	if (vi->magma_info.enabled)
+		vi->magma_info.virtio_find_vqs_prepare(&vi->magma_info,
+			&vq_callbacks[VIRTWL_QUEUE_COUNT],
+			&vq_names[VIRTWL_QUEUE_COUNT]);
+	ret = virtio_find_vqs(vdev, queue_count, vqs, vq_callbacks, vq_names, NULL);
 	if (ret) {
 		pr_warn("virtwl: failed to find virtio wayland queues: %d\n",
 			ret);
 		goto del_cdev;
 	}
+	memcpy(vi->vqs, vqs, sizeof(vi->vqs));
+	if (vi->magma_info.enabled)
+		vi->magma_info.virtio_find_vqs_complete(&vi->magma_info, &vqs[VIRTWL_QUEUE_COUNT]);
 
 	INIT_WORK(&vi->in_vq_work, vq_in_work_handler);
 	INIT_WORK(&vi->out_vq_work, vq_out_work_handler);
@@ -1311,9 +1352,17 @@
 		goto del_cdev;
 	}
 
+	if (vi->magma_info.enabled) {
+		ret = vi->magma_info.virtio_device_ready_prepare(&vi->magma_info);
+		if (ret) {
+			pr_warn("virtwl: failed to prepare virtmagma for device-ready: %d", ret);
+			goto del_cdev;
+		}
+	}
 	virtio_device_ready(vdev);
 	virtqueue_kick(vi->vqs[VIRTWL_VQ_IN]);
-
+	if (vi->magma_info.enabled)
+		vi->magma_info.virtio_device_ready_complete(&vi->magma_info);
 
 	return 0;
 
@@ -1334,6 +1383,7 @@
 {
 	struct virtwl_info *vi = vdev->priv;
 
+	vi->magma_info.virtio_remove(&vi->magma_info, vdev);
 	cdev_del(&vi->cdev);
 	put_device(vi->dev);
 	class_destroy(vi->class);
@@ -1362,11 +1412,13 @@
 };
 
 static unsigned int features_legacy[] = {
-	VIRTIO_WL_F_TRANS_FLAGS
+	VIRTIO_WL_F_TRANS_FLAGS,
+	VIRTIO_WL_F_MAGMA
 };
 
 static unsigned int features[] = {
-	VIRTIO_WL_F_TRANS_FLAGS
+	VIRTIO_WL_F_TRANS_FLAGS,
+	VIRTIO_WL_F_MAGMA
 };
 
 static struct virtio_driver virtio_wl_driver = {
diff --git a/include/uapi/linux/virtio_magma.h b/include/uapi/linux/virtio_magma.h
new file mode 100644
index 0000000..d57435f
--- /dev/null
+++ b/include/uapi/linux/virtio_magma.h
@@ -0,0 +1,493 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _LINUX_VIRTIO_MAGMA_H
+#define _LINUX_VIRTIO_MAGMA_H
+
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtmagma.h>
+
+
+
+
+enum virtio_magma_ctrl_type {
+    /* magma commands */
+    VIRTIO_MAGMA_CMD_GET_DRIVER = 0x0400,
+    VIRTIO_MAGMA_CMD_QUERY,
+    VIRTIO_MAGMA_CMD_CREATE_CONNECTION,
+    VIRTIO_MAGMA_CMD_RELEASE_CONNECTION,
+    VIRTIO_MAGMA_CMD_GET_ERROR,
+    VIRTIO_MAGMA_CMD_CREATE_CONTEXT,
+    VIRTIO_MAGMA_CMD_RELEASE_CONTEXT,
+    VIRTIO_MAGMA_CMD_CREATE_BUFFER,
+    VIRTIO_MAGMA_CMD_RELEASE_BUFFER,
+    VIRTIO_MAGMA_CMD_GET_BUFFER_ID,
+    VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE,
+    VIRTIO_MAGMA_CMD_CLEAN_CACHE,
+    VIRTIO_MAGMA_CMD_SET_CACHE_POLICY,
+    VIRTIO_MAGMA_CMD_MAP,
+    VIRTIO_MAGMA_CMD_MAP_ALIGNED,
+    VIRTIO_MAGMA_CMD_MAP_SPECIFIC,
+    VIRTIO_MAGMA_CMD_UNMAP,
+    VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU,
+    VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU,
+    VIRTIO_MAGMA_CMD_COMMIT_BUFFER,
+    VIRTIO_MAGMA_CMD_EXPORT,
+    VIRTIO_MAGMA_CMD_IMPORT,
+    VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER,
+    VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER,
+    VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER,
+    VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS,
+    VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE,
+    VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE,
+    VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID,
+    VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE,
+    VIRTIO_MAGMA_CMD_RESET_SEMAPHORE,
+    VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES,
+    VIRTIO_MAGMA_CMD_EXPORT_SEMAPHORE,
+    VIRTIO_MAGMA_CMD_IMPORT_SEMAPHORE,
+    VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL,
+    /* magma success responses */
+    VIRTIO_MAGMA_RESP_GET_DRIVER = 0x1180,
+    VIRTIO_MAGMA_RESP_QUERY,
+    VIRTIO_MAGMA_RESP_CREATE_CONNECTION,
+    VIRTIO_MAGMA_RESP_RELEASE_CONNECTION,
+    VIRTIO_MAGMA_RESP_GET_ERROR,
+    VIRTIO_MAGMA_RESP_CREATE_CONTEXT,
+    VIRTIO_MAGMA_RESP_RELEASE_CONTEXT,
+    VIRTIO_MAGMA_RESP_CREATE_BUFFER,
+    VIRTIO_MAGMA_RESP_RELEASE_BUFFER,
+    VIRTIO_MAGMA_RESP_GET_BUFFER_ID,
+    VIRTIO_MAGMA_RESP_GET_BUFFER_SIZE,
+    VIRTIO_MAGMA_RESP_CLEAN_CACHE,
+    VIRTIO_MAGMA_RESP_SET_CACHE_POLICY,
+    VIRTIO_MAGMA_RESP_MAP,
+    VIRTIO_MAGMA_RESP_MAP_ALIGNED,
+    VIRTIO_MAGMA_RESP_MAP_SPECIFIC,
+    VIRTIO_MAGMA_RESP_UNMAP,
+    VIRTIO_MAGMA_RESP_MAP_BUFFER_GPU,
+    VIRTIO_MAGMA_RESP_UNMAP_BUFFER_GPU,
+    VIRTIO_MAGMA_RESP_COMMIT_BUFFER,
+    VIRTIO_MAGMA_RESP_EXPORT,
+    VIRTIO_MAGMA_RESP_IMPORT,
+    VIRTIO_MAGMA_RESP_CREATE_COMMAND_BUFFER,
+    VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER,
+    VIRTIO_MAGMA_RESP_SUBMIT_COMMAND_BUFFER,
+    VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS,
+    VIRTIO_MAGMA_RESP_CREATE_SEMAPHORE,
+    VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE,
+    VIRTIO_MAGMA_RESP_GET_SEMAPHORE_ID,
+    VIRTIO_MAGMA_RESP_SIGNAL_SEMAPHORE,
+    VIRTIO_MAGMA_RESP_RESET_SEMAPHORE,
+    VIRTIO_MAGMA_RESP_WAIT_SEMAPHORES,
+    VIRTIO_MAGMA_RESP_EXPORT_SEMAPHORE,
+    VIRTIO_MAGMA_RESP_IMPORT_SEMAPHORE,
+    VIRTIO_MAGMA_RESP_READ_NOTIFICATION_CHANNEL,
+    /* magma error responses */
+    VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED = 0x1280,
+    VIRTIO_MAGMA_RESP_ERR_INTERNAL,
+    VIRTIO_MAGMA_RESP_ERR_HOST_DISCONNECTED,
+    VIRTIO_MAGMA_RESP_ERR_OUT_OF_MEMORY,
+    VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND,
+    VIRTIO_MAGMA_RESP_ERR_INVALID_ARGUMENT,
+};
+
+/* Common header carried by every virtio-magma command and response. */
+struct virtio_magma_ctrl_hdr {
+    __le32 type;  /* enum virtio_magma_ctrl_type */
+    __le32 flags;
+};
+
+struct virtio_magma_get_driver {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le32 page_size;
+};
+
+struct virtio_magma_get_driver_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 pfn;
+    __le64 size;
+};
+
+struct virtio_magma_query {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 field_id;
+};
+
+struct virtio_magma_query_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 field_value_out;
+    __le32 status_return;
+};
+
+struct virtio_magma_create_connection {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_create_connection_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection_return;
+};
+
+struct virtio_magma_release_connection {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+};
+
+struct virtio_magma_release_connection_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_get_error {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+};
+
+struct virtio_magma_get_error_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le32 status_return;
+};
+
+struct virtio_magma_create_context {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+};
+
+struct virtio_magma_create_context_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le32 context_id_out;
+};
+
+struct virtio_magma_release_context {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le32 context_id;
+};
+
+struct virtio_magma_release_context_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_create_buffer {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 size;
+};
+
+struct virtio_magma_create_buffer_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 size_out;
+    __le64 buffer_out;
+    __le32 status_return;
+};
+
+struct virtio_magma_release_buffer {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 buffer;
+};
+
+struct virtio_magma_release_buffer_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_get_buffer_id {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 buffer;
+};
+
+struct virtio_magma_get_buffer_id_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 id_return;
+};
+
+struct virtio_magma_get_buffer_size {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 buffer;
+};
+
+struct virtio_magma_get_buffer_size_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 size_return;
+};
+
+struct virtio_magma_clean_cache {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 buffer;
+    __le64 offset;
+    __le64 size;
+    __le32 operation;
+};
+
+struct virtio_magma_clean_cache_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le32 status_return;
+};
+
+struct virtio_magma_set_cache_policy {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 buffer;
+    __le32 policy;
+};
+
+struct virtio_magma_set_cache_policy_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le32 status_return;
+};
+
+struct virtio_magma_map {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 buffer;
+};
+
+struct virtio_magma_map_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 addr_out;
+    __le32 status_return;
+};
+
+struct virtio_magma_map_aligned {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 buffer;
+    __le64 alignment;
+};
+
+struct virtio_magma_map_aligned_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 addr_out;
+    __le32 status_return;
+};
+
+struct virtio_magma_map_specific {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 buffer;
+    __le64 addr;
+};
+
+struct virtio_magma_map_specific_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le32 status_return;
+};
+
+struct virtio_magma_unmap {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 buffer;
+};
+
+struct virtio_magma_unmap_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le32 status_return;
+};
+
+struct virtio_magma_map_buffer_gpu {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 buffer;
+    __le64 page_offset;
+    __le64 page_count;
+    __le64 gpu_va;
+    __le64 map_flags;
+};
+
+struct virtio_magma_map_buffer_gpu_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_unmap_buffer_gpu {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 buffer;
+    __le64 gpu_va;
+};
+
+struct virtio_magma_unmap_buffer_gpu_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_commit_buffer {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 buffer;
+    __le64 page_offset;
+    __le64 page_count;
+};
+
+struct virtio_magma_commit_buffer_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_export {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 buffer;
+};
+
+struct virtio_magma_export_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le32 buffer_handle_out;
+    __le32 status_return;
+};
+
+struct virtio_magma_import {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+};
+
+struct virtio_magma_import_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 buffer_out;
+    __le32 buffer_handle;
+    __le32 status_return;
+};
+
+struct virtio_magma_create_command_buffer {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 size;
+};
+
+struct virtio_magma_create_command_buffer_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 buffer_out;
+    __le32 status_return;
+};
+
+struct virtio_magma_release_command_buffer {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 command_buffer;
+};
+
+struct virtio_magma_release_command_buffer_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_submit_command_buffer {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 command_buffer;
+    __le32 context_id;
+};
+
+struct virtio_magma_submit_command_buffer_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_execute_immediate_commands {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 command_count;
+    __le64 commands; // magma_system_inline_command_buffer[command_count]
+    __le32 context_id;
+};
+
+struct virtio_magma_execute_immediate_commands_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_create_semaphore {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+};
+
+struct virtio_magma_create_semaphore_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 semaphore_out;
+    __le32 status_return;
+};
+
+struct virtio_magma_release_semaphore {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 semaphore;
+};
+
+struct virtio_magma_release_semaphore_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_get_semaphore_id {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 semaphore;
+};
+
+struct virtio_magma_get_semaphore_id_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 id_return;
+};
+
+struct virtio_magma_signal_semaphore {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 semaphore;
+};
+
+struct virtio_magma_signal_semaphore_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_reset_semaphore {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 semaphore;
+};
+
+struct virtio_magma_reset_semaphore_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_wait_semaphores {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 timeout_ms;
+    __le64 semaphores; // magma_semaphore_t[count]
+    __le32 count;
+    __le32 status_return;
+    char wait_all;
+};
+
+struct virtio_magma_wait_semaphores_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+};
+
+struct virtio_magma_export_semaphore {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 semaphore;
+    __le64 connection;
+};
+
+struct virtio_magma_export_semaphore_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le32 semaphore_handle_out;
+    __le32 status_return;
+};
+
+struct virtio_magma_import_semaphore {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le32 semaphore_handle;
+};
+
+struct virtio_magma_import_semaphore_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 semaphore_out;
+    __le32 status_return;
+};
+
+struct virtio_magma_read_notification_channel {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 connection;
+    __le64 buffer;
+    __le64 buffer_size;
+};
+
+struct virtio_magma_read_notification_channel_resp {
+    struct virtio_magma_ctrl_hdr hdr;
+    __le64 buffer_size_out;
+    __le32 status_return;
+};
+
+
+#endif // _LINUX_VIRTIO_MAGMA_H
diff --git a/include/uapi/linux/virtio_wl.h b/include/uapi/linux/virtio_wl.h
index 76bf43e..b8cbb4f 100644
--- a/include/uapi/linux/virtio_wl.h
+++ b/include/uapi/linux/virtio_wl.h
@@ -13,12 +13,16 @@
 #define VIRTWL_VQ_IN 0
 #define VIRTWL_VQ_OUT 1
 #define VIRTWL_QUEUE_COUNT 2
+#define VIRTWL_MAX_QUEUES 4
 #define VIRTWL_MAX_ALLOC 0x800
 #define VIRTWL_PFN_SHIFT 12
 
 /* Enables the transition to new flag semantics */
 #define VIRTIO_WL_F_TRANS_FLAGS 1
 
+/* Enables virtio-magma child device */
+#define VIRTIO_WL_F_MAGMA 2
+
 struct virtio_wl_config {
 };
 
diff --git a/include/uapi/linux/virtmagma.h b/include/uapi/linux/virtmagma.h
new file mode 100644
index 0000000..68737be7
--- /dev/null
+++ b/include/uapi/linux/virtmagma.h
@@ -0,0 +1,287 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _LINUX_VIRTMAGMA_H
+#define _LINUX_VIRTMAGMA_H
+
+#include <asm/ioctl.h>
+#include <linux/types.h>
+
+#define VIRTMAGMA_IOCTL_BASE	 'm'
+#define VIRTMAGMA_IO(nr)	 _IO(VIRTMAGMA_IOCTL_BASE, nr)
+#define VIRTMAGMA_IOR(nr, type)	 _IOR(VIRTMAGMA_IOCTL_BASE, nr, type)
+#define VIRTMAGMA_IOW(nr, type)	 _IOW(VIRTMAGMA_IOCTL_BASE, nr, type)
+#define VIRTMAGMA_IOWR(nr, type) _IOWR(VIRTMAGMA_IOCTL_BASE, nr, type)
+/* Version layout: major bits 31-24, minor bits 23-12, patch bits 11-0. */
+#define VIRTMAGMA_MAKE_VERSION(major, minor, patch) \
+	(((major) << 24) | ((minor) << 12) | (patch))
+/* Masks widened to 0xFFF: MAKE_VERSION gives minor and patch 12 bits each,
+ * but 0x3FF masks silently dropped the top two bits of each on decode. */
+#define VIRTMAGMA_GET_VERSION(version, major, minor, patch) (\
+	(major = ((version) >> 24)), \
+	(minor = ((version) >> 12) & 0xFFF), \
+	(patch = (version) & 0xFFF), (version))
+#define VIRTMAGMA_HANDSHAKE_SEND 0x46434853 /* ASCII "FCHS" */
+#define VIRTMAGMA_HANDSHAKE_RECV 0x474F4F47 /* ASCII "GOOG" */
+#define VIRTMAGMA_VERSION VIRTMAGMA_MAKE_VERSION(0,1,0)
+struct virtmagma_ioctl_args_handshake { /* ioctl arg structs: *_out/*_return are written by the driver */
+	__u32 handshake_inout; /* presumably SEND magic in, RECV magic out — confirm against driver */
+	__u32 version_out; /* VIRTMAGMA_MAKE_VERSION-encoded driver version */
+};
+
+struct virtmagma_ioctl_args_get_driver { /* pulls down the vulkan icd (temporary ioctl per commit message) */
+	__s32 unused;
+};
+
+struct virtmagma_ioctl_args_query {
+	__u64 id;
+	__u64 value_out;
+	__u32 status_return; /* magma status code — confirm value space against magma headers */
+};
+
+struct virtmagma_ioctl_args_create_connection {
+	__s32 connection_return; /* driver-local connection id used by the structs below */
+};
+
+struct virtmagma_ioctl_args_release_connection {
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_get_error {
+	__s32 connection;
+	__u32 status_return;
+};
+
+struct virtmagma_ioctl_args_create_context {
+	__s32 connection;
+	__u32 context_id_out;
+};
+
+struct virtmagma_ioctl_args_release_context {
+	__s32 connection;
+	__u32 context_id;
+};
+
+struct virtmagma_ioctl_args_create_buffer {
+	__u64 size; /* requested size */
+	__u64 size_out; /* actual size granted — presumably page-rounded, confirm */
+	__u64 buffer_out;
+	__s32 connection;
+	__u32 status_return;
+};
+
+struct virtmagma_ioctl_args_release_buffer {
+	__u64 buffer;
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_get_buffer_id {
+	__u64 buffer;
+	__u64 id_return;
+};
+
+struct virtmagma_ioctl_args_get_buffer_size {
+	__u64 buffer;
+	__u64 size_return;
+};
+
+struct virtmagma_ioctl_args_clean_cache {
+	__u64 buffer;
+	__u64 offset;
+	__u64 size;
+	__u32 operation; /* presumably a magma cache-operation enum — confirm */
+	__u32 status_return;
+};
+
+struct virtmagma_ioctl_args_set_cache_policy {
+	__u64 buffer;
+	__u32 policy;
+	__u32 status_return;
+};
+
+struct virtmagma_ioctl_args_map { /* CPU mapping; addr_out receives the mapped address */
+	__u64 buffer;
+	__u64 addr_out;
+	__s32 connection;
+	__u32 status_return;
+};
+
+struct virtmagma_ioctl_args_map_aligned {
+	__u64 buffer;
+	__u64 alignment; /* presumably must be a power of two — confirm against driver */
+	__u64 addr_out;
+	__s32 connection;
+	__u32 status_return;
+};
+
+struct virtmagma_ioctl_args_map_specific { /* map at a caller-chosen address */
+	__u64 buffer;
+	__u64 addr;
+	__s32 connection;
+	__u32 status_return;
+};
+
+struct virtmagma_ioctl_args_unmap {
+	__u64 buffer;
+	__s32 connection;
+	__u32 status_return;
+};
+
+struct virtmagma_ioctl_args_map_buffer_gpu { /* GPU-side mapping; no status_return, unlike the CPU map structs */
+	__u64 buffer;
+	__u64 page_offset;
+	__u64 page_count;
+	__u64 gpu_va;
+	__u64 map_flags;
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_unmap_buffer_gpu {
+	__u64 buffer;
+	__u64 gpu_va;
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_commit_buffer {
+	__u64 buffer;
+	__u64 page_offset;
+	__u64 page_count;
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_export { /* export buffer to a handle shareable across connections — confirm scope */
+	__u64 buffer;
+	__u32 buffer_handle_out;
+	__u32 status_return;
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_import {
+	__u64 buffer_out;
+	__u32 buffer_handle;
+	__u32 status_return;
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_create_command_buffer {
+	__u64 size;
+	__u64 buffer_out;
+	__u32 status_return;
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_release_command_buffer {
+	__u64 command_buffer;
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_submit_command_buffer {
+	__u64 command_buffer;
+	__u32 context_id;
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_execute_immediate_commands {
+	__u64 command_count;
+	__u64 commands_addr; /* magma_system_inline_command_buffer[command_count] */
+	__u32 context_id;
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_create_semaphore { /* semaphore handles are 64-bit, paired with the virtio structs above */
+	__u64 semaphore_out;
+	__s32 connection;
+	__u32 status_return;
+};
+
+struct virtmagma_ioctl_args_release_semaphore {
+	__u64 semaphore;
+	__s32 connection;
+};
+
+struct virtmagma_ioctl_args_get_semaphore_id {
+	__u64 semaphore;
+	__u64 id_return;
+};
+
+struct virtmagma_ioctl_args_signal_semaphore { /* no connection field — semaphore handle alone identifies the object */
+	__u64 semaphore;
+};
+
+struct virtmagma_ioctl_args_reset_semaphore {
+	__u64 semaphore;
+};
+
+struct virtmagma_ioctl_args_wait_semaphores {
+	__u64 timeout_ms;
+	__u64 semaphores_addr; /* magma_semaphore_t[count] */
+	__u32 count;
+	__u32 status_return;
+	__u8 wait_all; /* nonzero: wait for all; zero: presumably wait for any — confirm */
+};
+
+struct virtmagma_ioctl_args_export_semaphore {
+	__u64 semaphore;
+	__s32 connection;
+	__u32 semaphore_handle_out;
+	__u32 status_return;
+};
+
+struct virtmagma_ioctl_args_import_semaphore {
+	__u64 semaphore_out;
+	__s32 connection;
+	__u32 semaphore_handle;
+	__u32 status_return;
+};
+
+struct virtmagma_ioctl_args_get_notification_channel_fd {
+	__s32 connection;
+	__s32 fd_return; /* new file descriptor installed in the caller — confirm ownership/close semantics */
+};
+
+struct virtmagma_ioctl_args_read_notification_channel {
+	__u64 buffer; /* userspace destination address */
+	__u64 buffer_size;
+	__u64 buffer_size_out; /* bytes actually read */
+	__s32 connection;
+	__u32 status_return;
+};
+/* ioctl numbers 0x00-0x24, allocated sequentially; all use _IOWR on base 'm'. */
+#define VIRTMAGMA_IOCTL_HANDSHAKE VIRTMAGMA_IOWR(0x00, struct virtmagma_ioctl_args_handshake)
+#define VIRTMAGMA_IOCTL_GET_DRIVER VIRTMAGMA_IOWR(0x01, struct virtmagma_ioctl_args_get_driver)
+#define VIRTMAGMA_IOCTL_QUERY VIRTMAGMA_IOWR(0x02, struct virtmagma_ioctl_args_query)
+#define VIRTMAGMA_IOCTL_CREATE_CONNECTION VIRTMAGMA_IOWR(0x03, struct virtmagma_ioctl_args_create_connection)
+#define VIRTMAGMA_IOCTL_RELEASE_CONNECTION VIRTMAGMA_IOWR(0x04, struct virtmagma_ioctl_args_release_connection)
+#define VIRTMAGMA_IOCTL_GET_ERROR VIRTMAGMA_IOWR(0x05, struct virtmagma_ioctl_args_get_error)
+#define VIRTMAGMA_IOCTL_CREATE_CONTEXT VIRTMAGMA_IOWR(0x06, struct virtmagma_ioctl_args_create_context)
+#define VIRTMAGMA_IOCTL_RELEASE_CONTEXT VIRTMAGMA_IOWR(0x07, struct virtmagma_ioctl_args_release_context)
+#define VIRTMAGMA_IOCTL_CREATE_BUFFER VIRTMAGMA_IOWR(0x08, struct virtmagma_ioctl_args_create_buffer)
+#define VIRTMAGMA_IOCTL_RELEASE_BUFFER VIRTMAGMA_IOWR(0x09, struct virtmagma_ioctl_args_release_buffer)
+#define VIRTMAGMA_IOCTL_GET_BUFFER_ID VIRTMAGMA_IOWR(0x0A, struct virtmagma_ioctl_args_get_buffer_id)
+#define VIRTMAGMA_IOCTL_GET_BUFFER_SIZE VIRTMAGMA_IOWR(0x0B, struct virtmagma_ioctl_args_get_buffer_size)
+#define VIRTMAGMA_IOCTL_CLEAN_CACHE VIRTMAGMA_IOWR(0x0C, struct virtmagma_ioctl_args_clean_cache)
+#define VIRTMAGMA_IOCTL_SET_CACHE_POLICY VIRTMAGMA_IOWR(0x0D, struct virtmagma_ioctl_args_set_cache_policy)
+#define VIRTMAGMA_IOCTL_MAP VIRTMAGMA_IOWR(0x0E, struct virtmagma_ioctl_args_map)
+#define VIRTMAGMA_IOCTL_MAP_ALIGNED VIRTMAGMA_IOWR(0x0F, struct virtmagma_ioctl_args_map_aligned)
+#define VIRTMAGMA_IOCTL_MAP_SPECIFIC VIRTMAGMA_IOWR(0x10, struct virtmagma_ioctl_args_map_specific)
+#define VIRTMAGMA_IOCTL_UNMAP VIRTMAGMA_IOWR(0x11, struct virtmagma_ioctl_args_unmap)
+#define VIRTMAGMA_IOCTL_MAP_BUFFER_GPU VIRTMAGMA_IOWR(0x12, struct virtmagma_ioctl_args_map_buffer_gpu)
+#define VIRTMAGMA_IOCTL_UNMAP_BUFFER_GPU VIRTMAGMA_IOWR(0x13, struct virtmagma_ioctl_args_unmap_buffer_gpu)
+#define VIRTMAGMA_IOCTL_COMMIT_BUFFER VIRTMAGMA_IOWR(0x14, struct virtmagma_ioctl_args_commit_buffer)
+#define VIRTMAGMA_IOCTL_EXPORT VIRTMAGMA_IOWR(0x15, struct virtmagma_ioctl_args_export)
+#define VIRTMAGMA_IOCTL_IMPORT VIRTMAGMA_IOWR(0x16, struct virtmagma_ioctl_args_import)
+#define VIRTMAGMA_IOCTL_CREATE_COMMAND_BUFFER VIRTMAGMA_IOWR(0x17, struct virtmagma_ioctl_args_create_command_buffer)
+#define VIRTMAGMA_IOCTL_RELEASE_COMMAND_BUFFER VIRTMAGMA_IOWR(0x18, struct virtmagma_ioctl_args_release_command_buffer)
+#define VIRTMAGMA_IOCTL_SUBMIT_COMMAND_BUFFER VIRTMAGMA_IOWR(0x19, struct virtmagma_ioctl_args_submit_command_buffer)
+#define VIRTMAGMA_IOCTL_EXECUTE_IMMEDIATE_COMMANDS VIRTMAGMA_IOWR(0x1A, struct virtmagma_ioctl_args_execute_immediate_commands)
+#define VIRTMAGMA_IOCTL_CREATE_SEMAPHORE VIRTMAGMA_IOWR(0x1B, struct virtmagma_ioctl_args_create_semaphore)
+#define VIRTMAGMA_IOCTL_RELEASE_SEMAPHORE VIRTMAGMA_IOWR(0x1C, struct virtmagma_ioctl_args_release_semaphore)
+#define VIRTMAGMA_IOCTL_GET_SEMAPHORE_ID VIRTMAGMA_IOWR(0x1D, struct virtmagma_ioctl_args_get_semaphore_id)
+#define VIRTMAGMA_IOCTL_SIGNAL_SEMAPHORE VIRTMAGMA_IOWR(0x1E, struct virtmagma_ioctl_args_signal_semaphore)
+#define VIRTMAGMA_IOCTL_RESET_SEMAPHORE VIRTMAGMA_IOWR(0x1F, struct virtmagma_ioctl_args_reset_semaphore)
+#define VIRTMAGMA_IOCTL_WAIT_SEMAPHORES VIRTMAGMA_IOWR(0x20, struct virtmagma_ioctl_args_wait_semaphores)
+#define VIRTMAGMA_IOCTL_EXPORT_SEMAPHORE VIRTMAGMA_IOWR(0x21, struct virtmagma_ioctl_args_export_semaphore)
+#define VIRTMAGMA_IOCTL_IMPORT_SEMAPHORE VIRTMAGMA_IOWR(0x22, struct virtmagma_ioctl_args_import_semaphore)
+#define VIRTMAGMA_IOCTL_GET_NOTIFICATION_CHANNEL_FD VIRTMAGMA_IOWR(0x23, struct virtmagma_ioctl_args_get_notification_channel_fd)
+#define VIRTMAGMA_IOCTL_READ_NOTIFICATION_CHANNEL VIRTMAGMA_IOWR(0x24, struct virtmagma_ioctl_args_read_notification_channel)
+
+#endif /* _LINUX_VIRTMAGMA_H */