[virtio_magma] Support for magma_get_buffer_handle

This uses magma_map/unmap for now; it will be transitioned to
magma_internal_map/unmap in an upcoming change so that the
former can be removed.
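
For illustration only, the returned fd behaves like any other mappable
fd in the guest (sketch; buffer_fd and size are assumed to come from a
prior magma_get_buffer_handle call, and <sys/mman.h> provides mmap):

  void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   buffer_fd, 0);
  if (ptr != MAP_FAILED) {
          /* ... access the buffer ... */
          munmap(ptr, size);
  }
  close(buffer_fd);  /* last close triggers the host-side unmap */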

Bug: 62444

Change-Id: Ia8c7f2bcdd1355208aa0db7a921fccda75270224
diff --git a/drivers/virtio/virtio_magma.c b/drivers/virtio/virtio_magma.c
index 58f5c31..d7e28c6 100644
--- a/drivers/virtio/virtio_magma.c
+++ b/drivers/virtio/virtio_magma.c
@@ -117,6 +117,14 @@
 	size_t response_size;
 };
 
+struct virtmagma_buffer_fd_priv {
+	struct virtmagma_instance *instance;
+	struct mutex mutex_lock;
+	u64 connection_id;
+	u64 buffer_id;
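+	// Physical address returned by the host map; zero until the first
+	// successful mmap().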
+	u64 phys_addr;
+};
+
 static void virtmagma_cache_ctor(void *p)
 {
 	memset(p, 0, MESSAGE_CACHE_OBJECT_SIZE);
@@ -529,10 +537,161 @@
 	return ret;
 }
 
+static int virtmagma_buffer_fd_release(struct inode *inodep, struct file *filp)
+{
+	struct virtmagma_buffer_fd_priv *priv = filp->private_data;
+	struct virtmagma_instance *instance = priv->instance;
+	struct virtmagma_connection *connection = NULL;
+	struct virtmagma_connection_object *object = NULL;
+	struct virtio_magma_unmap_ctrl *request = NULL;
+	struct virtio_magma_unmap_resp *response = NULL;
+	int ret;
+
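+	// Best-effort teardown: only ask the host to unmap if the instance,
+	// connection and buffer are all still valid and the buffer was
+	// actually mapped (phys_addr is non-zero).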
+	if (instance) {
+		connection = get_connection(instance, priv->connection_id);
+	}
+
+	if (connection) {
+		object = get_connection_object(connection, priv->buffer_id,
+					       MAGMA_BUFFER);
+	}
+
+	if (connection && object && priv->phys_addr) {
+		request = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL);
+		response = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL);
+	}
+
+	if (request && response) {
+		struct virtmagma_virtio_command command = {
+			.request_ptr = request,
+			.request_size = sizeof(*request),
+			.response_ptr = response,
+			.response_size = sizeof(*response)
+		};
+
+		request->hdr.type = VIRTIO_MAGMA_CMD_UNMAP;
+		request->hdr.flags = 0;
+		request->connection = priv->connection_id;
+		request->buffer = priv->buffer_id;
+
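+		// Failures are deliberately ignored: the fd is being
+		// released regardless of whether the host unmap succeeds.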
+		ret = vq_out_send_sync(instance->vi, &command);
+		if (ret == 0) {
+			virtmagma_check_expected_response_type(request,
+							       response);
+		}
+	}
+
+	if (request)
+		kmem_cache_free(instance->msg_cache, request);
+
+	if (response)
+		kmem_cache_free(instance->msg_cache, response);
+
+	mutex_destroy(&priv->mutex_lock);
+	kfree(priv);
+
+	return 0;
+}
+
+// Ensure the entire buffer is mapped by the host first; guest mmap()
+// calls then create any number of whole or partial mappings that
+// point into the host-mapped region.
+static int virtmagma_buffer_fd_mmap(struct file *filp,
+				    struct vm_area_struct *vma)
+{
+	struct virtmagma_buffer_fd_priv *priv = filp->private_data;
+	struct virtmagma_instance *instance = priv->instance;
+	struct virtmagma_connection *connection;
+	struct virtmagma_connection_object *object;
+	struct virtio_magma_map_ctrl *request;
+	struct virtio_magma_map_resp *response;
+	unsigned long vm_size = vma->vm_end - vma->vm_start;
+	size_t max_map_size;
+	int ret = 0;
+
+	if (!instance)
+		return -ENODEV;
+
+	connection = get_connection(instance, priv->connection_id);
+	if (!connection)
+		return -EINVAL;
+
+	object = get_connection_object(connection, priv->buffer_id,
+				       MAGMA_BUFFER);
+	if (!object)
+		return -EINVAL;
+
+	request = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+
+	response = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL);
+	if (!response) {
+		kmem_cache_free(instance->msg_cache, request);
+		return -ENOMEM;
+	}
+
+	request->hdr.type = VIRTIO_MAGMA_CMD_MAP;
+	request->hdr.flags = 0;
+	request->connection = priv->connection_id;
+	request->buffer = priv->buffer_id;
+
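+	// The host-side map is performed at most once per fd; the mutex
+	// serializes racing mmap() callers.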
+	mutex_lock(&priv->mutex_lock);
+	if (!priv->phys_addr) {
+		struct virtmagma_virtio_command command = {
+			.request_ptr = request,
+			.request_size = sizeof(*request),
+			.response_ptr = response,
+			.response_size = sizeof(*response)
+		};
+
+		ret = vq_out_send_sync(instance->vi, &command);
+		if (ret == 0) {
+			ret = virtmagma_check_expected_response_type(request,
+								     response);
+		}
+
+		if (ret == 0) {
+			priv->phys_addr = response->addr_out;
+		}
+	}
+	mutex_unlock(&priv->mutex_lock);
+
+	kmem_cache_free(instance->msg_cache, request);
+	kmem_cache_free(instance->msg_cache, response);
+
+	if (ret)
+		return ret;
+
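+	// Reject guest mappings that would extend past the page-aligned end
+	// of the buffer.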
+	max_map_size = PAGE_ALIGN(object->buffer.size_allocated);
+
+	if (vma->vm_pgoff * PAGE_SIZE + vm_size > max_map_size) {
+		pr_warn("virtmagma: user tried to mmap with offset (%lu) and size (%lu) exceeding the buffer's size (%zu)\n",
+			vma->vm_pgoff * PAGE_SIZE, vm_size, max_map_size);
+		return -EINVAL;
+	}
+
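+	// Map the requested window of the physical region returned by the
+	// host into the guest VMA, honoring the caller's page offset.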
+	ret = io_remap_pfn_range(vma, vma->vm_start,
+				 priv->phys_addr / PAGE_SIZE + vma->vm_pgoff,
+				 vm_size, vma->vm_page_prot);
+	if (ret)
+		return ret;
+
+	vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = priv;
+
+	return 0;
+}
+
 static const struct file_operations virtmagma_mmfd_fops = {
 	.mmap = virtmagma_mmfd_mmap,
 };
 
+static const struct file_operations virtmagma_buffer_fd_fops = {
+	.mmap = virtmagma_buffer_fd_mmap,
+	.release = virtmagma_buffer_fd_release,
+};
+
 static int create_instance(struct virtmagma_info *vi,
 			   struct virtmagma_instance **instance_out)
 {
@@ -1176,6 +1335,54 @@
 	return 0;
 }
 
+static int virtmagma_command_magma_get_buffer_handle(
+	struct virtmagma_instance *instance,
+	struct virtmagma_virtio_command *command)
+{
+	struct virtmagma_connection *connection;
+	struct virtmagma_connection_object *object;
+	struct virtmagma_buffer_fd_priv *priv;
+	struct virtio_magma_get_buffer_handle_ctrl *request =
+		command->request_ptr;
+	struct virtio_magma_get_buffer_handle_resp *response =
+		command->response_ptr;
+	int ret;
+
+	connection = get_connection(instance, request->connection);
+	if (!connection)
+		return -EINVAL;
+
+	object = get_connection_object(connection, request->buffer,
+				       MAGMA_BUFFER);
+	if (!object)
+		return -EINVAL;
+
+	priv = kzalloc(sizeof(struct virtmagma_buffer_fd_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	mutex_init(&priv->mutex_lock);
+
+	// Simple reference; the client must ensure the fd is used only while
+	// the buffer remains valid.
+	priv->instance = instance;
+	priv->connection_id = request->connection;
+	priv->buffer_id = request->buffer;
+
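+	// Expose the buffer as an anonymous fd whose mmap and release call
+	// back into this driver.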
+	ret = anon_inode_getfd("[virtmagma]", &virtmagma_buffer_fd_fops, priv,
+			       O_RDWR);
+	if (ret < 0) {
+		pr_err("virtmagma: failed to create fd: %d\n", ret);
+		mutex_destroy(&priv->mutex_lock);
+		kfree(priv);
+		return ret;
+	}
+
+	response->hdr.type = VIRTIO_MAGMA_RESP_GET_BUFFER_HANDLE;
+	response->handle_out = ret;
+	response->result_return = 0;
+
+	return 0;
+}
+
 static int virtmagma_ioctl_handshake(struct file *filp, void __user *ptr)
 {
 	struct virtmagma_ioctl_args_handshake ioctl_args;
@@ -1313,6 +1520,10 @@
 	case VIRTIO_MAGMA_CMD_EXPORT:
 		ret = virtmagma_command_magma_export(instance, &command);
 		break;
+	case VIRTIO_MAGMA_CMD_GET_BUFFER_HANDLE:
+		ret = virtmagma_command_magma_get_buffer_handle(instance,
+								&command);
+		break;
 	/* pass-through handlers */
 	case VIRTIO_MAGMA_CMD_QUERY2:
 	case VIRTIO_MAGMA_CMD_GET_ERROR:
diff --git a/include/uapi/linux/virtio_magma.h b/include/uapi/linux/virtio_magma.h
index afb67ee..6178400 100644
--- a/include/uapi/linux/virtio_magma.h
+++ b/include/uapi/linux/virtio_magma.h
@@ -78,6 +78,7 @@
 	VIRTIO_MAGMA_CMD_BUFFER_SET_NAME = 0x1040,
 	VIRTIO_MAGMA_CMD_BUFFER_RANGE_OP = 0x1041,
 	VIRTIO_MAGMA_CMD_BUFFER_GET_INFO = 0x1042,
+	VIRTIO_MAGMA_CMD_GET_BUFFER_HANDLE = 0x1043,
 	/* magma success responses
  */
 	VIRTIO_MAGMA_RESP_RELEASE_CONNECTION = 0x2004,
@@ -137,6 +138,7 @@
 	VIRTIO_MAGMA_RESP_BUFFER_SET_NAME = 0x2040,
 	VIRTIO_MAGMA_RESP_BUFFER_RANGE_OP = 0x2041,
 	VIRTIO_MAGMA_RESP_BUFFER_GET_INFO = 0x2042,
+	VIRTIO_MAGMA_RESP_GET_BUFFER_HANDLE = 0x2043,
 	/* magma error responses
  */
 	VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED = 0x3001,
@@ -263,6 +265,8 @@
 		case VIRTIO_MAGMA_RESP_BUFFER_RANGE_OP: return "VIRTIO_MAGMA_RESP_BUFFER_RANGE_OP";
 		case VIRTIO_MAGMA_CMD_BUFFER_GET_INFO: return "VIRTIO_MAGMA_CMD_BUFFER_GET_INFO";
 		case VIRTIO_MAGMA_RESP_BUFFER_GET_INFO: return "VIRTIO_MAGMA_RESP_BUFFER_GET_INFO";
+		case VIRTIO_MAGMA_CMD_GET_BUFFER_HANDLE: return "VIRTIO_MAGMA_CMD_GET_BUFFER_HANDLE";
+		case VIRTIO_MAGMA_RESP_GET_BUFFER_HANDLE: return "VIRTIO_MAGMA_RESP_GET_BUFFER_HANDLE";
 		case VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED: return "VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED";
 		case VIRTIO_MAGMA_RESP_ERR_INTERNAL: return "VIRTIO_MAGMA_RESP_ERR_INTERNAL";
 		case VIRTIO_MAGMA_RESP_ERR_HOST_DISCONNECTED: return "VIRTIO_MAGMA_RESP_ERR_HOST_DISCONNECTED";
@@ -332,6 +336,7 @@
 		case VIRTIO_MAGMA_CMD_BUFFER_SET_NAME: return VIRTIO_MAGMA_RESP_BUFFER_SET_NAME;
 		case VIRTIO_MAGMA_CMD_BUFFER_RANGE_OP: return VIRTIO_MAGMA_RESP_BUFFER_RANGE_OP;
 		case VIRTIO_MAGMA_CMD_BUFFER_GET_INFO: return VIRTIO_MAGMA_RESP_BUFFER_GET_INFO;
+		case VIRTIO_MAGMA_CMD_GET_BUFFER_HANDLE: return VIRTIO_MAGMA_RESP_GET_BUFFER_HANDLE;
 		default: return VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND;
 	}
 }
@@ -998,5 +1003,17 @@
 	__le64 result_return;
 } __attribute((packed));
 
+struct virtio_magma_get_buffer_handle_ctrl {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 connection;
+	__le64 buffer;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_handle_resp {
+	struct virtio_magma_ctrl_hdr hdr;
+	__le64 handle_out;
+	__le64 result_return;
+} __attribute((packed));
+
 #endif /* _LINUX_VIRTIO_MAGMA_H
  */