| // Copyright 2019 The Fuchsia Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include <linux/anon_inodes.h> |
| #include <linux/cdev.h> |
| #include <linux/compat.h> |
| #include <linux/hashtable.h> |
| #include <linux/module.h> |
| #include <linux/uaccess.h> |
| #include <linux/virtio.h> |
| #include <linux/virtio_magma.h> |
| #include <linux/vmalloc.h> |
| |
| #define VQ_DESCRIPTOR_SIZE PAGE_SIZE |
| #define CONNECTIONS_HASHTABLE_BITS 4 |
| #define CONNECTION_OBJECTS_HASHTABLE_BITS 12 |
| #define NOTIFICATION_MAX_BYTES 65536 |
| #define COMMAND_OK(command, request_ptr, response_ptr) \ |
| ((command)->request_size >= sizeof(*(request_ptr)) && \ |
| (command)->response_size >= sizeof(*(response_ptr))) |
| #define WAYLAND_DEVICE_PATH "/dev/wl0" |
| #define MESSAGE_CACHE_OBJECT_SIZE 64 |
| |
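| /* Per-device driver state: the character device, the outgoing virtqueue |
| with its locking and completion machinery, and the set of open instances |
| (one per open of the device node). */ |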
| struct virtmagma_info { |
| dev_t dev_num; |
| struct device *dev; |
| struct class *class; |
| struct cdev cdev; |
| |
| struct mutex vq_out_lock; |
| struct virtqueue *vq_out; |
| struct work_struct out_vq_work; |
| wait_queue_head_t out_waitq; |
| |
| struct mutex instances_lock; |
| struct idr instances; |
| }; |
| |
| enum virtmagma_connection_object_type { |
| MAGMA_BUFFER, |
| MAGMA_SEMAPHORE, |
| MAGMA_CONTEXT |
| }; |
| |
| static const char *virtmagma_connection_object_type_string( |
| enum virtmagma_connection_object_type type) |
| { |
| switch (type) { |
| case MAGMA_BUFFER: |
| return "MAGMA_BUFFER"; |
| case MAGMA_SEMAPHORE: |
| return "MAGMA_SEMAPHORE"; |
| case MAGMA_CONTEXT: |
| return "MAGMA_CONTEXT"; |
| default: |
| return "[UNKNOWN]"; |
| } |
| } |
| |
| struct virtmagma_buffer { |
| size_t size_requested; |
| size_t size_allocated; |
| }; |
| |
| struct virtmagma_semaphore { |
| uint8_t dummy; |
| }; |
| |
| struct virtmagma_context { |
| uint8_t dummy; |
| }; |
| |
| struct virtmagma_connection; |
| struct virtmagma_connection_object { |
| struct virtmagma_connection *parent_connection; |
| enum virtmagma_connection_object_type type; |
| uint64_t host_value; |
| union { |
| struct virtmagma_buffer buffer; |
| struct virtmagma_semaphore semaphore; |
| struct virtmagma_context context; |
| }; |
| struct hlist_node node; |
| }; |
| |
| struct virtmagma_connection { |
| struct virtmagma_instance *parent_instance; |
| uint64_t host_value; |
| DECLARE_HASHTABLE(objects, CONNECTION_OBJECTS_HASHTABLE_BITS); |
| struct hlist_node node; |
| }; |
| |
| struct virtmagma_instance { |
| struct virtmagma_info *vi; |
| int id; |
| DECLARE_HASHTABLE(connections, CONNECTIONS_HASHTABLE_BITS); |
| struct { |
| pid_t pid; |
| pid_t tgid; |
| char comm[TASK_COMM_LEN]; |
| } creator; |
| struct kmem_cache *msg_cache; |
| void *wayland_device_private_data; |
| }; |
| |
| struct virtmagma_virtio_command { |
| void *request_ptr; |
| size_t request_size; |
| void *response_ptr; |
| size_t response_size; |
| }; |
| |
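| /* Private data for the anonymous fd handed back to userspace for a buffer |
| handle; tracks the host buffer handle and, once mapped, the physical |
| address used to mmap the buffer into the guest. */ |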
| struct virtmagma_buffer_fd_priv { |
| struct virtmagma_info *vi; |
| struct mutex mutex_lock; |
| u32 buffer_handle; |
| u64 buffer_size; |
| u64 phys_addr; |
| }; |
| |
| static void virtmagma_cache_ctor(void *p) |
| { |
| memset(p, 0, MESSAGE_CACHE_OBJECT_SIZE); |
| } |
| |
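| /* Send one request/response pair on the out virtqueue and block until the |
| device completes it. If the queue is full, wait up to one second for space |
| before giving up with -EBUSY. */ |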
| static int vq_out_send_sync(struct virtmagma_info *vi, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| DECLARE_COMPLETION_ONSTACK(finish_completion); |
| struct scatterlist sg_out; |
| struct scatterlist sg_in; |
| struct scatterlist *sgs[] = { &sg_out, &sg_in }; |
| init_completion(&finish_completion); |
| sg_init_one(&sg_out, command->request_ptr, command->request_size); |
| sg_init_one(&sg_in, command->response_ptr, command->response_size); |
| |
| mutex_lock(&vi->vq_out_lock); |
| while ((ret = virtqueue_add_sgs(vi->vq_out, sgs, 1, 1, |
| &finish_completion, GFP_KERNEL)) == |
| -ENOSPC) { |
| mutex_unlock(&vi->vq_out_lock); |
| if (!wait_event_timeout(vi->out_waitq, vi->vq_out->num_free > 0, |
| HZ)) |
| return -EBUSY; |
| mutex_lock(&vi->vq_out_lock); |
| } |
| if (ret) { |
| /* Don't wait on a completion that will never be signalled. */ |
| mutex_unlock(&vi->vq_out_lock); |
| return ret; |
| } |
| virtqueue_kick(vi->vq_out); |
| mutex_unlock(&vi->vq_out_lock); |
| |
| wait_for_completion(&finish_completion); |
| |
| return 0; |
| } |
| |
| /* Verify that a virtio command's response type matches the type expected |
| for its request. Note that a match indicates only that the proxying of the |
| magma command succeeded, not necessarily that the magma command itself did. */ |
| static int virtmagma_check_expected_response_type(void *request, void *response) |
| { |
| struct virtio_magma_ctrl_hdr *request_hdr = request; |
| struct virtio_magma_ctrl_hdr *response_hdr = response; |
| if (virtio_magma_expected_response_type(request_hdr->type) != |
| response_hdr->type) { |
| pr_warn("virtmagma: unexpected virtio response %s (%d) to request %s (%d)", |
| virtio_magma_ctrl_type_string(response_hdr->type), |
| response_hdr->type, |
| virtio_magma_ctrl_type_string(request_hdr->type), |
| request_hdr->type); |
| return -EIO; |
| } |
| return 0; |
| } |
| |
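| /* Look up the guest-side wrapper for a connection by its host-assigned id. */ |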
| static struct virtmagma_connection * |
| get_connection(struct virtmagma_instance *instance, uint64_t id) |
| { |
| struct virtmagma_connection *connection = NULL; |
| hash_for_each_possible (instance->connections, connection, node, id) { |
| if (connection->host_value == id) |
| break; |
| } |
| if (!connection) { |
| pr_warn("virtmagma: invalid connection id %lld", id); |
| } |
| return connection; |
| } |
| |
| static struct virtmagma_connection_object * |
| get_connection_object(struct virtmagma_connection *connection, |
| uint64_t id, |
| enum virtmagma_connection_object_type type) |
| { |
| struct virtmagma_connection_object *object = NULL; |
| hash_for_each_possible (connection->objects, object, node, id) { |
| if (object->type == type && object->host_value == id) |
| break; |
| } |
| if (!object) { |
| pr_warn("virtmagma: invalid %s object id %lld", |
| virtmagma_connection_object_type_string(type), id); |
| } |
| return object; |
| } |
| |
| static int control_type(void *p) |
| { |
| return ((struct virtio_magma_ctrl_hdr *)p)->type; |
| } |
| |
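| /* The release_* helpers below tell the host to drop an object the guest is |
| discarding. Their request/response messages are drawn from the per-instance |
| slab cache since they are small and fixed-size. */ |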
| static int release_buffer(struct virtmagma_buffer *buffer) |
| { |
| int ret; |
| struct virtio_magma_release_buffer_ctrl *request; |
| struct virtio_magma_release_buffer_resp *response; |
| struct virtmagma_virtio_command command; |
| struct virtmagma_connection_object *object = container_of( |
| buffer, struct virtmagma_connection_object, buffer); |
| BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE); |
| BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE); |
| |
| request = kmem_cache_alloc( |
| object->parent_connection->parent_instance->msg_cache, |
| GFP_KERNEL); |
| if (!request) |
| return -ENOMEM; |
| |
| response = kmem_cache_alloc( |
| object->parent_connection->parent_instance->msg_cache, |
| GFP_KERNEL); |
| if (!response) { |
| ret = -ENOMEM; |
| goto free_request; |
| } |
| |
| request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_BUFFER; |
| request->connection = object->parent_connection->host_value; |
| request->buffer = object->host_value; |
| |
| command.request_ptr = request; |
| command.request_size = sizeof(*request); |
| command.response_ptr = response; |
| command.response_size = sizeof(*response); |
| |
| ret = vq_out_send_sync(object->parent_connection->parent_instance->vi, |
| &command); |
| if (ret) |
| goto free_response; |
| |
| if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_BUFFER) |
| ret = -EIO; |
| |
| free_response: |
| kmem_cache_free(object->parent_connection->parent_instance->msg_cache, |
| response); |
| |
| free_request: |
| kmem_cache_free(object->parent_connection->parent_instance->msg_cache, |
| request); |
| |
| return ret; |
| } |
| |
| static int release_semaphore(struct virtmagma_semaphore *semaphore) |
| { |
| int ret; |
| struct virtio_magma_release_semaphore_ctrl *request; |
| struct virtio_magma_release_semaphore_resp *response; |
| struct virtmagma_virtio_command command; |
| struct virtmagma_connection_object *object = container_of( |
| semaphore, struct virtmagma_connection_object, semaphore); |
| BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE); |
| BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE); |
| |
| request = kmem_cache_alloc( |
| object->parent_connection->parent_instance->msg_cache, |
| GFP_KERNEL); |
| if (!request) |
| return -ENOMEM; |
| |
| response = kmem_cache_alloc( |
| object->parent_connection->parent_instance->msg_cache, |
| GFP_KERNEL); |
| if (!response) { |
| ret = -ENOMEM; |
| goto free_request; |
| } |
| |
| request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE; |
| request->connection = object->parent_connection->host_value; |
| request->semaphore = object->host_value; |
| |
| command.request_ptr = request; |
| command.request_size = sizeof(*request); |
| command.response_ptr = response; |
| command.response_size = sizeof(*response); |
| |
| ret = vq_out_send_sync(object->parent_connection->parent_instance->vi, |
| &command); |
| if (ret) |
| goto free_response; |
| |
| if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE) |
| ret = -EIO; |
| |
| free_response: |
| kmem_cache_free(object->parent_connection->parent_instance->msg_cache, |
| response); |
| |
| free_request: |
| kmem_cache_free(object->parent_connection->parent_instance->msg_cache, |
| request); |
| |
| return ret; |
| } |
| |
| static int release_context(struct virtmagma_context *context) |
| { |
| int ret; |
| struct virtio_magma_release_context_ctrl *request; |
| struct virtio_magma_release_context_resp *response; |
| struct virtmagma_virtio_command command; |
| struct virtmagma_connection_object *object = container_of( |
| context, struct virtmagma_connection_object, context); |
| BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE); |
| BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE); |
| |
| request = kmem_cache_alloc( |
| object->parent_connection->parent_instance->msg_cache, |
| GFP_KERNEL); |
| if (!request) |
| return -ENOMEM; |
| |
| response = kmem_cache_alloc( |
| object->parent_connection->parent_instance->msg_cache, |
| GFP_KERNEL); |
| if (!response) { |
| ret = -ENOMEM; |
| goto free_request; |
| } |
| |
| request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_CONTEXT; |
| request->connection = object->parent_connection->host_value; |
| request->context_id = object->host_value; |
| |
| command.request_ptr = request; |
| command.request_size = sizeof(*request); |
| command.response_ptr = response; |
| command.response_size = sizeof(*response); |
| |
| ret = vq_out_send_sync(object->parent_connection->parent_instance->vi, |
| &command); |
| if (ret) |
| goto free_response; |
| |
| if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_CONTEXT) |
| ret = -EIO; |
| |
| free_response: |
| kmem_cache_free(object->parent_connection->parent_instance->msg_cache, |
| response); |
| |
| free_request: |
| kmem_cache_free(object->parent_connection->parent_instance->msg_cache, |
| request); |
| |
| return ret; |
| } |
| |
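| /* Release a connection on the host, first releasing (and reporting) any |
| objects that userspace leaked on it. */ |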
| static int release_connection(struct virtmagma_connection *connection) |
| { |
| int ret; |
| int bkt; |
| struct virtmagma_connection_object *object; |
| struct virtio_magma_release_connection_ctrl *request; |
| struct virtio_magma_release_connection_resp *response; |
| struct virtmagma_virtio_command command; |
| uint64_t leaked_buffers = 0; |
| uint64_t leaked_semaphores = 0; |
| uint64_t leaked_contexts = 0; |
| |
| /* first, release any child objects */ |
| |
| hash_for_each (connection->objects, bkt, object, node) { |
| switch (object->type) { |
| case MAGMA_BUFFER: |
| release_buffer(&object->buffer); |
| ++leaked_buffers; |
| break; |
| case MAGMA_CONTEXT: |
| release_context(&object->context); |
| ++leaked_contexts; |
| break; |
| case MAGMA_SEMAPHORE: |
| release_semaphore(&object->semaphore); |
| ++leaked_semaphores; |
| break; |
| default: |
| pr_err("virtmagma: unknown connection object (%d)", |
| object->type); |
| break; |
| } |
| } |
| if (leaked_buffers || leaked_semaphores || leaked_contexts) { |
| pr_info("virtmagma: connection %lld from command %s closed with leaked objects:\n", |
| connection->host_value, |
| connection->parent_instance->creator.comm); |
| pr_cont("virtmagma: buffers: %lld\n", leaked_buffers); |
| pr_cont("virtmagma: semaphores: %lld\n", leaked_semaphores); |
| pr_cont("virtmagma: contexts: %lld\n", leaked_contexts); |
| } |
| |
| /* now release the connection */ |
| |
| BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE); |
| BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE); |
| |
| request = kmem_cache_alloc(connection->parent_instance->msg_cache, |
| GFP_KERNEL); |
| if (!request) |
| return -ENOMEM; |
| |
| response = kmem_cache_alloc(connection->parent_instance->msg_cache, |
| GFP_KERNEL); |
| if (!response) { |
| ret = -ENOMEM; |
| goto free_request; |
| } |
| |
| request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_CONNECTION; |
| request->connection = connection->host_value; |
| |
| command.request_ptr = request; |
| command.request_size = sizeof(*request); |
| command.response_ptr = response; |
| command.response_size = sizeof(*response); |
| |
| ret = vq_out_send_sync(connection->parent_instance->vi, &command); |
| if (ret) |
| goto free_response; |
| |
| if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_CONNECTION) |
| ret = -EIO; |
| |
| free_response: |
| kmem_cache_free(connection->parent_instance->msg_cache, response); |
| |
| free_request: |
| kmem_cache_free(connection->parent_instance->msg_cache, request); |
| |
| return ret; |
| } |
| |
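| /* Free the guest-side bookkeeping for a connection and its objects; callers |
| are expected to have released the host-side connection first. */ |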
| static int destroy_connection(struct virtmagma_connection *connection) |
| { |
| int bkt; |
| struct hlist_node *tmp; |
| struct virtmagma_connection_object *object; |
| /* _safe variant: objects are removed and freed while iterating */ |
| hash_for_each_safe (connection->objects, bkt, tmp, object, node) { |
| hash_del(&object->node); |
| kfree(object); |
| } |
| hash_del(&connection->node); |
| kfree(connection); |
| return 0; |
| } |
| |
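| /* Tear down an instance (idr-callback signature): release any connections |
| the process leaked, then free the message cache and the instance itself. */ |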
| static int destroy_instance(int id, void *p, void *data) |
| { |
| struct virtmagma_instance *instance = p; |
| struct virtmagma_connection *connection; |
| struct hlist_node *tmp; |
| int bkt; |
| uint64_t leaked_connections = 0; |
| |
| hash_for_each (instance->connections, bkt, connection, node) { |
| ++leaked_connections; |
| } |
| if (leaked_connections) { |
| pr_info("virtmagma: command %s exited with %llu leaked connections", |
| instance->creator.comm, leaked_connections); |
| } |
| /* _safe variant: destroy_connection() removes and frees each entry */ |
| hash_for_each_safe (instance->connections, bkt, tmp, connection, node) { |
| release_connection(connection); |
| destroy_connection(connection); |
| } |
| |
| kmem_cache_destroy(instance->msg_cache); |
| |
| kfree(instance); |
| return 0; |
| } |
| |
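| /* Ask the host to undo the whole-buffer mapping created on first mmap, if |
| one exists. */ |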
| static int virtmagma_buffer_fd_unmap(struct virtmagma_buffer_fd_priv *priv) |
| { |
| struct virtio_magma_internal_unmap2_ctrl *request = NULL; |
| struct virtio_magma_internal_unmap2_resp *response = NULL; |
| int ret; |
| |
| if (!priv->phys_addr) |
| return -EINVAL; |
| |
| request = kzalloc(sizeof(*request), GFP_KERNEL); |
| if (!request) |
| return -ENOMEM; |
| |
| response = kzalloc(sizeof(*response), GFP_KERNEL); |
| if (!response) { |
| kfree(request); |
| return -ENOMEM; |
| } |
| |
| { |
| struct virtmagma_virtio_command command = { |
| .request_ptr = request, |
| .request_size = sizeof(*request), |
| .response_ptr = response, |
| .response_size = sizeof(*response) |
| }; |
| |
| request->hdr.type = VIRTIO_MAGMA_CMD_INTERNAL_UNMAP2; |
| request->hdr.flags = 0; |
| request->buffer = priv->buffer_handle; |
| request->address = priv->phys_addr; |
| |
| ret = vq_out_send_sync(priv->vi, &command); |
| if (ret == 0) { |
| ret = virtmagma_check_expected_response_type(request, |
| response); |
| } |
| } |
| |
| kfree(request); |
| kfree(response); |
| |
| return ret; |
| } |
| |
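| /* Ask the host to release a handle the guest no longer references. */ |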
| static int virtmagma_release_handle(struct virtmagma_info *vi, |
| uint32_t handle) |
| { |
| struct virtio_magma_internal_release_handle_ctrl *request; |
| struct virtio_magma_internal_release_handle_resp *response; |
| int ret; |
| |
| request = kzalloc(sizeof(*request), GFP_KERNEL); |
| if (!request) |
| return -ENOMEM; |
| |
| response = kzalloc(sizeof(*response), GFP_KERNEL); |
| if (!response) { |
| kfree(request); |
| return -ENOMEM; |
| } |
| |
| request->hdr.type = VIRTIO_MAGMA_CMD_INTERNAL_RELEASE_HANDLE; |
| request->hdr.flags = 0; |
| request->handle = handle; |
| |
| { |
| struct virtmagma_virtio_command command = { |
| .request_ptr = request, |
| .request_size = sizeof(*request), |
| .response_ptr = response, |
| .response_size = sizeof(*response) |
| }; |
| |
| ret = vq_out_send_sync(vi, &command); |
| } |
| |
| if (ret == 0) { |
| ret = virtmagma_check_expected_response_type(request, response); |
| } |
| |
| kfree(request); |
| kfree(response); |
| |
| return ret; |
| } |
| |
| static int virtmagma_buffer_fd_release(struct inode *inodep, struct file *filp) |
| { |
| struct virtmagma_buffer_fd_priv *priv = filp->private_data; |
| |
| virtmagma_buffer_fd_unmap(priv); |
| virtmagma_release_handle(priv->vi, priv->buffer_handle); |
| |
| mutex_destroy(&priv->mutex_lock); |
| kfree(priv); |
| |
| return 0; |
| } |
| |
| // Ensure the entire buffer is mapped on the host device; the guest may then |
| // create any number of whole or partial mappings that point into the |
| // host-mapped region. |
| static int virtmagma_buffer_fd_mmap(struct file *filp, |
| struct vm_area_struct *vma) |
| { |
| struct virtmagma_buffer_fd_priv *priv = filp->private_data; |
| struct virtio_magma_internal_map2_ctrl *request; |
| struct virtio_magma_internal_map2_resp *response; |
| unsigned long vm_size = vma->vm_end - vma->vm_start; |
| size_t max_map_size; |
| int ret = 0; |
| |
| request = kzalloc(sizeof(*request), GFP_KERNEL); |
| if (!request) |
| return -ENOMEM; |
| |
| response = kzalloc(sizeof(*response), GFP_KERNEL); |
| if (!response) { |
| kfree(request); |
| return -ENOMEM; |
| } |
| |
| request->hdr.type = VIRTIO_MAGMA_CMD_INTERNAL_MAP2; |
| request->hdr.flags = 0; |
| request->buffer = priv->buffer_handle; |
| request->length = priv->buffer_size; |
| |
| mutex_lock(&priv->mutex_lock); |
| if (!priv->phys_addr) { |
| struct virtmagma_virtio_command command = { |
| .request_ptr = request, |
| .request_size = sizeof(*request), |
| .response_ptr = response, |
| .response_size = sizeof(*response) |
| }; |
| |
| ret = vq_out_send_sync(priv->vi, &command); |
| if (ret == 0) { |
| ret = virtmagma_check_expected_response_type(request, |
| response); |
| } |
| |
| if (ret == 0) { |
| priv->phys_addr = response->address_out; |
| } |
| } |
| mutex_unlock(&priv->mutex_lock); |
| |
| kfree(request); |
| kfree(response); |
| |
| if (ret) |
| return ret; |
| |
| max_map_size = PAGE_ALIGN(priv->buffer_size); |
| |
| if (vma->vm_pgoff * PAGE_SIZE + vm_size > max_map_size) { |
| pr_warn("virtmagma: user tried to mmap with offset (%ld) and size (%ld) exceeding the buffer's size (%ld)", |
| vma->vm_pgoff * PAGE_SIZE, vm_size, max_map_size); |
| return -EINVAL; |
| } |
| |
| ret = io_remap_pfn_range(vma, vma->vm_start, |
| priv->phys_addr / PAGE_SIZE + vma->vm_pgoff, |
| vm_size, vma->vm_page_prot); |
| if (ret) |
| return ret; |
| |
| vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP; |
| vma->vm_private_data = priv; |
| |
| return 0; |
| } |
| |
| static const struct file_operations virtmagma_buffer_fd_fops = { |
| .mmap = virtmagma_buffer_fd_mmap, |
| .release = virtmagma_buffer_fd_release, |
| }; |
| |
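| /* Allocate per-open state: an id in the instances idr, a message slab |
| cache, and (when available) the wayland device state used for export. */ |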
| static int create_instance(struct virtmagma_info *vi, |
| struct virtmagma_instance **instance_out) |
| { |
| int ret; |
| struct file *filp; |
| struct virtmagma_instance *instance; |
| |
| *instance_out = NULL; |
| |
| instance = kzalloc(sizeof(*instance), GFP_KERNEL); |
| if (!instance) |
| return -ENOMEM; |
| instance->vi = vi; |
| |
| mutex_lock(&vi->instances_lock); |
| ret = idr_alloc(&vi->instances, instance, 1, -1, GFP_KERNEL); |
| mutex_unlock(&vi->instances_lock); |
| if (ret <= 0) { |
| ret = -ENOMEM; |
| goto free_instance; |
| } |
| instance->id = ret; |
| |
| hash_init(instance->connections); |
| instance->creator.pid = current->pid; |
| instance->creator.tgid = current->tgid; |
| memcpy(instance->creator.comm, current->comm, |
| sizeof(instance->creator.comm)); |
| instance->creator.comm[sizeof(instance->creator.comm) - 1] = 0; |
| |
| filp = filp_open(WAYLAND_DEVICE_PATH, O_RDWR, 0); |
| if (IS_ERR_OR_NULL(filp)) { |
| pr_warn("virtmagma: failed to open wayland device at %s\n", |
| WAYLAND_DEVICE_PATH); |
| pr_cont("virtmagma: magma_export will not be available\n"); |
| } else { |
| instance->wayland_device_private_data = filp->private_data; |
| filp_close(filp, 0); |
| } |
| |
| instance->msg_cache = |
| kmem_cache_create("virtmagma_cache", MESSAGE_CACHE_OBJECT_SIZE, |
| MESSAGE_CACHE_OBJECT_SIZE, 0, |
| virtmagma_cache_ctor); |
| if (!instance->msg_cache) { |
| pr_err("virtmagma: failed to create message cache"); |
| ret = -ENOMEM; |
| goto remove_id; |
| } |
| |
| *instance_out = instance; |
| |
| return 0; |
| |
| remove_id: |
| mutex_lock(&vi->instances_lock); |
| idr_remove(&vi->instances, instance->id); |
| mutex_unlock(&vi->instances_lock); |
| |
| free_instance: |
| kfree(instance); |
| |
| return ret; |
| } |
| |
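| /* Per-command handlers. Each validates the user-supplied sizes with |
| COMMAND_OK, forwards the message to the host via vq_out_send_sync(), and |
| keeps the guest-side connection/object tables in sync so leaked objects can |
| be cleaned up when the instance goes away. */ |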
| static int virtmagma_command_magma_create_connection2( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtmagma_connection *connection; |
| struct virtio_magma_create_connection2_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_create_connection2_resp *response = |
| command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| /* pass on magma errors without creating a connection object */ |
| if (response->result_return) { |
| pr_warn("virtmagma: magma_create_connection returned %d", |
| (int32_t)response->result_return); |
| return 0; /* the ioctl is still successful */ |
| } |
| |
| connection = kzalloc(sizeof(*connection), GFP_KERNEL); |
| if (!connection) |
| return -ENOMEM; |
| |
| connection->parent_instance = instance; |
| connection->host_value = response->connection_out; |
| hash_init(connection->objects); |
| |
| hash_add(instance->connections, &connection->node, |
| connection->host_value); |
| |
| return 0; |
| } |
| |
| static int virtmagma_command_magma_release_connection( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtmagma_connection *connection = NULL; |
| struct virtio_magma_release_connection_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_release_connection_resp *response = |
| command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| ret = release_connection(connection); |
| if (ret) |
| return ret; |
| |
| return destroy_connection(connection); |
| } |
| |
| static int virtmagma_command_magma_create_context( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtmagma_connection *connection; |
| struct virtmagma_connection_object *object; |
| struct virtio_magma_create_context_ctrl *request = command->request_ptr; |
| struct virtio_magma_create_context_resp *response = |
| command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| /* magma_create_context does not return errors */ |
| |
| object = kzalloc(sizeof(*object), GFP_KERNEL); |
| if (!object) |
| return -ENOMEM; |
| |
| object->parent_connection = connection; |
| object->host_value = response->context_id_out; |
| object->type = MAGMA_CONTEXT; |
| |
| hash_add(connection->objects, &object->node, object->host_value); |
| |
| return 0; |
| } |
| |
| static int virtmagma_command_magma_release_context( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtmagma_connection *connection = NULL; |
| struct virtmagma_connection_object *object = NULL; |
| struct virtio_magma_release_context_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_release_context_resp *response = |
| command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| object = get_connection_object(connection, request->context_id, |
| MAGMA_CONTEXT); |
| |
| // Send the call even if the context_id is invalid; tests check that the |
| // magma error is set. |
| ret = vq_out_send_sync(instance->vi, command); |
| |
| if (object) { |
| hash_del(&object->node); |
| kfree(object); |
| } |
| |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| return 0; |
| } |
| |
| static int virtmagma_command_magma_create_buffer( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtmagma_connection *connection; |
| struct virtmagma_connection_object *object; |
| struct virtio_magma_create_buffer_ctrl *request = command->request_ptr; |
| struct virtio_magma_create_buffer_resp *response = |
| command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| /* pass on magma errors without creating a buffer object */ |
| if (response->result_return) { |
| pr_warn("virtmagma: magma_create_buffer returned %d", |
| (int32_t)response->result_return); |
| return 0; /* the ioctl is still successful */ |
| } |
| |
| object = kzalloc(sizeof(*object), GFP_KERNEL); |
| if (!object) |
| return -ENOMEM; |
| |
| object->parent_connection = connection; |
| object->host_value = response->buffer_out; |
| object->type = MAGMA_BUFFER; |
| object->buffer.size_requested = request->size; |
| object->buffer.size_allocated = response->size_out; |
| |
| hash_add(connection->objects, &object->node, object->host_value); |
| |
| return 0; |
| } |
| |
| static int virtmagma_command_magma_release_buffer( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtmagma_connection *connection = NULL; |
| struct virtmagma_connection_object *object = NULL; |
| struct virtio_magma_release_buffer_ctrl *request = command->request_ptr; |
| struct virtio_magma_release_buffer_resp *response = |
| command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| object = get_connection_object(connection, request->buffer, |
| MAGMA_BUFFER); |
| if (!object) |
| return -EINVAL; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| hash_del(&object->node); |
| kfree(object); |
| |
| return 0; |
| } |
| |
| static int virtmagma_command_magma_execute_command_buffer_with_resources2( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| struct virtio_magma_execute_command_buffer_with_resources2_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_execute_command_buffer_with_resources2_resp |
| *response = command->response_ptr; |
| struct virtmagma_connection *connection; |
| struct virtmagma_command_buffer virt_command_buffer; |
| char *dst_ptr; |
| int ret; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| if (!get_connection_object(connection, request->context_id, |
| MAGMA_CONTEXT)) |
| return -EINVAL; |
| |
| /* The virtmagma_command_buffer includes the resources and semaphore data |
| and their lengths, so we ignore the corresponding members of the request. */ |
| ret = copy_from_user(&virt_command_buffer, |
| (void *)request->command_buffer, |
| sizeof(virt_command_buffer)); |
| if (ret) |
| return ret; |
| |
| /* reallocate request buffer with enough space for the structures */ |
| command->request_size = sizeof(*request) + |
| virt_command_buffer.command_buffer_size + |
| virt_command_buffer.resource_size + |
| virt_command_buffer.semaphore_size; |
| /* memory will be freed by the caller */ |
| dst_ptr = kzalloc(command->request_size, GFP_KERNEL); |
| if (!dst_ptr) |
| return -ENOMEM; |
| |
| memcpy(dst_ptr, request, sizeof(*request)); |
| command->request_ptr = dst_ptr; |
| |
| dst_ptr += sizeof(*request); |
| ret = copy_from_user(dst_ptr, |
| (void *)virt_command_buffer.command_buffer, |
| virt_command_buffer.command_buffer_size); |
| if (ret) |
| return ret; |
| |
| dst_ptr += virt_command_buffer.command_buffer_size; |
| ret = copy_from_user(dst_ptr, (void *)virt_command_buffer.resources, |
| virt_command_buffer.resource_size); |
| if (ret) |
| return ret; |
| |
| dst_ptr += virt_command_buffer.resource_size; |
| ret = copy_from_user(dst_ptr, (void *)virt_command_buffer.semaphores, |
| virt_command_buffer.semaphore_size); |
| if (ret) |
| return ret; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| return 0; |
| } |
| |
| static int virtmagma_command_magma_create_semaphore( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtmagma_connection *connection; |
| struct virtmagma_connection_object *object; |
| struct virtio_magma_create_semaphore_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_create_semaphore_resp *response = |
| command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| /* pass on magma errors without creating a semaphore object */ |
| if (response->result_return) { |
| pr_warn("virtmagma: magma_create_semaphore returned %d", |
| (int32_t)response->result_return); |
| return 0; /* the ioctl is still successful */ |
| } |
| |
| object = kzalloc(sizeof(*object), GFP_KERNEL); |
| if (!object) |
| return -ENOMEM; |
| |
| object->parent_connection = connection; |
| object->host_value = response->semaphore_out; |
| object->type = MAGMA_SEMAPHORE; |
| |
| hash_add(connection->objects, &object->node, object->host_value); |
| |
| return 0; |
| } |
| |
| static int virtmagma_command_magma_release_semaphore( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtmagma_connection *connection = NULL; |
| struct virtmagma_connection_object *object = NULL; |
| struct virtio_magma_release_semaphore_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_release_semaphore_resp *response = |
| command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| object = get_connection_object(connection, request->semaphore, |
| MAGMA_SEMAPHORE); |
| if (!object) |
| return -EINVAL; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| hash_del(&object->node); |
| kfree(object); |
| |
| return 0; |
| } |
| |
| static int virtmagma_command_magma_import_semaphore( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtmagma_connection *connection; |
| struct virtmagma_connection_object *object; |
| struct virtio_magma_import_semaphore_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_import_semaphore_resp *response = |
| command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| /* pass on magma errors without creating a semaphore object */ |
| if (response->result_return) { |
| pr_warn("virtmagma: magma_import_semaphore returned %d", |
| (int32_t)response->result_return); |
| return 0; /* the ioctl is still successful */ |
| } |
| |
| object = kzalloc(sizeof(*object), GFP_KERNEL); |
| if (!object) |
| return -ENOMEM; |
| |
| object->parent_connection = connection; |
| object->host_value = response->semaphore_out; |
| object->type = MAGMA_SEMAPHORE; |
| |
| hash_add(connection->objects, &object->node, object->host_value); |
| |
| return 0; |
| } |
| |
| static int virtmagma_command_magma_export_semaphore( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtmagma_connection *connection; |
| struct virtio_magma_export_semaphore_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_export_semaphore_resp *response = |
| command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| /* TODO(fxbug.dev/67565): |
| * Create an anon file here that points to the zircon handle, return |
| * the fd to userspace. For now we return the zircon handle itself. |
| */ |
| |
| return 0; |
| } |
| |
| static int |
| virtmagma_command_magma_poll(struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| size_t items_size; |
| struct virtio_magma_poll_ctrl *request = command->request_ptr; |
| struct virtio_magma_poll_resp *response = command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| /* reallocate request buffer with enough space for the items */ |
| /* note: request->count is used as the byte count */ |
| items_size = request->count; |
| command->request_size = sizeof(*request) + items_size; |
| command->request_ptr = kzalloc(command->request_size, GFP_KERNEL); |
| if (!command->request_ptr) |
| return -ENOMEM; |
| |
| memcpy(command->request_ptr, request, sizeof(*request)); |
| ret = copy_from_user((char *)command->request_ptr + sizeof(*request), |
| (void *)request->items, items_size); |
| if (ret) |
| return ret; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| return copy_to_user((void *)request->items, |
| (char *)command->request_ptr + sizeof(*request), |
| items_size); |
| } |
| |
| static int virtmagma_command_magma_read_notification_channel2( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtio_magma_read_notification_channel2_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_read_notification_channel2_resp *response = |
| command->response_ptr; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| /* reallocate response buffer with additional space for notification data. |
| note that the size is not modified, as we only want the response struct |
| itself to be copied back to the user by our caller */ |
| |
| command->response_ptr = response = |
| kzalloc(sizeof(*response) + NOTIFICATION_MAX_BYTES, GFP_KERNEL); |
| if (!command->response_ptr) |
| return -ENOMEM; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| /* pass on magma errors without writing to the buffer */ |
| if (response->result_return) { |
| pr_warn("virtmagma: magma_read_notification_channel2 returned %d", |
| (int32_t)response->result_return); |
| return 0; /* the ioctl is still successful */ |
| } |
| |
| if (response->buffer_size_out > request->buffer_size) { |
| pr_err("virtmagma: magma_read_notification_channel2 returned buffer_size_out (%lu) larger than buffer_size (%lld)", |
| response->buffer_size_out, request->buffer_size); |
| return -EIO; |
| } |
| |
| return copy_to_user((void *)request->buffer, |
| (char *)command->response_ptr + sizeof(*response), |
| response->buffer_size_out); |
| } |
| |
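| /* Query the host for a buffer's size; used to fill in the size fields of |
| buffers created or imported on the guest's behalf. */ |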
| static int get_buffer_size(struct virtmagma_instance *instance, |
| uintptr_t buffer, uint64_t *size_out) |
| { |
| struct virtio_magma_get_buffer_size_ctrl *request; |
| struct virtio_magma_get_buffer_size_resp *response; |
| int ret; |
| |
| request = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL); |
| if (!request) |
| return -ENOMEM; |
| |
| response = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL); |
| if (!response) { |
| kmem_cache_free(instance->msg_cache, request); |
| return -ENOMEM; |
| } |
| |
| request->hdr.type = VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE; |
| request->hdr.flags = 0; |
| request->buffer = buffer; |
| |
| { |
| struct virtmagma_virtio_command command = { |
| .request_ptr = request, |
| .request_size = sizeof(*request), |
| .response_ptr = response, |
| .response_size = sizeof(*response) |
| }; |
| |
| ret = vq_out_send_sync(instance->vi, &command); |
| } |
| |
| if (ret == 0) { |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret == 0) { |
| *size_out = response->result_return; |
| } |
| } |
| |
| kmem_cache_free(instance->msg_cache, request); |
| kmem_cache_free(instance->msg_cache, response); |
| |
| return ret; |
| } |
| |
| static int virtmagma_command_magma_virt_create_image( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| struct virtmagma_connection *connection; |
| struct virtio_magma_virt_create_image_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_virt_create_image_resp *response = |
| command->response_ptr; |
| struct virtmagma_create_image_wrapper wrapper; |
| uint64_t buffer_size; |
| char *ptr; |
| int ret; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| /* The wrapper struct includes pointers to the actual structs and their sizes. */ |
| ret = copy_from_user(&wrapper, (void *)request->create_info, |
| sizeof(wrapper)); |
| if (ret) |
| return ret; |
| |
| /* reallocate request buffer with enough space for the structures */ |
| command->request_size = sizeof(*request) + wrapper.create_info_size; |
| /* memory will be freed by the caller */ |
| ptr = kzalloc(command->request_size, GFP_KERNEL); |
| if (!ptr) |
| return -ENOMEM; |
| |
| memcpy(ptr, request, sizeof(*request)); |
| command->request_ptr = ptr; |
| |
| /* ptr set to create_info */ |
| ptr += sizeof(*request); |
| ret = copy_from_user(ptr, (void *)wrapper.create_info, |
| wrapper.create_info_size); |
| if (ret) |
| return ret; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| /* pass on magma errors without creating buffer */ |
| if (response->result_return) { |
| pr_warn("virtmagma: magma_virt_create_image returned %d", |
| (int32_t)response->result_return); |
| return 0; /* the ioctl is still successful */ |
| } |
| |
| ret = get_buffer_size(instance, response->image_out, &buffer_size); |
| if (ret) |
| return ret; |
| |
| { |
| struct virtmagma_connection_object *object = |
| kzalloc(sizeof(*object), GFP_KERNEL); |
| if (!object) |
| return -ENOMEM; |
| |
| object->parent_connection = connection; |
| object->host_value = response->image_out; |
| object->type = MAGMA_BUFFER; |
| object->buffer.size_requested = buffer_size; |
| object->buffer.size_allocated = buffer_size; |
| |
| hash_add(connection->objects, &object->node, |
| object->host_value); |
| } |
| |
| return 0; |
| } |
| |
| static int virtmagma_command_magma_virt_get_image_info( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| struct virtmagma_connection *connection; |
| struct virtio_magma_virt_get_image_info_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_virt_get_image_info_resp *response = |
| command->response_ptr; |
| struct virtmagma_get_image_info_wrapper wrapper; |
| char *ptr; |
| int ret; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| /* The wrapper struct includes pointers to the actual structs and their sizes. */ |
| ret = copy_from_user(&wrapper, (void *)request->image_info_out, |
| sizeof(wrapper)); |
| if (ret) |
| return ret; |
| |
| /* reallocate request buffer with enough space for the structures */ |
| command->request_size = sizeof(*request) + wrapper.image_info_size; |
| /* memory will be freed by the caller */ |
| ptr = kzalloc(command->request_size, GFP_KERNEL); |
| if (!ptr) |
| return -ENOMEM; |
| |
| memcpy(ptr, request, sizeof(*request)); |
| command->request_ptr = ptr; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| /* pass on magma errors without creating buffer */ |
| if (response->result_return) { |
| pr_warn("virtmagma: magma_virt_get_image_info returned %d", |
| (int32_t)response->result_return); |
| return 0; /* the ioctl is still successful */ |
| } |
| |
| ptr += sizeof(*request); |
| ret = copy_to_user((void *)wrapper.image_info_out, ptr, |
| wrapper.image_info_size); |
| if (ret) |
| return ret; |
| |
| return 0; |
| } |
| |
| #if IS_ENABLED(CONFIG_VIRTIO_WL) |
| /* use the implementation in the virtio_wl module */ |
| extern int virtwl_create_fd_for_vfd(void *filp_private_data, uint32_t vfd_id); |
| extern int virtwl_get_vfd_from_fd(void *filp_private_data, int fd, |
| uint32_t *vfd_id); |
| #else |
| #define virtwl_create_fd_for_vfd(a, b) (-ENODEV) |
| #define virtwl_get_vfd_from_fd(a, b, c) (-ENODEV) |
| #endif |
| |
| static int |
| virtmagma_command_magma_export(struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtio_magma_export_ctrl *request = command->request_ptr; |
| struct virtio_magma_export_resp *response = command->response_ptr; |
| |
| if (!instance->wayland_device_private_data) |
| return -ENODEV; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| /* pass on magma errors without creating a vfd */ |
| if (response->result_return) { |
| pr_warn("virtmagma: magma_export returned %d", |
| (int32_t)response->result_return); |
| return 0; /* the ioctl is still successful */ |
| } |
| |
| ret = virtwl_create_fd_for_vfd(instance->wayland_device_private_data, |
| response->buffer_handle_out); |
| if (ret < 0) { |
| pr_err("virtmagma: failed to get vfd creation info for vfd id %lu", |
| response->buffer_handle_out); |
| return ret; |
| } |
| |
| response->buffer_handle_out = ret; |
| |
| return 0; |
| } |
| |
| static int |
| virtmagma_command_magma_import(struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| int ret; |
| struct virtmagma_connection *connection; |
| struct virtio_magma_import_ctrl *request = command->request_ptr; |
| struct virtio_magma_import_resp *response = command->response_ptr; |
| uint32_t vfd; |
| uint64_t buffer_size; |
| |
| if (!instance->wayland_device_private_data) |
| return -ENODEV; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| connection = get_connection(instance, request->connection); |
| if (!connection) |
| return -EINVAL; |
| |
| { |
| int fd = request->buffer_handle; |
| ret = virtwl_get_vfd_from_fd( |
| instance->wayland_device_private_data, fd, &vfd); |
| if (ret < 0) { |
| pr_err("virtmagma: failed to get vfd from fd %u", fd); |
| return ret; |
| } |
| } |
| |
| request->buffer_handle = vfd; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| /* pass on magma errors without creating a vfd */ |
| if (response->result_return) { |
| pr_warn("virtmagma: magma_import returned %d", |
| (int32_t)response->result_return); |
| return 0; /* the ioctl is still successful */ |
| } |
| |
| ret = get_buffer_size(instance, response->buffer_out, &buffer_size); |
| if (ret) |
| return ret; |
| |
| { |
| struct virtmagma_connection_object *object = |
| kzalloc(sizeof(*object), GFP_KERNEL); |
| if (!object) |
| return -ENOMEM; |
| |
| object->parent_connection = connection; |
| object->host_value = response->buffer_out; |
| object->type = MAGMA_BUFFER; |
| object->buffer.size_requested = buffer_size; |
| object->buffer.size_allocated = buffer_size; |
| |
| hash_add(connection->objects, &object->node, |
| object->host_value); |
| } |
| |
| return 0; |
| } |
| |
| // Assumes that command->response_ptr is large enough for the response struct + |
| // uint64_t for the buffer size. |
| static int virtmagma_command_magma_get_buffer_handle2_core( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| struct virtmagma_buffer_fd_priv *priv; |
| struct virtio_magma_get_buffer_handle2_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_get_buffer_handle2_resp *response = |
| command->response_ptr; |
| uint32_t handle; |
| int ret; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| if (response->result_return) { |
| pr_warn("virtmagma: magma_get_buffer_handle2 returned %d", |
| (int32_t)response->result_return); |
| return 0; /* the ioctl is still successful */ |
| } |
| |
| handle = response->handle_out; |
| |
| priv = kzalloc(sizeof(struct virtmagma_buffer_fd_priv), GFP_KERNEL); |
| if (!priv) { |
| virtmagma_release_handle(instance->vi, handle); |
| return -ENOMEM; |
| } |
| |
| mutex_init(&priv->mutex_lock); |
| |
| priv->vi = instance->vi; |
| priv->buffer_handle = handle; |
| priv->buffer_size = *(uint64_t*)(response + 1); |
| |
| ret = anon_inode_getfd("[virtmagma]", &virtmagma_buffer_fd_fops, priv, |
| O_RDWR); |
| if (ret < 0) { |
| pr_err("virtmagma: failed to create fd: %d", ret); |
| virtmagma_release_handle(instance->vi, handle); |
| kfree(priv); |
| return ret; |
| } |
| |
| response->handle_out = ret; |
| |
| return 0; |
| } |
| |
| static int virtmagma_command_magma_get_buffer_handle2( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| struct virtio_magma_get_buffer_handle2_resp *response; |
| |
| /* Reallocate response buffer with additional space for buffer size; |
| note that response_size is not modified, as we only want the response struct |
| itself to be copied back to the user by our caller. |
| Memory will be freed by caller. |
| */ |
| response = kzalloc(sizeof(*response) + sizeof(uint64_t), GFP_KERNEL); |
| if (!response) |
| return -ENOMEM; |
| |
| command->response_ptr = response; |
| |
| return virtmagma_command_magma_get_buffer_handle2_core(instance, command); |
| } |
| |
| static int virtmagma_command_magma_get_buffer_handle( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| struct virtio_magma_get_buffer_handle_ctrl *request = command->request_ptr; |
| struct virtio_magma_get_buffer_handle_resp *response = command->response_ptr; |
| struct virtio_magma_get_buffer_handle2_ctrl *request2; |
| struct virtio_magma_get_buffer_handle2_resp *response2; |
| int ret; |
| |
| BUILD_BUG_ON(sizeof(*request2) > MESSAGE_CACHE_OBJECT_SIZE); |
| BUILD_BUG_ON(sizeof(*response2) + sizeof(uint64_t) > MESSAGE_CACHE_OBJECT_SIZE); |
| |
| request2 = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL); |
| if (!request2) |
| return -ENOMEM; |
| |
| response2 = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL); |
| if (!response2) { |
| kmem_cache_free(instance->msg_cache, request2); |
| return -ENOMEM; |
| } |
| |
| request2->hdr.type = VIRTIO_MAGMA_CMD_GET_BUFFER_HANDLE2; |
| request2->hdr.flags = 0; |
| request2->buffer = request->buffer; |
| |
| { |
| struct virtmagma_virtio_command command2 = { |
| .request_ptr = request2, |
| .request_size = sizeof(*request2), |
| .response_ptr = response2, |
| .response_size = sizeof(*response2) |
| }; |
| |
| ret = virtmagma_command_magma_get_buffer_handle2_core(instance, &command2); |
| } |
| |
| response->hdr.type = VIRTIO_MAGMA_RESP_GET_BUFFER_HANDLE; |
| response->handle_out = response2->handle_out; |
| response->result_return = response2->result_return; |
| |
| kmem_cache_free(instance->msg_cache, request2); |
| kmem_cache_free(instance->msg_cache, response2); |
| |
| return ret; |
| } |
| |
| static int virtmagma_command_magma_query_returns_buffer2( |
| struct virtmagma_instance *instance, |
| struct virtmagma_virtio_command *command) |
| { |
| struct virtmagma_buffer_fd_priv *priv; |
| struct virtio_magma_query_returns_buffer2_ctrl *request = |
| command->request_ptr; |
| struct virtio_magma_query_returns_buffer2_resp *response = |
| command->response_ptr; |
| uint64_t buffer_size; |
| uint32_t buffer_handle; |
| int ret; |
| |
| /* Reallocate response buffer with additional space for buffer size; |
| note that response_size is not modified, as we only want the response struct |
| itself to be copied back to the user by our caller. |
| Memory will be freed by caller. |
| */ |
| response = kzalloc(sizeof(*response) + sizeof(uint64_t), GFP_KERNEL); |
| if (!response) |
| return -ENOMEM; |
| |
| command->response_ptr = response; |
| |
| if (!COMMAND_OK(command, request, response)) |
| return -EINVAL; |
| |
| ret = vq_out_send_sync(instance->vi, command); |
| if (ret) |
| return ret; |
| |
| ret = virtmagma_check_expected_response_type(request, response); |
| if (ret) |
| return ret; |
| |
| if (response->result_return) { |
| pr_warn("virtmagma: magma_query_returns_buffer2 returned %d", |
| (int32_t)response->result_return); |
| return 0; /* the ioctl is still successful */ |
| } |
| |
| buffer_handle = response->handle_out; |
| buffer_size = *(uint64_t*)(response + 1); |
| |
| priv = kzalloc(sizeof(struct virtmagma_buffer_fd_priv), GFP_KERNEL); |
| if (!priv) { |
| virtmagma_release_handle(instance->vi, buffer_handle); |
| return -ENOMEM; |
| } |
| |
| mutex_init(&priv->mutex_lock); |
| |
| priv->vi = instance->vi; |
| priv->buffer_handle = buffer_handle; |
| priv->buffer_size = buffer_size; |
| |
| ret = anon_inode_getfd("[virtmagma]", &virtmagma_buffer_fd_fops, priv, |
| O_RDWR); |
| if (ret < 0) { |
| pr_err("virtmagma: failed to create fd: %d", ret); |
| virtmagma_release_handle(instance->vi, buffer_handle); |
| kfree(priv); |
| return ret; |
| } |
| |
| response->handle_out = ret; |
| |
| return 0; |
| } |
| |
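| /* Version handshake so userspace can verify it is talking to a compatible |
| driver. */ |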
| static int virtmagma_ioctl_handshake(struct file *filp, void __user *ptr) |
| { |
| struct virtmagma_ioctl_args_handshake ioctl_args; |
| int ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args)); |
| if (ret) |
| return ret; |
| if (ioctl_args.handshake_inout != VIRTMAGMA_HANDSHAKE_SEND) |
| return -EINVAL; |
| ioctl_args.handshake_inout = VIRTMAGMA_HANDSHAKE_RECV; |
| ioctl_args.version_out = VIRTMAGMA_VERSION; |
| return copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args)); |
| } |
| |
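| /* Main ioctl entry point for magma commands: copies the request in from |
| userspace, dispatches to a per-command handler (or passes the message |
| through unchanged), then copies the response back out. */ |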
| int virtmagma_ioctl_magma_command(struct file *filp, void __user *ptr) |
| { |
| struct virtmagma_ioctl_args_magma_command ioctl_args; |
| struct virtmagma_virtio_command command; |
| void *request; |
| void *response; |
| int request_type; |
| int ret; |
| struct virtmagma_instance *instance = filp->private_data; |
| command.request_ptr = NULL; |
| command.response_ptr = NULL; |
| |
| if (!instance) |
| return -ENODEV; |
| |
| /* copy in command arguments */ |
| |
| ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args)); |
| if (ret) |
| return ret; |
| |
| /* verify userspace-provided pointers are accessible */ |
| |
| ret = !access_ok(VERIFY_READ, (void *)ioctl_args.request_address, |
| ioctl_args.request_size); |
| if (ret) |
| return -EFAULT; |
| ret = !access_ok(VERIFY_WRITE, (void *)ioctl_args.response_address, |
| ioctl_args.response_size); |
| if (ret) |
| return -EFAULT; |
| |
| /* allocate buffers and copy in userspace data */ |
| |
| if (ioctl_args.request_size <= MESSAGE_CACHE_OBJECT_SIZE) |
| request = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL); |
| else |
| request = kzalloc(ioctl_args.request_size, GFP_KERNEL); |
| if (!request) |
| return -ENOMEM; |
| if (ioctl_args.response_size <= MESSAGE_CACHE_OBJECT_SIZE) |
| response = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL); |
| else |
| response = kzalloc(ioctl_args.response_size, GFP_KERNEL); |
| if (!response) { |
| ret = -ENOMEM; |
| goto free_request; |
| } |
| command.request_ptr = request; |
| command.response_ptr = response; |
| command.request_size = ioctl_args.request_size; |
| command.response_size = ioctl_args.response_size; |
| |
| ret = copy_from_user(command.request_ptr, |
| (void *)ioctl_args.request_address, |
| ioctl_args.request_size); |
| if (ret) |
| goto free_response; |
| |
| request_type = control_type(command.request_ptr); |
| switch (request_type) { |
| case VIRTIO_MAGMA_CMD_CREATE_CONNECTION2: |
| ret = virtmagma_command_magma_create_connection2(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_RELEASE_CONNECTION: |
| ret = virtmagma_command_magma_release_connection(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_CREATE_CONTEXT: |
| ret = virtmagma_command_magma_create_context(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_RELEASE_CONTEXT: |
| ret = virtmagma_command_magma_release_context(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_CREATE_BUFFER: |
| ret = virtmagma_command_magma_create_buffer(instance, &command); |
| break; |
| case VIRTIO_MAGMA_CMD_RELEASE_BUFFER: |
| ret = virtmagma_command_magma_release_buffer(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_EXECUTE_COMMAND_BUFFER_WITH_RESOURCES2: |
| ret = virtmagma_command_magma_execute_command_buffer_with_resources2( |
| instance, &command); |
| break; |
| case VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE: |
| ret = virtmagma_command_magma_create_semaphore(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE: |
| ret = virtmagma_command_magma_release_semaphore(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_IMPORT_SEMAPHORE: |
| ret = virtmagma_command_magma_import_semaphore(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_EXPORT_SEMAPHORE: |
| ret = virtmagma_command_magma_export_semaphore(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_POLL: |
| ret = virtmagma_command_magma_poll(instance, &command); |
| break; |
| case VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL2: |
| ret = virtmagma_command_magma_read_notification_channel2( |
| instance, &command); |
| break; |
| case VIRTIO_MAGMA_CMD_EXPORT: |
| ret = virtmagma_command_magma_export(instance, &command); |
| break; |
| case VIRTIO_MAGMA_CMD_IMPORT: |
| ret = virtmagma_command_magma_import(instance, &command); |
| break; |
| case VIRTIO_MAGMA_CMD_GET_BUFFER_HANDLE: |
| ret = virtmagma_command_magma_get_buffer_handle(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_GET_BUFFER_HANDLE2: |
| ret = virtmagma_command_magma_get_buffer_handle2(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_VIRT_CREATE_IMAGE: |
| ret = virtmagma_command_magma_virt_create_image(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_VIRT_GET_IMAGE_INFO: |
| ret = virtmagma_command_magma_virt_get_image_info(instance, |
| &command); |
| break; |
| case VIRTIO_MAGMA_CMD_QUERY_RETURNS_BUFFER2: |
| ret = virtmagma_command_magma_query_returns_buffer2(instance, &command); |
| break; |
| /* pass-through handlers */ |
| case VIRTIO_MAGMA_CMD_QUERY2: |
| case VIRTIO_MAGMA_CMD_GET_ERROR: |
| case VIRTIO_MAGMA_CMD_SYNC: |
| case VIRTIO_MAGMA_CMD_GET_BUFFER_ID: |
| case VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE: |
| case VIRTIO_MAGMA_CMD_CLEAN_CACHE: |
| case VIRTIO_MAGMA_CMD_SET_CACHE_POLICY: |
| case VIRTIO_MAGMA_CMD_GET_BUFFER_CACHE_POLICY: |
| case VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU: |
| case VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU: |
| case VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID: |
| case VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE: |
| case VIRTIO_MAGMA_CMD_RESET_SEMAPHORE: |
| case VIRTIO_MAGMA_CMD_GET_NOTIFICATION_CHANNEL_HANDLE: |
| case VIRTIO_MAGMA_CMD_DEVICE_IMPORT: |
| case VIRTIO_MAGMA_CMD_DEVICE_RELEASE: |
| ret = vq_out_send_sync(instance->vi, &command); |
| if (!ret) |
| ret = virtmagma_check_expected_response_type( |
| command.request_ptr, command.response_ptr); |
| break; |
| default: |
| pr_warn("virtmagma: command %s (%d) not implemented", |
| virtio_magma_ctrl_type_string(request_type), |
| request_type); |
| ret = -EINVAL; |
| break; |
| } |
| if (ret) { |
| pr_err("virtmagma: error handling command %s (%d)", |
| virtio_magma_ctrl_type_string(request_type), |
| request_type); |
| dump_stack(); |
| goto free_response; |
| } |
| |
| /* copy responses back to userspace */ |
| |
| if (copy_to_user((void __user *)ioctl_args.response_address, |
| command.response_ptr, ioctl_args.response_size)) |
| ret = -EFAULT; |
| |
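| /* |
| * The cleanup labels fall through: free_response releases both buffers, |
| * while free_request (taken when the response allocation failed) releases |
| * only the request. |
| */ |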
| free_response: |
| if (ioctl_args.response_size <= MESSAGE_CACHE_OBJECT_SIZE) |
| kmem_cache_free(instance->msg_cache, response); |
| else |
| kfree(response); |
| |
| free_request: |
| if (ioctl_args.request_size <= MESSAGE_CACHE_OBJECT_SIZE) |
| kmem_cache_free(instance->msg_cache, request); |
| else |
| kfree(request); |
| |
| /* |
| * Some of the handlers above replace the command buffers with their own |
| * allocations and hand ownership back to us; free those replacements here |
| * so they do not leak alongside the originals. |
| */ |
| |
| if (command.request_ptr && command.request_ptr != request) |
| kfree(command.request_ptr); |
| |
| if (command.response_ptr && command.response_ptr != response) |
| kfree(command.response_ptr); |
| |
| return ret; |
| } |
| |
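| /* |
| * Each open of the character device gets its own instance, which tracks the |
| * opener's connections and owns a per-instance message cache; the instance |
| * is torn down again in virtmagma_release(). |
| */ |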
| static int virtmagma_open(struct inode *inodep, struct file *filp) |
| { |
| int ret; |
| struct virtmagma_instance *instance; |
| struct virtmagma_info *vi = |
| container_of(inodep->i_cdev, struct virtmagma_info, cdev); |
| |
| ret = create_instance(vi, &instance); |
| if (ret) |
| return ret; |
| |
| filp->private_data = instance; |
| |
| return 0; |
| } |
| |
| static int virtmagma_release(struct inode *inodep, struct file *filp) |
| { |
| struct virtmagma_instance *instance = filp->private_data; |
| return destroy_instance(instance->id, instance, instance->vi); |
| } |
| |
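| /* |
| * The host returns completed descriptors on the out queue with the sender's |
| * completion as the token. Drain the queue under vq_out_lock, signal each |
| * waiting sender, and wake any waiters on out_waitq. |
| */ |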
| static void vq_out_work_handler(struct work_struct *work) |
| { |
| struct virtmagma_info *vi = |
| container_of(work, struct virtmagma_info, out_vq_work); |
| unsigned int len; |
| struct completion *finish_completion; |
| bool wake_waitq = false; |
| |
| mutex_lock(&vi->vq_out_lock); |
| while ((finish_completion = virtqueue_get_buf(vi->vq_out, &len)) != |
| NULL) { |
| wake_waitq = true; |
| complete(finish_completion); |
| } |
| mutex_unlock(&vi->vq_out_lock); |
| |
| if (wake_waitq) |
| wake_up_interruptible_all(&vi->out_waitq); |
| } |
| |
| static void vq_out_cb(struct virtqueue *vq) |
| { |
| struct virtmagma_info *vi = vq->vdev->priv; |
| schedule_work(&vi->out_vq_work); |
| } |
| |
| static long virtmagma_ioctl_common(struct file *filp, unsigned int cmd, |
| void __user *ptr) |
| { |
| switch (cmd) { |
| case VIRTMAGMA_IOCTL_HANDSHAKE: |
| return virtmagma_ioctl_handshake(filp, ptr); |
| case VIRTMAGMA_IOCTL_MAGMA_COMMAND: |
| return virtmagma_ioctl_magma_command(filp, ptr); |
| default: |
| return -ENOTTY; |
| } |
| } |
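| |
| /* |
| * A minimal userspace sketch of driving this interface (an illustration |
| * only; it assumes the device node appears as /dev/magma0 and that a |
| * populated virtio-magma request/response pair is built from the |
| * definitions in virtio_magma.h): |
| * |
| *   struct virtmagma_ioctl_args_magma_command args = { |
| *           .request_address = (uintptr_t)&request, |
| *           .request_size = sizeof(request), |
| *           .response_address = (uintptr_t)&response, |
| *           .response_size = sizeof(response), |
| *   }; |
| *   int fd = open("/dev/magma0", O_RDWR); |
| *   ioctl(fd, VIRTMAGMA_IOCTL_MAGMA_COMMAND, &args); |
| */ |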
| |
| static long virtmagma_ioctl(struct file *filp, unsigned int cmd, |
| unsigned long arg) |
| { |
| return virtmagma_ioctl_common(filp, cmd, (void __user *)arg); |
| } |
| |
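| /* |
| * 32-bit callers only need their pointer argument translated with |
| * compat_ptr(); beyond that the ioctls are handled identically to the |
| * native 64-bit path. |
| */ |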
| #ifdef CONFIG_COMPAT |
| static long virtmagma_ioctl_compat(struct file *filp, unsigned int cmd, |
| unsigned long arg) |
| { |
| return virtmagma_ioctl_common(filp, cmd, compat_ptr(arg)); |
| } |
| #else |
| #define virtmagma_ioctl_compat NULL |
| #endif |
| |
| static const struct file_operations virtmagma_fops = { |
| .open = virtmagma_open, |
| .unlocked_ioctl = virtmagma_ioctl, |
| .compat_ioctl = virtmagma_ioctl_compat, |
| .release = virtmagma_release, |
| }; |
| |
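| /* |
| * Device bring-up: allocate driver state, register the magma character |
| * device, find the single "out" virtqueue used to send commands to the |
| * host, and mark the virtio device ready. The error labels unwind in the |
| * reverse order of setup. |
| */ |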
| static int virtmagma_probe(struct virtio_device *vdev) |
| { |
| int ret; |
| struct virtmagma_info *vi = NULL; |
| static const char *vq_out_name = "out"; |
| vq_callback_t *callback = &vq_out_cb; |
| |
| vi = kzalloc(sizeof(struct virtmagma_info), GFP_KERNEL); |
| if (!vi) |
| return -ENOMEM; |
| |
| vdev->priv = vi; |
| |
| ret = alloc_chrdev_region(&vi->dev_num, 0, 1, "magma"); |
| if (ret) { |
| pr_warn("virtmagma: failed to allocate magma chrdev region: %d\n", |
| ret); |
| goto free_vi; |
| } |
| |
| vi->class = class_create(THIS_MODULE, "magma"); |
| if (IS_ERR(vi->class)) { |
| ret = PTR_ERR(vi->class); |
| pr_warn("virtmagma: failed to create magma class: %d\n", ret); |
| goto unregister_region; |
| } |
| |
| vi->dev = device_create(vi->class, NULL, vi->dev_num, vi, "magma%d", 0); |
| if (IS_ERR(vi->dev)) { |
| ret = PTR_ERR(vi->dev); |
| pr_warn("virtmagma: failed to create magma0 device: %d\n", ret); |
| goto destroy_class; |
| } |
| |
| cdev_init(&vi->cdev, &virtmagma_fops); |
| ret = cdev_add(&vi->cdev, vi->dev_num, 1); |
| if (ret) { |
| pr_warn("virtmagma: failed to add virtio magma character device to system: %d\n", |
| ret); |
| goto destroy_device; |
| } |
| |
| mutex_init(&vi->vq_out_lock); |
| mutex_init(&vi->instances_lock); |
| idr_init(&vi->instances); |
| |
| ret = virtio_find_vqs(vdev, 1, &vi->vq_out, &callback, &vq_out_name, |
| NULL); |
| if (ret) { |
| pr_warn("virtmagma: failed to find virtio magma out queue: %d\n", |
| ret); |
| goto del_cdev; |
| } |
| |
| INIT_WORK(&vi->out_vq_work, vq_out_work_handler); |
| init_waitqueue_head(&vi->out_waitq); |
| |
| virtio_device_ready(vdev); |
| |
| return 0; |
| |
| del_cdev: |
| cdev_del(&vi->cdev); |
| destroy_device: |
| device_destroy(vi->class, vi->dev_num); |
| destroy_class: |
| class_destroy(vi->class); |
| unregister_region: |
| unregister_chrdev_region(vi->dev_num, 1); |
| free_vi: |
| kfree(vi); |
| return ret; |
| } |
| |
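| /* |
| * Device teardown: destroy any instances that are still alive via the IDR, |
| * then unwind the character device registration in the reverse order of |
| * probe. |
| */ |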
| static void virtmagma_remove(struct virtio_device *vdev) |
| { |
| struct virtmagma_info *vi = vdev->priv; |
| |
| idr_for_each(&vi->instances, destroy_instance, vi); |
| mutex_destroy(&vi->instances_lock); |
| idr_destroy(&vi->instances); |
| cdev_del(&vi->cdev); |
| device_destroy(vi->class, vi->dev_num); |
| class_destroy(vi->class); |
| unregister_chrdev_region(vi->dev_num, 1); |
| kfree(vi); |
| } |
| |
| static void virtmagma_scan(struct virtio_device *vdev) |
| { |
| } |
| |
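| /* Match the virtio magma device ID from any vendor. */ |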
| static struct virtio_device_id id_table[] = { |
| { VIRTIO_ID_MAGMA, VIRTIO_DEV_ANY_ID }, |
| { 0 }, |
| }; |
| |
| static struct virtio_driver virtio_magma_driver = { |
| .driver.name = KBUILD_MODNAME, |
| .driver.owner = THIS_MODULE, |
| .id_table = id_table, |
| .probe = virtmagma_probe, |
| .remove = virtmagma_remove, |
| .scan = virtmagma_scan, |
| }; |
| |
| module_virtio_driver(virtio_magma_driver); |
| MODULE_DEVICE_TABLE(virtio, id_table); |
| MODULE_DESCRIPTION("Virtio Magma driver"); |
| MODULE_LICENSE("GPL"); |