| // Copyright 2019 The Fuchsia Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include <linux/fs.h> |
| #include <linux/scatterlist.h> |
| #include <linux/slab.h> |
| #include <linux/uaccess.h> |
| #include <linux/virtio_magma.h> |
| #include <linux/vmalloc.h> |
| |
| #include "virtio_magma.h" |
| |
| #define VQ_DESCRIPTOR_SIZE PAGE_SIZE |
| #define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) |
| |
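| /* Per-device private state: a lock and a virtqueue per direction, |
|  * deferred work for each queue's callback, and a wait queue for |
|  * callers blocked on free out-queue descriptors. |
|  */ |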
| struct virtmagma_info_private { |
| struct mutex vq_locks[VIRTMAGMA_QUEUE_COUNT]; |
| struct virtqueue *vqs[VIRTMAGMA_QUEUE_COUNT]; |
| struct work_struct in_vq_work; |
| struct work_struct out_vq_work; |
| wait_queue_head_t out_waitq; |
| }; |
| |
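| /* Post a single receive buffer back to the in queue. The caller must |
|  * hold the in-queue lock. |
|  */ |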
| static int vq_return_inbuf_locked(struct virtqueue *vq, void *buffer) |
| { |
| int ret; |
| struct scatterlist sg[1]; |
| |
| sg_init_one(sg, buffer, VQ_DESCRIPTOR_SIZE); |
| |
| ret = virtqueue_add_inbuf(vq, sg, 1, buffer, GFP_KERNEL); |
| if (ret) { |
| pr_warn("virtmagma: failed to give inbuf to host: %d\n", ret); |
| return ret; |
| } |
| |
| return 0; |
| } |
| |
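| /* Queue a command/response buffer pair on the out queue. When the |
|  * queue is full, nonblocking callers get -EAGAIN; blocking callers |
|  * wait up to a second for free descriptors, then wait for the host to |
|  * complete the request before returning. |
|  */ |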
| static int vq_queue_out(struct virtmagma_info_private *vip, |
| struct scatterlist *out_sg, |
| struct scatterlist *in_sg, |
| struct completion *finish_completion, |
| bool nonblock) |
| { |
| struct virtqueue *vq = vip->vqs[VIRTMAGMA_VQ_OUT]; |
| struct mutex *vq_lock = &vip->vq_locks[VIRTMAGMA_VQ_OUT]; |
| struct scatterlist *sgs[] = { out_sg, in_sg }; |
| int ret = 0; |
| |
| mutex_lock(vq_lock); |
| while ((ret = virtqueue_add_sgs(vq, sgs, 1, 1, finish_completion, |
| GFP_KERNEL)) == -ENOSPC) { |
| mutex_unlock(vq_lock); |
| if (nonblock) |
| return -EAGAIN; |
| if (!wait_event_timeout(vip->out_waitq, vq->num_free > 0, HZ)) |
| return -EBUSY; |
| mutex_lock(vq_lock); |
| } |
| if (ret) { |
| mutex_unlock(vq_lock); |
| return ret; |
| } |
| virtqueue_kick(vq); |
| mutex_unlock(vq_lock); |
| |
| if (!nonblock) |
| wait_for_completion(finish_completion); |
| |
| return 0; |
| } |
| |
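| /* Fill the in queue with one-page receive buffers. On failure, detach |
|  * and free everything that was queued. Caller must hold the in-queue |
|  * lock. |
|  */ |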
| static int vq_fill_locked(struct virtqueue *vq) |
| { |
| void *buffer; |
| int ret = 0; |
| |
| while (vq->num_free > 0) { |
| buffer = kmalloc(VQ_DESCRIPTOR_SIZE, GFP_KERNEL); |
| if (!buffer) { |
| ret = -ENOMEM; |
| goto clear_queue; |
| } |
| |
| ret = vq_return_inbuf_locked(vq, buffer); |
| if (ret) |
| goto clear_queue; |
| } |
| |
| return 0; |
| |
| clear_queue: |
| while ((buffer = virtqueue_detach_unused_buf(vq))) |
| kfree(buffer); |
| return ret; |
| } |
| |
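| /* Version handshake with userspace. A hypothetical caller sketch (the |
|  * device node path is an assumption, not defined in this file): |
|  * |
|  *   struct virtmagma_ioctl_args_handshake args = { |
|  *           .handshake_inout = VIRTMAGMA_HANDSHAKE_SEND, |
|  *   }; |
|  *   int fd = open("/dev/magma0", O_RDWR); /- hypothetical node -/ |
|  *   ioctl(fd, VIRTMAGMA_IOCTL_HANDSHAKE, &args); |
|  *   On success, args.handshake_inout == VIRTMAGMA_HANDSHAKE_RECV and |
|  *   args.version_out == VIRTMAGMA_VERSION. |
|  */ |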
| static int virtmagma_ioctl_handshake(void __user *ptr) |
| { |
| struct virtmagma_ioctl_args_handshake ioctl_args; |
| if (copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args))) |
| return -EFAULT; |
| if (ioctl_args.handshake_inout != VIRTMAGMA_HANDSHAKE_SEND) |
| return -EINVAL; |
| ioctl_args.handshake_inout = VIRTMAGMA_HANDSHAKE_RECV; |
| ioctl_args.version_out = VIRTMAGMA_VERSION; |
| if (copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args))) |
| return -EFAULT; |
| return 0; |
| } |
| |
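| /* Map the driver image, exposed by the host as a contiguous range of |
|  * guest page frames, into the kernel's virtual address space. |
|  */ |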
| static void *map_driver(unsigned long long pfn_base, size_t size) |
| { |
| size_t num_pages; |
| struct page **pages; |
| size_t page_index; |
| void *ret; |
| |
| num_pages = PAGE_COUNT(size); |
| |
| pages = kcalloc(num_pages, sizeof(pages[0]), GFP_KERNEL); |
| if (!pages) |
| return NULL; |
| |
| for (page_index = 0; page_index < num_pages; ++page_index) |
| pages[page_index] = pfn_to_page(pfn_base + page_index); |
| |
| /* TODO(MA-520): there should be a faster way to do this if pages are all contiguous in physmem */ |
| ret = vm_map_ram(pages, num_pages, 0, PAGE_KERNEL); |
| kfree(pages); |
| return ret; |
| } |
| |
| static void unmap_driver(void *addr, size_t size) |
| { |
| vm_unmap_ram(addr, PAGE_COUNT(size)); |
| } |
| |
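| /* Write the driver image to a file in the guest filesystem so that |
|  * userspace can load it. |
|  */ |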
| static int write_driver_file(const char *path, const void *data, size_t size) |
| { |
| struct file *driver_file; |
| ssize_t bytes_written; |
| loff_t offset; |
| int ret; |
| |
| driver_file = filp_open(path, O_CLOEXEC | O_CREAT | O_RDWR, 0555); |
| if (IS_ERR(driver_file)) |
| return PTR_ERR(driver_file); |
| |
| offset = 0; |
| bytes_written = kernel_write(driver_file, data, size, &offset); |
| if (bytes_written < 0) { |
| ret = bytes_written; |
| goto close_file; |
| } |
| if (bytes_written != size) { |
| ret = -EFAULT; |
| goto close_file; |
| } |
| |
| ret = 0; |
| |
| close_file: |
| filp_close(driver_file, NULL); |
| |
| return ret; |
| } |
| |
| #define DRIVER_PATH "/libvulkan_magma.so" |
| |
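| /* Fetch the magma userspace driver from the host and persist it at |
|  * DRIVER_PATH. The host replies with the base PFN and size of the |
|  * image, which is mapped, written out, and unmapped again. |
|  */ |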
| static int virtmagma_ioctl_get_driver(struct virtmagma_info *vi, void __user *ptr) |
| { |
| struct virtmagma_ioctl_args_get_driver ioctl_args; |
| struct virtio_magma_get_driver *virtio_ctrl; |
| struct virtio_magma_get_driver_resp *virtio_resp; |
| struct completion finish_completion; |
| struct scatterlist out_sg; |
| struct scatterlist in_sg; |
| unsigned long long driver_pfn_base; |
| size_t driver_size; |
| void *driver_data; |
| int ret; |
| |
| ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args)); |
| if (ret) |
| return -EFAULT; |
| |
| virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL); |
| if (!virtio_ctrl) |
| return -ENOMEM; |
| virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL); |
| if (!virtio_resp) { |
| ret = -ENOMEM; |
| goto free_virtio_ctrl; |
| } |
| |
| virtio_ctrl->hdr.type = cpu_to_le32(VIRTIO_MAGMA_CMD_GET_DRIVER); |
| virtio_ctrl->page_size = cpu_to_le32(PAGE_SIZE); |
| |
| init_completion(&finish_completion); |
| |
| sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl)); |
| sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp)); |
| |
| ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */); |
| if (ret) |
| goto free_virtio_resp; |
| |
| if (le32_to_cpu(virtio_resp->hdr.type) != VIRTIO_MAGMA_RESP_GET_DRIVER) { |
| ret = -EFAULT; |
| goto free_virtio_resp; |
| } |
| |
| driver_pfn_base = le64_to_cpu(virtio_resp->pfn); |
| driver_size = le64_to_cpu(virtio_resp->size); |
| driver_data = map_driver(driver_pfn_base, driver_size); |
| if (!driver_data) { |
| ret = -EFAULT; |
| goto free_virtio_resp; |
| } |
| |
| ret = write_driver_file(DRIVER_PATH, driver_data, driver_size); |
| if (ret) |
| goto free_driver_alloc; |
| |
| ioctl_args.unused = 0; |
| ret = copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args)) ? -EFAULT : 0; |
| |
| free_driver_alloc: |
| unmap_driver(driver_data, driver_size); |
| |
| free_virtio_resp: |
| kfree(virtio_resp); |
| |
| free_virtio_ctrl: |
| kfree(virtio_ctrl); |
| |
| return ret; |
| } |
| |
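| /* Query a device parameter: forwards the field ID to the host and |
|  * returns the value and magma status to userspace. |
|  */ |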
| static int virtmagma_ioctl_query(struct virtmagma_info *vi, void __user *ptr) |
| { |
| struct virtmagma_ioctl_args_query ioctl_args; |
| struct virtio_magma_query *virtio_ctrl; |
| struct virtio_magma_query_resp *virtio_resp; |
| struct completion finish_completion; |
| struct scatterlist out_sg; |
| struct scatterlist in_sg; |
| int ret; |
| |
| ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args)); |
| if (ret) |
| return -EFAULT; |
| |
| virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL); |
| if (!virtio_ctrl) |
| return -ENOMEM; |
| virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL); |
| if (!virtio_resp) { |
| ret = -ENOMEM; |
| goto free_virtio_ctrl; |
| } |
| |
| virtio_ctrl->hdr.type = cpu_to_le32(VIRTIO_MAGMA_CMD_QUERY); |
| virtio_ctrl->field_id = cpu_to_le64(ioctl_args.id); |
| |
| init_completion(&finish_completion); |
| |
| sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl)); |
| sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp)); |
| |
| ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */); |
| if (ret) |
| goto free_virtio_resp; |
| |
| if (le32_to_cpu(virtio_resp->hdr.type) != VIRTIO_MAGMA_RESP_QUERY) { |
| ret = -EFAULT; |
| goto free_virtio_resp; |
| } |
| ioctl_args.value_out = le64_to_cpu(virtio_resp->field_value_out); |
| ioctl_args.status_return = le32_to_cpu(virtio_resp->status_return); |
| ret = copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args)) ? -EFAULT : 0; |
| |
| free_virtio_resp: |
| kfree(virtio_resp); |
| |
| free_virtio_ctrl: |
| kfree(virtio_ctrl); |
| |
| return ret; |
| } |
| |
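| /* Ask the host to create a magma connection and return its handle to |
|  * userspace. |
|  */ |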
| static int virtmagma_ioctl_create_connection(struct virtmagma_info *vi, void __user *ptr) |
| { |
| struct virtmagma_ioctl_args_create_connection ioctl_args; |
| struct virtio_magma_create_connection *virtio_ctrl; |
| struct virtio_magma_create_connection_resp *virtio_resp; |
| struct completion finish_completion; |
| struct scatterlist out_sg; |
| struct scatterlist in_sg; |
| int ret; |
| |
| ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args)); |
| if (ret) |
| return -EFAULT; |
| |
| virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL); |
| if (!virtio_ctrl) |
| return -ENOMEM; |
| virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL); |
| if (!virtio_resp) { |
| ret = -ENOMEM; |
| goto free_virtio_ctrl; |
| } |
| |
| virtio_ctrl->hdr.type = cpu_to_le32(VIRTIO_MAGMA_CMD_CREATE_CONNECTION); |
| |
| init_completion(&finish_completion); |
| |
| sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl)); |
| sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp)); |
| |
| ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */); |
| if (ret) |
| goto free_virtio_resp; |
| |
| if (le32_to_cpu(virtio_resp->hdr.type) != VIRTIO_MAGMA_RESP_CREATE_CONNECTION) { |
| ret = -EFAULT; |
| goto free_virtio_resp; |
| } |
| |
| ioctl_args.connection_return = le64_to_cpu(virtio_resp->connection_return); |
| ret = copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args)) ? -EFAULT : 0; |
| |
| free_virtio_resp: |
| kfree(virtio_resp); |
| |
| free_virtio_ctrl: |
| kfree(virtio_ctrl); |
| |
| return ret; |
| } |
| |
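| /* Release a magma connection previously returned by |
|  * virtmagma_ioctl_create_connection. |
|  */ |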
| static int virtmagma_ioctl_release_connection(struct virtmagma_info *vi, void __user *ptr) |
| { |
| struct virtmagma_ioctl_args_release_connection ioctl_args; |
| struct virtio_magma_release_connection *virtio_ctrl; |
| struct virtio_magma_release_connection_resp *virtio_resp; |
| struct completion finish_completion; |
| struct scatterlist out_sg; |
| struct scatterlist in_sg; |
| int ret; |
| |
| ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args)); |
| if (ret) |
| return -EFAULT; |
| |
| virtio_ctrl = kzalloc(sizeof(*virtio_ctrl), GFP_KERNEL); |
| if (!virtio_ctrl) |
| return -ENOMEM; |
| virtio_resp = kzalloc(sizeof(*virtio_resp), GFP_KERNEL); |
| if (!virtio_resp) { |
| ret = -ENOMEM; |
| goto free_virtio_ctrl; |
| } |
| |
| virtio_ctrl->hdr.type = cpu_to_le32(VIRTIO_MAGMA_CMD_RELEASE_CONNECTION); |
| virtio_ctrl->connection = cpu_to_le64(ioctl_args.connection); |
| |
| init_completion(&finish_completion); |
| |
| sg_init_one(&out_sg, virtio_ctrl, sizeof(*virtio_ctrl)); |
| sg_init_one(&in_sg, virtio_resp, sizeof(*virtio_resp)); |
| |
| ret = vq_queue_out(vi->private, &out_sg, &in_sg, &finish_completion, false /* block */); |
| if (ret) |
| goto free_virtio_resp; |
| |
| if (le32_to_cpu(virtio_resp->hdr.type) != VIRTIO_MAGMA_RESP_RELEASE_CONNECTION) |
| ret = -EFAULT; |
| |
| free_virtio_resp: |
| kfree(virtio_resp); |
| |
| free_virtio_ctrl: |
| kfree(virtio_ctrl); |
| |
| return ret; |
| } |
| |
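| /* No magma-specific work is needed at probe or remove time yet. */ |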
| static int virtmagma_probe(struct virtmagma_info *vi, struct virtio_device *vdev) |
| { |
| return 0; |
| } |
| |
| static void virtmagma_remove(struct virtmagma_info *vi, struct virtio_device *vdev) |
| { |
| } |
| |
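| /* Top-level ioctl dispatch, invoked by the common device code with the |
|  * command number and a pointer to the userspace argument struct. |
|  */ |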
| static long virtmagma_ioctl(struct virtmagma_info *vi, unsigned int cmd, void __user *ptr) |
| { |
| /* if vi->private is NULL, magma has not been initialized */ |
| if (!vi->private) |
| return -ENODEV; |
| switch (cmd) { |
| case VIRTMAGMA_IOCTL_HANDSHAKE: |
| return virtmagma_ioctl_handshake(ptr); |
| case VIRTMAGMA_IOCTL_GET_DRIVER: |
| return virtmagma_ioctl_get_driver(vi, ptr); |
| case VIRTMAGMA_IOCTL_QUERY: |
| return virtmagma_ioctl_query(vi, ptr); |
| case VIRTMAGMA_IOCTL_CREATE_CONNECTION: |
| return virtmagma_ioctl_create_connection(vi, ptr); |
| case VIRTMAGMA_IOCTL_RELEASE_CONNECTION: |
| return virtmagma_ioctl_release_connection(vi, ptr); |
| default: |
| return -EINVAL; |
| } |
| } |
| |
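| /* Messages from the host on the in queue are not handled yet; just |
|  * log that the handler ran. |
|  */ |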
| static void vq_in_work_handler(struct work_struct *work) |
| { |
| pr_warn("%s\n", __func__); |
| } |
| |
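| /* Reap completed out-queue descriptors: each token returned by the |
|  * host is the completion its submitter is blocked on. Wake anyone |
|  * waiting for free descriptors as well. |
|  */ |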
| static void vq_out_work_handler(struct work_struct *work) |
| { |
| struct virtmagma_info_private *vip = container_of(work, struct virtmagma_info_private, |
| out_vq_work); |
| struct virtqueue *vq = vip->vqs[VIRTMAGMA_VQ_OUT]; |
| struct mutex *vq_lock = &vip->vq_locks[VIRTMAGMA_VQ_OUT]; |
| unsigned int len; |
| struct completion *finish_completion; |
| bool wake_waitq = false; |
| |
| mutex_lock(vq_lock); |
| while ((finish_completion = virtqueue_get_buf(vq, &len)) != NULL) { |
| wake_waitq = true; |
| complete(finish_completion); |
| } |
| mutex_unlock(vq_lock); |
| |
| if (wake_waitq) |
| wake_up_interruptible_all(&vip->out_waitq); |
| } |
| |
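| /* Virtqueue callbacks run in interrupt context; defer the real work to |
|  * the system workqueue. |
|  */ |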
| static void vq_in_cb(struct virtqueue *vq) |
| { |
| struct virtmagma_info *vi = vq->vdev->priv + magma_info_offset; |
| struct virtmagma_info_private *vip = vi->private; |
| schedule_work(&vip->in_vq_work); |
| } |
| |
| static void vq_out_cb(struct virtqueue *vq) |
| { |
| struct virtmagma_info *vi = vq->vdev->priv + magma_info_offset; |
| struct virtmagma_info_private *vip = vi->private; |
| schedule_work(&vip->out_vq_work); |
| } |
| |
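| /* Hooks invoked by the common virtio setup code: name the virtqueues |
|  * and their callbacks before find_vqs(), stash the returned queues |
|  * afterwards, and prime the in queue with receive buffers around |
|  * virtio_device_ready(). |
|  */ |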
| static void virtmagma_virtio_find_vqs_prepare(struct virtmagma_info *vi, vq_callback_t **vq_callbacks, const char **vq_names) |
| { |
| vq_callbacks[VIRTMAGMA_VQ_IN] = vq_in_cb; |
| vq_callbacks[VIRTMAGMA_VQ_OUT] = vq_out_cb; |
| vq_names[VIRTMAGMA_VQ_IN] = "magma_in"; |
| vq_names[VIRTMAGMA_VQ_OUT] = "magma_out"; |
| } |
| |
| static void virtmagma_virtio_find_vqs_complete(struct virtmagma_info *vi, struct virtqueue **vqs) |
| { |
| struct virtmagma_info_private *vip = vi->private; |
| memcpy(vip->vqs, vqs, sizeof(vip->vqs)); |
| INIT_WORK(&vip->in_vq_work, vq_in_work_handler); |
| INIT_WORK(&vip->out_vq_work, vq_out_work_handler); |
| init_waitqueue_head(&vip->out_waitq); |
| } |
| |
| static int virtmagma_device_ready_prepare(struct virtmagma_info *vi) |
| { |
| struct virtmagma_info_private *vip = vi->private; |
| int ret; |
| ret = vq_fill_locked(vip->vqs[VIRTMAGMA_VQ_IN]); |
| if (ret) { |
| pr_warn("virtmagma: failed to fill in virtqueue: %d", ret); |
| return ret; |
| } |
| return 0; |
| } |
| |
| static void virtmagma_device_ready_complete(struct virtmagma_info *vi) |
| { |
| struct virtmagma_info_private *vip = vi->private; |
| virtqueue_kick(vip->vqs[VIRTMAGMA_VQ_IN]); |
| } |
| |
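| /* Entry point: register magma's hooks with the shared virtio glue and |
|  * allocate the per-device private state. |
|  */ |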
| int virtmagma_init(struct virtmagma_info *vi) |
| { |
| int i; |
| struct virtmagma_info_private *vip; |
| vi->virtio_probe = virtmagma_probe; |
| vi->virtio_remove = virtmagma_remove; |
| vi->ioctl = virtmagma_ioctl; |
| vi->virtio_find_vqs_prepare = virtmagma_virtio_find_vqs_prepare; |
| vi->virtio_find_vqs_complete = virtmagma_virtio_find_vqs_complete; |
| vi->virtio_device_ready_prepare = virtmagma_device_ready_prepare; |
| vi->virtio_device_ready_complete = virtmagma_device_ready_complete; |
| vi->queue_count = VIRTMAGMA_QUEUE_COUNT; |
| |
| vi->private = kzalloc(sizeof(*vip), GFP_KERNEL); |
| if (!vi->private) |
| return -ENOMEM; |
| vip = vi->private; |
| |
| for (i = 0; i < VIRTMAGMA_QUEUE_COUNT; i++) |
| mutex_init(&vip->vq_locks[i]); |
| |
| vi->enabled = true; |
| |
| return 0; |
| } |