| /* |
| * Copyright 2014 Advanced Micro Devices, Inc. |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a |
| * copy of this software and associated documentation files (the "Software"), |
| * to deal in the Software without restriction, including without limitation |
| * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| * and/or sell copies of the Software, and to permit persons to whom the |
| * Software is furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included in |
| * all copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| * OTHER DEALINGS IN THE SOFTWARE. |
| */ |
| |
| #include <linux/device.h> |
| #include <linux/export.h> |
| #include <linux/err.h> |
| #include <linux/fs.h> |
| #include <linux/file.h> |
| #include <linux/sched.h> |
| #include <linux/slab.h> |
| #include <linux/uaccess.h> |
| #include <linux/compat.h> |
| #include <uapi/linux/kfd_ioctl.h> |
| #include <linux/time.h> |
| #include <linux/mm.h> |
| #include <linux/mman.h> |
| #include <asm/processor.h> |
| #include "kfd_priv.h" |
| #include "kfd_device_queue_manager.h" |
| #include "kfd_dbgmgr.h" |
| |
| static long kfd_ioctl(struct file *, unsigned int, unsigned long); |
| static int kfd_open(struct inode *, struct file *); |
| static int kfd_mmap(struct file *, struct vm_area_struct *); |
| |
| static const char kfd_dev_name[] = "kfd"; |
| |
| static const struct file_operations kfd_fops = { |
| .owner = THIS_MODULE, |
| .unlocked_ioctl = kfd_ioctl, |
| .compat_ioctl = kfd_ioctl, |
| .open = kfd_open, |
| .mmap = kfd_mmap, |
| }; |
| |
| static int kfd_char_dev_major = -1; |
| static struct class *kfd_class; |
| struct device *kfd_device; |
| |
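| /* |
|  * Register the /dev/kfd character device: allocate a dynamic major |
|  * number, create the "kfd" class and a single device node (minor 0). |
|  * Undone in reverse order by kfd_chardev_exit(). |
|  */ |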
| int kfd_chardev_init(void) |
| { |
| int err = 0; |
| |
| kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops); |
| err = kfd_char_dev_major; |
| if (err < 0) |
| goto err_register_chrdev; |
| |
| kfd_class = class_create(THIS_MODULE, kfd_dev_name); |
| err = PTR_ERR(kfd_class); |
| if (IS_ERR(kfd_class)) |
| goto err_class_create; |
| |
| kfd_device = device_create(kfd_class, NULL, |
| MKDEV(kfd_char_dev_major, 0), |
| NULL, kfd_dev_name); |
| err = PTR_ERR(kfd_device); |
| if (IS_ERR(kfd_device)) |
| goto err_device_create; |
| |
| return 0; |
| |
| err_device_create: |
| class_destroy(kfd_class); |
| err_class_create: |
| unregister_chrdev(kfd_char_dev_major, kfd_dev_name); |
| err_register_chrdev: |
| return err; |
| } |
| |
| void kfd_chardev_exit(void) |
| { |
| device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0)); |
| class_destroy(kfd_class); |
| unregister_chrdev(kfd_char_dev_major, kfd_dev_name); |
| } |
| |
| struct device *kfd_chardev(void) |
| { |
| return kfd_device; |
| } |
| |
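| /* |
|  * Open handler for /dev/kfd. Rejects 32-bit (compat) callers and |
|  * creates the kfd_process tracking structure for the calling process. |
|  */ |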
| static int kfd_open(struct inode *inode, struct file *filep) |
| { |
| struct kfd_process *process; |
| bool is_32bit_user_mode; |
| |
| if (iminor(inode) != 0) |
| return -ENODEV; |
| |
| is_32bit_user_mode = in_compat_syscall(); |
| |
| if (is_32bit_user_mode) { |
| dev_warn(kfd_device, |
| "Process %d (32-bit) failed to open /dev/kfd\n" |
| "32-bit processes are not supported by amdkfd\n", |
| current->pid); |
| return -EPERM; |
| } |
| |
| process = kfd_create_process(filep); |
| if (IS_ERR(process)) |
| return PTR_ERR(process); |
| |
| dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n", |
| process->pasid, process->is_32bit_user_mode); |
| |
| return 0; |
| } |
| |
| static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, |
| void *data) |
| { |
| struct kfd_ioctl_get_version_args *args = data; |
| |
| args->major_version = KFD_IOCTL_MAJOR_VERSION; |
| args->minor_version = KFD_IOCTL_MINOR_VERSION; |
| |
| return 0; |
| } |
| |
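| /* |
|  * Validate the user-supplied create-queue arguments (percentage, |
|  * priority, ring size and user pointers) and translate them into a |
|  * struct queue_properties. |
|  */ |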
| static int set_queue_properties_from_user(struct queue_properties *q_properties, |
| struct kfd_ioctl_create_queue_args *args) |
| { |
| if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { |
| pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); |
| return -EINVAL; |
| } |
| |
| if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { |
| pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n"); |
| return -EINVAL; |
| } |
| |
| if ((args->ring_base_address) && |
| (!access_ok(VERIFY_WRITE, |
| (const void __user *) args->ring_base_address, |
| sizeof(uint64_t)))) { |
| pr_err("Can't access ring base address\n"); |
| return -EFAULT; |
| } |
| |
| if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { |
| pr_err("Ring size must be a power of 2 or 0\n"); |
| return -EINVAL; |
| } |
| |
| if (!access_ok(VERIFY_WRITE, |
| (const void __user *) args->read_pointer_address, |
| sizeof(uint32_t))) { |
| pr_err("Can't access read pointer\n"); |
| return -EFAULT; |
| } |
| |
| if (!access_ok(VERIFY_WRITE, |
| (const void __user *) args->write_pointer_address, |
| sizeof(uint32_t))) { |
| pr_err("Can't access write pointer\n"); |
| return -EFAULT; |
| } |
| |
| if (args->eop_buffer_address && |
| !access_ok(VERIFY_WRITE, |
| (const void __user *) args->eop_buffer_address, |
| sizeof(uint32_t))) { |
| pr_debug("Can't access eop buffer"); |
| return -EFAULT; |
| } |
| |
| if (args->ctx_save_restore_address && |
| !access_ok(VERIFY_WRITE, |
| (const void __user *) args->ctx_save_restore_address, |
| sizeof(uint32_t))) { |
| pr_debug("Can't access ctx save restore buffer"); |
| return -EFAULT; |
| } |
| |
| q_properties->is_interop = false; |
| q_properties->queue_percent = args->queue_percentage; |
| q_properties->priority = args->queue_priority; |
| q_properties->queue_address = args->ring_base_address; |
| q_properties->queue_size = args->ring_size; |
| q_properties->read_ptr = (uint32_t *) args->read_pointer_address; |
| q_properties->write_ptr = (uint32_t *) args->write_pointer_address; |
| q_properties->eop_ring_buffer_address = args->eop_buffer_address; |
| q_properties->eop_ring_buffer_size = args->eop_buffer_size; |
| q_properties->ctx_save_restore_area_address = |
| args->ctx_save_restore_address; |
| q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size; |
| q_properties->ctl_stack_size = args->ctl_stack_size; |
| if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE || |
| args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) |
| q_properties->type = KFD_QUEUE_TYPE_COMPUTE; |
| else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA) |
| q_properties->type = KFD_QUEUE_TYPE_SDMA; |
| else |
| return -ENOTSUPP; |
| |
| if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) |
| q_properties->format = KFD_QUEUE_FORMAT_AQL; |
| else |
| q_properties->format = KFD_QUEUE_FORMAT_PM4; |
| |
| pr_debug("Queue Percentage: %d, %d\n", |
| q_properties->queue_percent, args->queue_percentage); |
| |
| pr_debug("Queue Priority: %d, %d\n", |
| q_properties->priority, args->queue_priority); |
| |
| pr_debug("Queue Address: 0x%llX, 0x%llX\n", |
| q_properties->queue_address, args->ring_base_address); |
| |
| pr_debug("Queue Size: 0x%llX, %u\n", |
| q_properties->queue_size, args->ring_size); |
| |
| pr_debug("Queue r/w Pointers: %px, %px\n", |
| q_properties->read_ptr, |
| q_properties->write_ptr); |
| |
| pr_debug("Queue Format: %d\n", q_properties->format); |
| |
| pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address); |
| |
| pr_debug("Queue CTX save area: 0x%llX\n", |
| q_properties->ctx_save_restore_area_address); |
| |
| return 0; |
| } |
| |
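| /* |
|  * Create a user-mode queue: validate the arguments, bind the process |
|  * to the target device and let the process queue manager allocate the |
|  * queue. The doorbell mmap offset is returned to user mode. |
|  */ |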
| static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, |
| void *data) |
| { |
| struct kfd_ioctl_create_queue_args *args = data; |
| struct kfd_dev *dev; |
| int err = 0; |
| unsigned int queue_id; |
| struct kfd_process_device *pdd; |
| struct queue_properties q_properties; |
| |
| memset(&q_properties, 0, sizeof(struct queue_properties)); |
| |
| pr_debug("Creating queue ioctl\n"); |
| |
| err = set_queue_properties_from_user(&q_properties, args); |
| if (err) |
| return err; |
| |
| pr_debug("Looking for gpu id 0x%x\n", args->gpu_id); |
| dev = kfd_device_by_id(args->gpu_id); |
| if (!dev) { |
| pr_debug("Could not find gpu id 0x%x\n", args->gpu_id); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&p->mutex); |
| |
| pdd = kfd_bind_process_to_device(dev, p); |
| if (IS_ERR(pdd)) { |
| err = -ESRCH; |
| goto err_bind_process; |
| } |
| |
| pr_debug("Creating queue for PASID %d on gpu 0x%x\n", |
| p->pasid, |
| dev->id); |
| |
| err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id); |
| if (err != 0) |
| goto err_create_queue; |
| |
| args->queue_id = queue_id; |
| |
| /* Return gpu_id as doorbell offset for mmap usage */ |
| args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; |
| args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); |
| args->doorbell_offset <<= PAGE_SHIFT; |
| if (KFD_IS_SOC15(dev->device_info->asic_family)) |
| /* On SOC15 ASICs, doorbell allocation must be |
| * per-device, and independent from the per-process |
| * queue_id. Return the doorbell offset within the |
| * doorbell aperture to user mode. |
| */ |
| args->doorbell_offset |= q_properties.doorbell_off; |
| |
| mutex_unlock(&p->mutex); |
| |
| pr_debug("Queue id %d was created successfully\n", args->queue_id); |
| |
| pr_debug("Ring buffer address == 0x%016llX\n", |
| args->ring_base_address); |
| |
| pr_debug("Read ptr address == 0x%016llX\n", |
| args->read_pointer_address); |
| |
| pr_debug("Write ptr address == 0x%016llX\n", |
| args->write_pointer_address); |
| |
| return 0; |
| |
| err_create_queue: |
| err_bind_process: |
| mutex_unlock(&p->mutex); |
| return err; |
| } |
| |
| static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, |
| void *data) |
| { |
| int retval; |
| struct kfd_ioctl_destroy_queue_args *args = data; |
| |
| pr_debug("Destroying queue id %d for pasid %d\n", |
| args->queue_id, |
| p->pasid); |
| |
| mutex_lock(&p->mutex); |
| |
| retval = pqm_destroy_queue(&p->pqm, args->queue_id); |
| |
| mutex_unlock(&p->mutex); |
| return retval; |
| } |
| |
| static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, |
| void *data) |
| { |
| int retval; |
| struct kfd_ioctl_update_queue_args *args = data; |
| struct queue_properties properties; |
| |
| if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { |
| pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); |
| return -EINVAL; |
| } |
| |
| if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { |
| pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n"); |
| return -EINVAL; |
| } |
| |
| if ((args->ring_base_address) && |
| (!access_ok(VERIFY_WRITE, |
| (const void __user *) args->ring_base_address, |
| sizeof(uint64_t)))) { |
| pr_err("Can't access ring base address\n"); |
| return -EFAULT; |
| } |
| |
| if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { |
| pr_err("Ring size must be a power of 2 or 0\n"); |
| return -EINVAL; |
| } |
| |
| properties.queue_address = args->ring_base_address; |
| properties.queue_size = args->ring_size; |
| properties.queue_percent = args->queue_percentage; |
| properties.priority = args->queue_priority; |
| |
| pr_debug("Updating queue id %d for pasid %d\n", |
| args->queue_id, p->pasid); |
| |
| mutex_lock(&p->mutex); |
| |
| retval = pqm_update_queue(&p->pqm, args->queue_id, &properties); |
| |
| mutex_unlock(&p->mutex); |
| |
| return retval; |
| } |
| |
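| /* |
|  * Set the default and alternate cache coherency policy for the process |
|  * on the given device, including the alternate aperture range. |
|  */ |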
| static int kfd_ioctl_set_memory_policy(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_set_memory_policy_args *args = data; |
| struct kfd_dev *dev; |
| int err = 0; |
| struct kfd_process_device *pdd; |
| enum cache_policy default_policy, alternate_policy; |
| |
| if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT |
| && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { |
| return -EINVAL; |
| } |
| |
| if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT |
| && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { |
| return -EINVAL; |
| } |
| |
| dev = kfd_device_by_id(args->gpu_id); |
| if (!dev) |
| return -EINVAL; |
| |
| mutex_lock(&p->mutex); |
| |
| pdd = kfd_bind_process_to_device(dev, p); |
| if (IS_ERR(pdd)) { |
| err = -ESRCH; |
| goto out; |
| } |
| |
| default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) |
| ? cache_policy_coherent : cache_policy_noncoherent; |
| |
| alternate_policy = |
| (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) |
| ? cache_policy_coherent : cache_policy_noncoherent; |
| |
| if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm, |
| &pdd->qpd, |
| default_policy, |
| alternate_policy, |
| (void __user *)args->alternate_aperture_base, |
| args->alternate_aperture_size)) |
| err = -EINVAL; |
| |
| out: |
| mutex_unlock(&p->mutex); |
| |
| return err; |
| } |
| |
| static int kfd_ioctl_set_trap_handler(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_set_trap_handler_args *args = data; |
| struct kfd_dev *dev; |
| int err = 0; |
| struct kfd_process_device *pdd; |
| |
| dev = kfd_device_by_id(args->gpu_id); |
| if (dev == NULL) |
| return -EINVAL; |
| |
| mutex_lock(&p->mutex); |
| |
| pdd = kfd_bind_process_to_device(dev, p); |
| if (IS_ERR(pdd)) { |
| err = -ESRCH; |
| goto out; |
| } |
| |
| if (dev->dqm->ops.set_trap_handler(dev->dqm, |
| &pdd->qpd, |
| args->tba_addr, |
| args->tma_addr)) |
| err = -EINVAL; |
| |
| out: |
| mutex_unlock(&p->mutex); |
| |
| return err; |
| } |
| |
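| /* |
|  * Register the calling process with the debug manager of the given |
|  * device. Only one debugger may be registered per device at a time. |
|  */ |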
| static int kfd_ioctl_dbg_register(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_dbg_register_args *args = data; |
| struct kfd_dev *dev; |
| struct kfd_dbgmgr *dbgmgr_ptr; |
| struct kfd_process_device *pdd; |
| bool create_ok; |
| long status = 0; |
| |
| dev = kfd_device_by_id(args->gpu_id); |
| if (!dev) |
| return -EINVAL; |
| |
| if (dev->device_info->asic_family == CHIP_CARRIZO) { |
| pr_debug("kfd_ioctl_dbg_register not supported on CZ\n"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&p->mutex); |
| mutex_lock(kfd_get_dbgmgr_mutex()); |
| |
| /* |
| * Make sure that we have a pdd, in case this is the first queue |
| * created for this process. |
| */ |
| pdd = kfd_bind_process_to_device(dev, p); |
| if (IS_ERR(pdd)) { |
| status = PTR_ERR(pdd); |
| goto out; |
| } |
| |
| if (!dev->dbgmgr) { |
| /* In case of a legal call, we have no dbgmgr yet */ |
| create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev); |
| if (create_ok) { |
| status = kfd_dbgmgr_register(dbgmgr_ptr, p); |
| if (status != 0) |
| kfd_dbgmgr_destroy(dbgmgr_ptr); |
| else |
| dev->dbgmgr = dbgmgr_ptr; |
| } |
| } else { |
| pr_debug("debugger already registered\n"); |
| status = -EINVAL; |
| } |
| |
| out: |
| mutex_unlock(kfd_get_dbgmgr_mutex()); |
| mutex_unlock(&p->mutex); |
| |
| return status; |
| } |
| |
| static int kfd_ioctl_dbg_unregister(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_dbg_unregister_args *args = data; |
| struct kfd_dev *dev; |
| long status; |
| |
| dev = kfd_device_by_id(args->gpu_id); |
| if (!dev || !dev->dbgmgr) |
| return -EINVAL; |
| |
| if (dev->device_info->asic_family == CHIP_CARRIZO) { |
| pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(kfd_get_dbgmgr_mutex()); |
| |
| status = kfd_dbgmgr_unregister(dev->dbgmgr, p); |
| if (!status) { |
| kfd_dbgmgr_destroy(dev->dbgmgr); |
| dev->dbgmgr = NULL; |
| } |
| |
| mutex_unlock(kfd_get_dbgmgr_mutex()); |
| |
| return status; |
| } |
| |
| /* |
|  * Parse and generate a variable-size data structure for address watch. |
|  * The total buffer size and the number of watch points are limited in |
|  * order to prevent kernel abuse. (This has no bearing on the much |
|  * smaller HW limitation, which is enforced by the dbgdev module.) |
|  * Note that the watch addresses themselves are not copied from user |
|  * space, since they are programmed into the HW with user-mode values. |
|  */ |
| static int kfd_ioctl_dbg_address_watch(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_dbg_address_watch_args *args = data; |
| struct kfd_dev *dev; |
| struct dbg_address_watch_info aw_info; |
| unsigned char *args_buff; |
| long status; |
| void __user *cmd_from_user; |
| uint64_t watch_mask_value = 0; |
| unsigned int args_idx = 0; |
| |
| memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info)); |
| |
| dev = kfd_device_by_id(args->gpu_id); |
| if (!dev) |
| return -EINVAL; |
| |
| if (dev->device_info->asic_family == CHIP_CARRIZO) { |
| pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n"); |
| return -EINVAL; |
| } |
| |
| cmd_from_user = (void __user *) args->content_ptr; |
| |
| /* Validate arguments */ |
| |
| if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) || |
| (args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) || |
| (cmd_from_user == NULL)) |
| return -EINVAL; |
| |
| /* this is the actual buffer to work with */ |
| args_buff = memdup_user(cmd_from_user, |
| args->buf_size_in_bytes - sizeof(*args)); |
| if (IS_ERR(args_buff)) |
| return PTR_ERR(args_buff); |
| |
| aw_info.process = p; |
| |
| aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx])); |
| args_idx += sizeof(aw_info.num_watch_points); |
| |
| aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx]; |
| args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points; |
| |
| /* |
| * set watch address base pointer to point on the array base |
| * within args_buff |
| */ |
| aw_info.watch_address = (uint64_t *) &args_buff[args_idx]; |
| |
| /* skip over the addresses buffer */ |
| args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points; |
| |
| if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) { |
| status = -EINVAL; |
| goto out; |
| } |
| |
| watch_mask_value = (uint64_t) args_buff[args_idx]; |
| |
| if (watch_mask_value > 0) { |
| /* |
| * There is an array of masks. |
| * set watch mask base pointer to point on the array base |
| * within args_buff |
| */ |
| aw_info.watch_mask = (uint64_t *) &args_buff[args_idx]; |
| |
| /* skip over the masks buffer */ |
| args_idx += sizeof(aw_info.watch_mask) * |
| aw_info.num_watch_points; |
| } else { |
| /* just the NULL mask, set to NULL and skip over it */ |
| aw_info.watch_mask = NULL; |
| args_idx += sizeof(aw_info.watch_mask); |
| } |
| |
| if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) { |
| status = -EINVAL; |
| goto out; |
| } |
| |
| /* Currently HSA Event is not supported for DBG */ |
| aw_info.watch_event = NULL; |
| |
| mutex_lock(kfd_get_dbgmgr_mutex()); |
| |
| status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info); |
| |
| mutex_unlock(kfd_get_dbgmgr_mutex()); |
| |
| out: |
| kfree(args_buff); |
| |
| return status; |
| } |
| |
| /* Parse and generate fixed size data structure for wave control */ |
| static int kfd_ioctl_dbg_wave_control(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_dbg_wave_control_args *args = data; |
| struct kfd_dev *dev; |
| struct dbg_wave_control_info wac_info; |
| unsigned char *args_buff; |
| uint32_t computed_buff_size; |
| long status; |
| void __user *cmd_from_user; |
| unsigned int args_idx = 0; |
| |
| memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info)); |
| |
| /* we use compact form, independent of the packing attribute value */ |
| computed_buff_size = sizeof(*args) + |
| sizeof(wac_info.mode) + |
| sizeof(wac_info.operand) + |
| sizeof(wac_info.dbgWave_msg.DbgWaveMsg) + |
| sizeof(wac_info.dbgWave_msg.MemoryVA) + |
| sizeof(wac_info.trapId); |
| |
| dev = kfd_device_by_id(args->gpu_id); |
| if (!dev) |
| return -EINVAL; |
| |
| if (dev->device_info->asic_family == CHIP_CARRIZO) { |
| pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n"); |
| return -EINVAL; |
| } |
| |
| /* input size must match the computed "compact" size */ |
| if (args->buf_size_in_bytes != computed_buff_size) { |
| pr_debug("size mismatch, computed : actual %u : %u\n", |
| args->buf_size_in_bytes, computed_buff_size); |
| return -EINVAL; |
| } |
| |
| cmd_from_user = (void __user *) args->content_ptr; |
| |
| if (cmd_from_user == NULL) |
| return -EINVAL; |
| |
| /* copy the entire buffer from user */ |
| |
| args_buff = memdup_user(cmd_from_user, |
| args->buf_size_in_bytes - sizeof(*args)); |
| if (IS_ERR(args_buff)) |
| return PTR_ERR(args_buff); |
| |
| /* move ptr to the start of the "pay-load" area */ |
| wac_info.process = p; |
| |
| wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx])); |
| args_idx += sizeof(wac_info.operand); |
| |
| wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx])); |
| args_idx += sizeof(wac_info.mode); |
| |
| wac_info.trapId = *((uint32_t *)(&args_buff[args_idx])); |
| args_idx += sizeof(wac_info.trapId); |
| |
| wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value = |
| *((uint32_t *)(&args_buff[args_idx])); |
| wac_info.dbgWave_msg.MemoryVA = NULL; |
| |
| mutex_lock(kfd_get_dbgmgr_mutex()); |
| |
| pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n", |
| wac_info.process, wac_info.operand, |
| wac_info.mode, wac_info.trapId, |
| wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value); |
| |
| status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info); |
| |
| pr_debug("Returned status of dbg manager is %ld\n", status); |
| |
| mutex_unlock(kfd_get_dbgmgr_mutex()); |
| |
| kfree(args_buff); |
| |
| return status; |
| } |
| |
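| /* |
|  * Report GPU, CPU and system clock counters. The CPU and system |
|  * counters are derived from the raw monotonic and boottime clocks, |
|  * so their frequency is reported as 1 GHz (nanosecond resolution). |
|  */ |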
| static int kfd_ioctl_get_clock_counters(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_get_clock_counters_args *args = data; |
| struct kfd_dev *dev; |
| struct timespec64 time; |
| |
| dev = kfd_device_by_id(args->gpu_id); |
| if (dev) |
| /* Reading GPU clock counter from KGD */ |
| args->gpu_clock_counter = |
| dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); |
| else |
| /* Node without GPU resource */ |
| args->gpu_clock_counter = 0; |
| |
| /* No access to rdtsc. Using raw monotonic time */ |
| getrawmonotonic64(&time); |
| args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time); |
| |
| get_monotonic_boottime64(&time); |
| args->system_clock_counter = (uint64_t)timespec64_to_ns(&time); |
| |
| /* Since the counter is in nano-seconds we use 1GHz frequency */ |
| args->system_clock_freq = 1000000000; |
| |
| return 0; |
| } |
| |
| static int kfd_ioctl_get_process_apertures(struct file *filp, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_get_process_apertures_args *args = data; |
| struct kfd_process_device_apertures *pAperture; |
| struct kfd_process_device *pdd; |
| |
| dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); |
| |
| args->num_of_nodes = 0; |
| |
| mutex_lock(&p->mutex); |
| |
| /* if the process-device list isn't empty */ |
| if (kfd_has_process_device_data(p)) { |
| /* Run over all pdd of the process */ |
| pdd = kfd_get_first_process_device_data(p); |
| do { |
| pAperture = |
| &args->process_apertures[args->num_of_nodes]; |
| pAperture->gpu_id = pdd->dev->id; |
| pAperture->lds_base = pdd->lds_base; |
| pAperture->lds_limit = pdd->lds_limit; |
| pAperture->gpuvm_base = pdd->gpuvm_base; |
| pAperture->gpuvm_limit = pdd->gpuvm_limit; |
| pAperture->scratch_base = pdd->scratch_base; |
| pAperture->scratch_limit = pdd->scratch_limit; |
| |
| dev_dbg(kfd_device, |
| "node id %u\n", args->num_of_nodes); |
| dev_dbg(kfd_device, |
| "gpu id %u\n", pdd->dev->id); |
| dev_dbg(kfd_device, |
| "lds_base %llX\n", pdd->lds_base); |
| dev_dbg(kfd_device, |
| "lds_limit %llX\n", pdd->lds_limit); |
| dev_dbg(kfd_device, |
| "gpuvm_base %llX\n", pdd->gpuvm_base); |
| dev_dbg(kfd_device, |
| "gpuvm_limit %llX\n", pdd->gpuvm_limit); |
| dev_dbg(kfd_device, |
| "scratch_base %llX\n", pdd->scratch_base); |
| dev_dbg(kfd_device, |
| "scratch_limit %llX\n", pdd->scratch_limit); |
| |
| args->num_of_nodes++; |
| |
| pdd = kfd_get_next_process_device_data(p, pdd); |
| } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS)); |
| } |
| |
| mutex_unlock(&p->mutex); |
| |
| return 0; |
| } |
| |
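| /* |
|  * Newer variant of the apertures query: when called with num_of_nodes |
|  * == 0 it only reports the node count, so that user space can allocate |
|  * a sufficiently large array and call again to receive the apertures. |
|  */ |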
| static int kfd_ioctl_get_process_apertures_new(struct file *filp, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_get_process_apertures_new_args *args = data; |
| struct kfd_process_device_apertures *pa; |
| struct kfd_process_device *pdd; |
| uint32_t nodes = 0; |
| int ret; |
| |
| dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); |
| |
| if (args->num_of_nodes == 0) { |
| /* Return number of nodes, so that user space can allocate |
| * sufficient memory |
| */ |
| mutex_lock(&p->mutex); |
| |
| if (!kfd_has_process_device_data(p)) |
| goto out_unlock; |
| |
| /* Run over all pdd of the process */ |
| pdd = kfd_get_first_process_device_data(p); |
| do { |
| args->num_of_nodes++; |
| pdd = kfd_get_next_process_device_data(p, pdd); |
| } while (pdd); |
| |
| goto out_unlock; |
| } |
| |
| /* Fill in process-aperture information for all available |
| * nodes, but not more than args->num_of_nodes as that is |
| * the amount of memory allocated by user |
| */ |
| pa = kzalloc((sizeof(struct kfd_process_device_apertures) * |
| args->num_of_nodes), GFP_KERNEL); |
| if (!pa) |
| return -ENOMEM; |
| |
| mutex_lock(&p->mutex); |
| |
| if (!kfd_has_process_device_data(p)) { |
| args->num_of_nodes = 0; |
| kfree(pa); |
| goto out_unlock; |
| } |
| |
| /* Run over all pdd of the process */ |
| pdd = kfd_get_first_process_device_data(p); |
| do { |
| pa[nodes].gpu_id = pdd->dev->id; |
| pa[nodes].lds_base = pdd->lds_base; |
| pa[nodes].lds_limit = pdd->lds_limit; |
| pa[nodes].gpuvm_base = pdd->gpuvm_base; |
| pa[nodes].gpuvm_limit = pdd->gpuvm_limit; |
| pa[nodes].scratch_base = pdd->scratch_base; |
| pa[nodes].scratch_limit = pdd->scratch_limit; |
| |
| dev_dbg(kfd_device, |
| "gpu id %u\n", pdd->dev->id); |
| dev_dbg(kfd_device, |
| "lds_base %llX\n", pdd->lds_base); |
| dev_dbg(kfd_device, |
| "lds_limit %llX\n", pdd->lds_limit); |
| dev_dbg(kfd_device, |
| "gpuvm_base %llX\n", pdd->gpuvm_base); |
| dev_dbg(kfd_device, |
| "gpuvm_limit %llX\n", pdd->gpuvm_limit); |
| dev_dbg(kfd_device, |
| "scratch_base %llX\n", pdd->scratch_base); |
| dev_dbg(kfd_device, |
| "scratch_limit %llX\n", pdd->scratch_limit); |
| nodes++; |
| |
| pdd = kfd_get_next_process_device_data(p, pdd); |
| } while (pdd && (nodes < args->num_of_nodes)); |
| mutex_unlock(&p->mutex); |
| |
| args->num_of_nodes = nodes; |
| ret = copy_to_user( |
| (void __user *)args->kfd_process_device_apertures_ptr, |
| pa, |
| (nodes * sizeof(struct kfd_process_device_apertures))); |
| kfree(pa); |
| return ret ? -EFAULT : 0; |
| |
| out_unlock: |
| mutex_unlock(&p->mutex); |
| return 0; |
| } |
| |
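| /* |
|  * Create an event. On dGPUs the event page is allocated in user mode |
|  * and passed in through event_page_offset on the first call; it is |
|  * then mapped into the kernel so KFD can access the signal slots. |
|  */ |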
| static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, |
| void *data) |
| { |
| struct kfd_ioctl_create_event_args *args = data; |
| int err; |
| |
| /* For dGPUs the event page is allocated in user mode. The |
| * handle is passed to KFD with the first call to this IOCTL |
| * through the event_page_offset field. |
| */ |
| if (args->event_page_offset) { |
| struct kfd_dev *kfd; |
| struct kfd_process_device *pdd; |
| void *mem, *kern_addr; |
| uint64_t size; |
| |
| if (p->signal_page) { |
| pr_err("Event page is already set\n"); |
| return -EINVAL; |
| } |
| |
| kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset)); |
| if (!kfd) { |
| pr_err("Getting device by id failed in %s\n", __func__); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&p->mutex); |
| pdd = kfd_bind_process_to_device(kfd, p); |
| if (IS_ERR(pdd)) { |
| err = PTR_ERR(pdd); |
| goto out_unlock; |
| } |
| |
| mem = kfd_process_device_translate_handle(pdd, |
| GET_IDR_HANDLE(args->event_page_offset)); |
| if (!mem) { |
| pr_err("Can't find BO, offset is 0x%llx\n", |
| args->event_page_offset); |
| err = -EINVAL; |
| goto out_unlock; |
| } |
| mutex_unlock(&p->mutex); |
| |
| err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd, |
| mem, &kern_addr, &size); |
| if (err) { |
| pr_err("Failed to map event page to kernel\n"); |
| return err; |
| } |
| |
| err = kfd_event_page_set(p, kern_addr, size); |
| if (err) { |
| pr_err("Failed to set event page\n"); |
| return err; |
| } |
| } |
| |
| err = kfd_event_create(filp, p, args->event_type, |
| args->auto_reset != 0, args->node_id, |
| &args->event_id, &args->event_trigger_data, |
| &args->event_page_offset, |
| &args->event_slot_index); |
| |
| return err; |
| |
| out_unlock: |
| mutex_unlock(&p->mutex); |
| return err; |
| } |
| |
| static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p, |
| void *data) |
| { |
| struct kfd_ioctl_destroy_event_args *args = data; |
| |
| return kfd_event_destroy(p, args->event_id); |
| } |
| |
| static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p, |
| void *data) |
| { |
| struct kfd_ioctl_set_event_args *args = data; |
| |
| return kfd_set_event(p, args->event_id); |
| } |
| |
| static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p, |
| void *data) |
| { |
| struct kfd_ioctl_reset_event_args *args = data; |
| |
| return kfd_reset_event(p, args->event_id); |
| } |
| |
| static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p, |
| void *data) |
| { |
| struct kfd_ioctl_wait_events_args *args = data; |
| int err; |
| |
| err = kfd_wait_on_events(p, args->num_events, |
| (void __user *)args->events_ptr, |
| (args->wait_for_all != 0), |
| args->timeout, &args->wait_result); |
| |
| return err; |
| } |
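| |
| /* |
|  * Record the scratch (private memory) backing VA for the process on |
|  * the given device. Without HWS the address is also programmed into |
|  * the hardware immediately for the process's VMID. |
|  */ |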
| static int kfd_ioctl_set_scratch_backing_va(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_set_scratch_backing_va_args *args = data; |
| struct kfd_process_device *pdd; |
| struct kfd_dev *dev; |
| long err; |
| |
| dev = kfd_device_by_id(args->gpu_id); |
| if (!dev) |
| return -EINVAL; |
| |
| mutex_lock(&p->mutex); |
| |
| pdd = kfd_bind_process_to_device(dev, p); |
| if (IS_ERR(pdd)) { |
| err = PTR_ERR(pdd); |
| goto bind_process_to_device_fail; |
| } |
| |
| pdd->qpd.sh_hidden_private_base = args->va_addr; |
| |
| mutex_unlock(&p->mutex); |
| |
| if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && |
| pdd->qpd.vmid != 0) |
| dev->kfd2kgd->set_scratch_backing_va( |
| dev->kgd, args->va_addr, pdd->qpd.vmid); |
| |
| return 0; |
| |
| bind_process_to_device_fail: |
| mutex_unlock(&p->mutex); |
| return err; |
| } |
| |
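| /* |
|  * Return the device's tiling configuration (gb_addr_config, bank/rank |
|  * counts and the tile/macro-tile mode arrays) to user mode. |
|  */ |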
| static int kfd_ioctl_get_tile_config(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_get_tile_config_args *args = data; |
| struct kfd_dev *dev; |
| struct tile_config config; |
| int err = 0; |
| |
| dev = kfd_device_by_id(args->gpu_id); |
| if (!dev) |
| return -EINVAL; |
| |
| dev->kfd2kgd->get_tile_config(dev->kgd, &config); |
| |
| args->gb_addr_config = config.gb_addr_config; |
| args->num_banks = config.num_banks; |
| args->num_ranks = config.num_ranks; |
| |
| if (args->num_tile_configs > config.num_tile_configs) |
| args->num_tile_configs = config.num_tile_configs; |
| err = copy_to_user((void __user *)args->tile_config_ptr, |
| config.tile_config_ptr, |
| args->num_tile_configs * sizeof(uint32_t)); |
| if (err) { |
| args->num_tile_configs = 0; |
| return -EFAULT; |
| } |
| |
| if (args->num_macro_tile_configs > config.num_macro_tile_configs) |
| args->num_macro_tile_configs = |
| config.num_macro_tile_configs; |
| err = copy_to_user((void __user *)args->macro_tile_config_ptr, |
| config.macro_tile_config_ptr, |
| args->num_macro_tile_configs * sizeof(uint32_t)); |
| if (err) { |
| args->num_macro_tile_configs = 0; |
| return -EFAULT; |
| } |
| |
| return 0; |
| } |
| |
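| /* |
|  * Acquire the GPUVM address space of a DRM render node file descriptor |
|  * for this process/device. On success the pdd keeps the drm_file |
|  * reference. |
|  */ |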
| static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p, |
| void *data) |
| { |
| struct kfd_ioctl_acquire_vm_args *args = data; |
| struct kfd_process_device *pdd; |
| struct kfd_dev *dev; |
| struct file *drm_file; |
| int ret; |
| |
| dev = kfd_device_by_id(args->gpu_id); |
| if (!dev) |
| return -EINVAL; |
| |
| drm_file = fget(args->drm_fd); |
| if (!drm_file) |
| return -EINVAL; |
| |
| mutex_lock(&p->mutex); |
| |
| pdd = kfd_get_process_device_data(dev, p); |
| if (!pdd) { |
| ret = -EINVAL; |
| goto err_unlock; |
| } |
| |
| if (pdd->drm_file) { |
| ret = pdd->drm_file == drm_file ? 0 : -EBUSY; |
| goto err_unlock; |
| } |
| |
| ret = kfd_process_device_init_vm(pdd, drm_file); |
| if (ret) |
| goto err_unlock; |
| /* On success, the PDD keeps the drm_file reference */ |
| mutex_unlock(&p->mutex); |
| |
| return 0; |
| |
| err_unlock: |
| mutex_unlock(&p->mutex); |
| fput(drm_file); |
| return ret; |
| } |
| |
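| /* |
|  * A device is considered "large BAR" if all of its local memory is |
|  * CPU-visible (no private VRAM). The debug_largebar flag can force |
|  * this on for testing. |
|  */ |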
| static bool kfd_dev_is_large_bar(struct kfd_dev *dev) |
| { |
| struct kfd_local_mem_info mem_info; |
| |
| if (debug_largebar) { |
| pr_debug("Simulate large-bar allocation on non large-bar machine\n"); |
| return true; |
| } |
| |
| if (dev->device_info->needs_iommu_device) |
| return false; |
| |
| dev->kfd2kgd->get_local_mem_info(dev->kgd, &mem_info); |
| if (mem_info.local_mem_size_private == 0 && |
| mem_info.local_mem_size_public > 0) |
| return true; |
| return false; |
| } |
| |
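| /* |
|  * Allocate GPU-accessible memory for the process on the given device, |
|  * as described by args->flags, and return an opaque handle plus the |
|  * mmap offset. Host-visible VRAM requires a large-BAR device. |
|  */ |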
| static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_alloc_memory_of_gpu_args *args = data; |
| struct kfd_process_device *pdd; |
| void *mem; |
| struct kfd_dev *dev; |
| int idr_handle; |
| long err; |
| uint64_t offset = args->mmap_offset; |
| uint32_t flags = args->flags; |
| |
| if (args->size == 0) |
| return -EINVAL; |
| |
| dev = kfd_device_by_id(args->gpu_id); |
| if (!dev) |
| return -EINVAL; |
| |
| if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) && |
| (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) && |
| !kfd_dev_is_large_bar(dev)) { |
| pr_err("Alloc host visible vram on small bar is not allowed\n"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&p->mutex); |
| |
| pdd = kfd_bind_process_to_device(dev, p); |
| if (IS_ERR(pdd)) { |
| err = PTR_ERR(pdd); |
| goto err_unlock; |
| } |
| |
| err = dev->kfd2kgd->alloc_memory_of_gpu( |
| dev->kgd, args->va_addr, args->size, |
| pdd->vm, (struct kgd_mem **) &mem, &offset, |
| flags); |
| |
| if (err) |
| goto err_unlock; |
| |
| idr_handle = kfd_process_device_create_obj_handle(pdd, mem); |
| if (idr_handle < 0) { |
| err = -EFAULT; |
| goto err_free; |
| } |
| |
| mutex_unlock(&p->mutex); |
| |
| args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); |
| args->mmap_offset = offset; |
| |
| return 0; |
| |
| err_free: |
| dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem); |
| err_unlock: |
| mutex_unlock(&p->mutex); |
| return err; |
| } |
| |
| static int kfd_ioctl_free_memory_of_gpu(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_free_memory_of_gpu_args *args = data; |
| struct kfd_process_device *pdd; |
| void *mem; |
| struct kfd_dev *dev; |
| int ret; |
| |
| dev = kfd_device_by_id(GET_GPU_ID(args->handle)); |
| if (!dev) |
| return -EINVAL; |
| |
| mutex_lock(&p->mutex); |
| |
| pdd = kfd_get_process_device_data(dev, p); |
| if (!pdd) { |
| pr_err("Process device data doesn't exist\n"); |
| ret = -EINVAL; |
| goto err_unlock; |
| } |
| |
| mem = kfd_process_device_translate_handle( |
| pdd, GET_IDR_HANDLE(args->handle)); |
| if (!mem) { |
| ret = -EINVAL; |
| goto err_unlock; |
| } |
| |
| ret = dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem); |
| |
| /* If freeing the buffer failed, leave the handle in place for |
| * clean-up during process tear-down. |
| */ |
| if (!ret) |
| kfd_process_device_remove_obj_handle( |
| pdd, GET_IDR_HANDLE(args->handle)); |
| |
| err_unlock: |
| mutex_unlock(&p->mutex); |
| return ret; |
| } |
| |
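| /* |
|  * Map a previously allocated buffer into the GPUVM address space of |
|  * one or more devices. n_success is updated as mappings complete so a |
|  * restarted call can skip devices that already succeeded. TLBs are |
|  * flushed after the page-table updates are done. |
|  */ |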
| static int kfd_ioctl_map_memory_to_gpu(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_map_memory_to_gpu_args *args = data; |
| struct kfd_process_device *pdd, *peer_pdd; |
| void *mem; |
| struct kfd_dev *dev, *peer; |
| long err = 0; |
| int i; |
| uint32_t *devices_arr = NULL; |
| |
| dev = kfd_device_by_id(GET_GPU_ID(args->handle)); |
| if (!dev) |
| return -EINVAL; |
| |
| if (!args->n_devices) { |
| pr_debug("Device IDs array empty\n"); |
| return -EINVAL; |
| } |
| if (args->n_success > args->n_devices) { |
| pr_debug("n_success exceeds n_devices\n"); |
| return -EINVAL; |
| } |
| |
| devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), |
| GFP_KERNEL); |
| if (!devices_arr) |
| return -ENOMEM; |
| |
| err = copy_from_user(devices_arr, |
| (void __user *)args->device_ids_array_ptr, |
| args->n_devices * sizeof(*devices_arr)); |
| if (err != 0) { |
| err = -EFAULT; |
| goto copy_from_user_failed; |
| } |
| |
| mutex_lock(&p->mutex); |
| |
| pdd = kfd_bind_process_to_device(dev, p); |
| if (IS_ERR(pdd)) { |
| err = PTR_ERR(pdd); |
| goto bind_process_to_device_failed; |
| } |
| |
| mem = kfd_process_device_translate_handle(pdd, |
| GET_IDR_HANDLE(args->handle)); |
| if (!mem) { |
| err = -ENOMEM; |
| goto get_mem_obj_from_handle_failed; |
| } |
| |
| for (i = args->n_success; i < args->n_devices; i++) { |
| peer = kfd_device_by_id(devices_arr[i]); |
| if (!peer) { |
| pr_debug("Getting device by id failed for 0x%x\n", |
| devices_arr[i]); |
| err = -EINVAL; |
| goto get_mem_obj_from_handle_failed; |
| } |
| |
| peer_pdd = kfd_bind_process_to_device(peer, p); |
| if (IS_ERR(peer_pdd)) { |
| err = PTR_ERR(peer_pdd); |
| goto get_mem_obj_from_handle_failed; |
| } |
| err = peer->kfd2kgd->map_memory_to_gpu( |
| peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm); |
| if (err) { |
| pr_err("Failed to map to gpu %d/%d\n", |
| i, args->n_devices); |
| goto map_memory_to_gpu_failed; |
| } |
| args->n_success = i+1; |
| } |
| |
| mutex_unlock(&p->mutex); |
| |
| err = dev->kfd2kgd->sync_memory(dev->kgd, (struct kgd_mem *) mem, true); |
| if (err) { |
| pr_debug("Sync memory failed, wait interrupted by user signal\n"); |
| goto sync_memory_failed; |
| } |
| |
| /* Flush TLBs after waiting for the page table updates to complete */ |
| for (i = 0; i < args->n_devices; i++) { |
| peer = kfd_device_by_id(devices_arr[i]); |
| if (WARN_ON_ONCE(!peer)) |
| continue; |
| peer_pdd = kfd_get_process_device_data(peer, p); |
| if (WARN_ON_ONCE(!peer_pdd)) |
| continue; |
| kfd_flush_tlb(peer_pdd); |
| } |
| |
| kfree(devices_arr); |
| |
| return err; |
| |
| bind_process_to_device_failed: |
| get_mem_obj_from_handle_failed: |
| map_memory_to_gpu_failed: |
| mutex_unlock(&p->mutex); |
| copy_from_user_failed: |
| sync_memory_failed: |
| kfree(devices_arr); |
| |
| return err; |
| } |
| |
| static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, |
| struct kfd_process *p, void *data) |
| { |
| struct kfd_ioctl_unmap_memory_from_gpu_args *args = data; |
| struct kfd_process_device *pdd, *peer_pdd; |
| void *mem; |
| struct kfd_dev *dev, *peer; |
| long err = 0; |
| uint32_t *devices_arr = NULL, i; |
| |
| dev = kfd_device_by_id(GET_GPU_ID(args->handle)); |
| if (!dev) |
| return -EINVAL; |
| |
| if (!args->n_devices) { |
| pr_debug("Device IDs array empty\n"); |
| return -EINVAL; |
| } |
| if (args->n_success > args->n_devices) { |
| pr_debug("n_success exceeds n_devices\n"); |
| return -EINVAL; |
| } |
| |
| devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), |
| GFP_KERNEL); |
| if (!devices_arr) |
| return -ENOMEM; |
| |
| err = copy_from_user(devices_arr, |
| (void __user *)args->device_ids_array_ptr, |
| args->n_devices * sizeof(*devices_arr)); |
| if (err != 0) { |
| err = -EFAULT; |
| goto copy_from_user_failed; |
| } |
| |
| mutex_lock(&p->mutex); |
| |
| pdd = kfd_get_process_device_data(dev, p); |
| if (!pdd) { |
| err = -EINVAL; |
| goto bind_process_to_device_failed; |
| } |
| |
| mem = kfd_process_device_translate_handle(pdd, |
| GET_IDR_HANDLE(args->handle)); |
| if (!mem) { |
| err = -ENOMEM; |
| goto get_mem_obj_from_handle_failed; |
| } |
| |
| for (i = args->n_success; i < args->n_devices; i++) { |
| peer = kfd_device_by_id(devices_arr[i]); |
| if (!peer) { |
| err = -EINVAL; |
| goto get_mem_obj_from_handle_failed; |
| } |
| |
| peer_pdd = kfd_get_process_device_data(peer, p); |
| if (!peer_pdd) { |
| err = -ENODEV; |
| goto get_mem_obj_from_handle_failed; |
| } |
| err = dev->kfd2kgd->unmap_memory_to_gpu( |
| peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm); |
| if (err) { |
| pr_err("Failed to unmap from gpu %d/%d\n", |
| i, args->n_devices); |
| goto unmap_memory_from_gpu_failed; |
| } |
| args->n_success = i+1; |
| } |
| kfree(devices_arr); |
| |
| mutex_unlock(&p->mutex); |
| |
| return 0; |
| |
| bind_process_to_device_failed: |
| get_mem_obj_from_handle_failed: |
| unmap_memory_from_gpu_failed: |
| mutex_unlock(&p->mutex); |
| copy_from_user_failed: |
| kfree(devices_arr); |
| return err; |
| } |
| |
| #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ |
| [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ |
| .cmd_drv = 0, .name = #ioctl} |
| |
| /** Ioctl table */ |
| static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, |
| kfd_ioctl_get_version, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE, |
| kfd_ioctl_create_queue, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE, |
| kfd_ioctl_destroy_queue, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY, |
| kfd_ioctl_set_memory_policy, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS, |
| kfd_ioctl_get_clock_counters, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES, |
| kfd_ioctl_get_process_apertures, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE, |
| kfd_ioctl_update_queue, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT, |
| kfd_ioctl_create_event, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT, |
| kfd_ioctl_destroy_event, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT, |
| kfd_ioctl_set_event, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT, |
| kfd_ioctl_reset_event, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS, |
| kfd_ioctl_wait_events, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER, |
| kfd_ioctl_dbg_register, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER, |
| kfd_ioctl_dbg_unregister, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH, |
| kfd_ioctl_dbg_address_watch, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL, |
| kfd_ioctl_dbg_wave_control, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA, |
| kfd_ioctl_set_scratch_backing_va, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG, |
| kfd_ioctl_get_tile_config, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER, |
| kfd_ioctl_set_trap_handler, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, |
| kfd_ioctl_get_process_apertures_new, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM, |
| kfd_ioctl_acquire_vm, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, |
| kfd_ioctl_alloc_memory_of_gpu, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU, |
| kfd_ioctl_free_memory_of_gpu, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU, |
| kfd_ioctl_map_memory_to_gpu, 0), |
| |
| AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU, |
| kfd_ioctl_unmap_memory_from_gpu, 0), |
| |
| }; |
| |
| #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) |
| |
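| /* |
|  * Common ioctl dispatcher: validates the command number, copies the |
|  * argument block into a kernel buffer (zero-extending it if the kernel |
|  * expects a larger structure than user space passed), calls the |
|  * handler from the ioctl table and copies the results back. |
|  */ |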
| static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) |
| { |
| struct kfd_process *process; |
| amdkfd_ioctl_t *func; |
| const struct amdkfd_ioctl_desc *ioctl = NULL; |
| unsigned int nr = _IOC_NR(cmd); |
| char stack_kdata[128]; |
| char *kdata = NULL; |
| unsigned int usize, asize; |
| int retcode = -EINVAL; |
| |
| if (nr >= AMDKFD_CORE_IOCTL_COUNT) |
| goto err_i1; |
| |
| if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) { |
| u32 amdkfd_size; |
| |
| ioctl = &amdkfd_ioctls[nr]; |
| |
| amdkfd_size = _IOC_SIZE(ioctl->cmd); |
| usize = asize = _IOC_SIZE(cmd); |
| if (amdkfd_size > asize) |
| asize = amdkfd_size; |
| |
| cmd = ioctl->cmd; |
| } else |
| goto err_i1; |
| |
| dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg); |
| |
| process = kfd_get_process(current); |
| if (IS_ERR(process)) { |
| dev_dbg(kfd_device, "no process\n"); |
| goto err_i1; |
| } |
| |
| /* Do not trust userspace, use our own definition */ |
| func = ioctl->func; |
| |
| if (unlikely(!func)) { |
| dev_dbg(kfd_device, "no function\n"); |
| retcode = -EINVAL; |
| goto err_i1; |
| } |
| |
| if (cmd & (IOC_IN | IOC_OUT)) { |
| if (asize <= sizeof(stack_kdata)) { |
| kdata = stack_kdata; |
| } else { |
| kdata = kmalloc(asize, GFP_KERNEL); |
| if (!kdata) { |
| retcode = -ENOMEM; |
| goto err_i1; |
| } |
| } |
| if (asize > usize) |
| memset(kdata + usize, 0, asize - usize); |
| } |
| |
| if (cmd & IOC_IN) { |
| if (copy_from_user(kdata, (void __user *)arg, usize) != 0) { |
| retcode = -EFAULT; |
| goto err_i1; |
| } |
| } else if (cmd & IOC_OUT) { |
| memset(kdata, 0, usize); |
| } |
| |
| retcode = func(filep, process, kdata); |
| |
| if (cmd & IOC_OUT) |
| if (copy_to_user((void __user *)arg, kdata, usize) != 0) |
| retcode = -EFAULT; |
| |
| err_i1: |
| if (!ioctl) |
| dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", |
| task_pid_nr(current), cmd, nr); |
| |
| if (kdata != stack_kdata) |
| kfree(kdata); |
| |
| if (retcode) |
| dev_dbg(kfd_device, "ret = %d\n", retcode); |
| |
| return retcode; |
| } |
| |
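| /* |
|  * mmap handler: the mmap offset encodes the mapping type and GPU id; |
|  * dispatch to the doorbell, event page or reserved memory mmap |
|  * implementation accordingly. |
|  */ |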
| static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) |
| { |
| struct kfd_process *process; |
| struct kfd_dev *dev = NULL; |
| unsigned long vm_pgoff; |
| unsigned int gpu_id; |
| |
| process = kfd_get_process(current); |
| if (IS_ERR(process)) |
| return PTR_ERR(process); |
| |
| vm_pgoff = vma->vm_pgoff; |
| vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff); |
| gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff); |
| if (gpu_id) |
| dev = kfd_device_by_id(gpu_id); |
| |
| switch (vm_pgoff & KFD_MMAP_TYPE_MASK) { |
| case KFD_MMAP_TYPE_DOORBELL: |
| if (!dev) |
| return -ENODEV; |
| return kfd_doorbell_mmap(dev, process, vma); |
| |
| case KFD_MMAP_TYPE_EVENTS: |
| return kfd_event_mmap(process, vma); |
| |
| case KFD_MMAP_TYPE_RESERVED_MEM: |
| if (!dev) |
| return -ENODEV; |
| return kfd_reserved_mem_mmap(dev, process, vma); |
| } |
| |
| return -EFAULT; |
| } |