Merge biscotti-4.18 into machina-4.18
This integrates support for virtio-wayland and virtio-magma from the
biscotti-4.18 branch. It also merges biscotti_defconfig into
machina_defconfig, deleting the former.
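
For reference, guest userspace reaches virtio-magma through ioctls on the
magma0 character device the driver registers. A minimal handshake sketch,
assuming the accompanying virtmagma UAPI header is installed as
<linux/virtmagma.h> and the node appears as /dev/magma0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/virtmagma.h>

	int main(void)
	{
		/* the driver requires handshake_inout to hold the SEND token */
		struct virtmagma_ioctl_args_handshake args = {
			.handshake_inout = VIRTMAGMA_HANDSHAKE_SEND,
		};
		int fd = open("/dev/magma0", O_RDWR);

		if (fd < 0 || ioctl(fd, VIRTMAGMA_IOCTL_HANDSHAKE, &args))
			return 1;
		/* on success the driver fills in version_out */
		printf("virtio-magma version %u\n", (unsigned)args.version_out);
		close(fd);
		return 0;
	}
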
Test: ran guest_integration_tests
Change-Id: I19b9df0e5766526f93dc0b89be6cc8fdb025be9e
diff --git a/arch/x86/configs/machina_defconfig b/arch/x86/configs/machina_defconfig
index a2aed73..53d424c 100644
--- a/arch/x86/configs/machina_defconfig
+++ b/arch/x86/configs/machina_defconfig
@@ -1,34 +1,421 @@
+CONFIG_DEFAULT_HOSTNAME="localhost"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=18
CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_RELAY=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_UID16 is not set
+# CONFIG_PCSPKR_PLATFORM is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_ARCH_MMAP_RND_BITS=31
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_SMP=y
CONFIG_X86_X2APIC=y
CONFIG_HYPERVISOR_GUEST=y
CONFIG_PARAVIRT=y
CONFIG_KEXEC=y
+CONFIG_GART_IOMMU=y
+CONFIG_SCHED_SMT=y
+CONFIG_PREEMPT=y
+CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+# CONFIG_X86_MCE is not set
+# CONFIG_MICROCODE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_ZSMALLOC=y
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+# CONFIG_MTRR_SANITIZER is not set
+CONFIG_HZ_1000=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+CONFIG_PM_TRACE_RTC=y
+# CONFIG_ACPI_AC is not set
+# CONFIG_ACPI_BATTERY is not set
+# CONFIG_ACPI_FAN is not set
+# CONFIG_ACPI_THERMAL is not set
+# CONFIG_X86_PM_TIMER is not set
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_INTEL_IDLE=y
+# CONFIG_PCI_MMCONFIG is not set
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_MSI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_BINFMT_MISC=y
+CONFIG_IA32_EMULATION=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_IPV6_SIT is not set
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NF_CT_NETLINK_HELPER=y
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_TABLES=y
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_CT=y
+CONFIG_NFT_COUNTER=y
+CONFIG_NFT_LOG=y
+CONFIG_NFT_LIMIT=y
+CONFIG_NFT_MASQ=y
+CONFIG_NFT_REDIR=y
+CONFIG_NFT_NAT=y
+CONFIG_NFT_QUEUE=y
+CONFIG_NFT_REJECT=y
+CONFIG_NFT_COMPAT=y
+CONFIG_NFT_HASH=y
+CONFIG_NETFILTER_XT_SET=y
+CONFIG_NETFILTER_XT_TARGET_AUDIT=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_DSCP=y
+CONFIG_NETFILTER_XT_TARGET_HMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_CGROUP=y
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_CPU=y
+CONFIG_NETFILTER_XT_MATCH_DCCP=y
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_L2TP=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_NFACCT=y
+CONFIG_NETFILTER_XT_MATCH_OSF=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_RATEEST=y
+CONFIG_NETFILTER_XT_MATCH_REALM=y
+CONFIG_NETFILTER_XT_MATCH_RECENT=y
+CONFIG_NETFILTER_XT_MATCH_SCTP=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_SET=y
+CONFIG_IP_SET_BITMAP_IP=y
+CONFIG_IP_SET_BITMAP_IPMAC=y
+CONFIG_IP_SET_BITMAP_PORT=y
+CONFIG_IP_SET_HASH_IP=y
+CONFIG_IP_SET_HASH_IPMARK=y
+CONFIG_IP_SET_HASH_IPPORT=y
+CONFIG_IP_SET_HASH_IPPORTIP=y
+CONFIG_IP_SET_HASH_IPPORTNET=y
+CONFIG_IP_SET_HASH_MAC=y
+CONFIG_IP_SET_HASH_NETPORTNET=y
+CONFIG_IP_SET_HASH_NET=y
+CONFIG_IP_SET_HASH_NETNET=y
+CONFIG_IP_SET_HASH_NETPORT=y
+CONFIG_IP_SET_HASH_NETIFACE=y
+CONFIG_IP_SET_LIST_SET=y
+CONFIG_IP_VS=y
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_DEBUG=y
+CONFIG_IP_VS_NFCT=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NFT_CHAIN_ROUTE_IPV4=y
+CONFIG_NFT_DUP_IPV4=y
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_LOG_ARP=y
+CONFIG_NFT_CHAIN_NAT_IPV4=y
+CONFIG_NFT_MASQ_IPV4=y
+CONFIG_NFT_REDIR_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_SYNPROXY=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_CLUSTERIP=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NFT_CHAIN_ROUTE_IPV6=y
+CONFIG_NFT_CHAIN_NAT_IPV6=y
+CONFIG_NFT_MASQ_IPV6=y
+CONFIG_NFT_REDIR_IPV6=y
+CONFIG_NFT_DUP_IPV6=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_SYNPROXY=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_IP6_NF_SECURITY=y
+CONFIG_IP6_NF_NAT=y
+CONFIG_IP6_NF_TARGET_MASQUERADE=y
+CONFIG_IP6_NF_TARGET_NPT=y
+CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NFT_BRIDGE_REJECT=y
+CONFIG_NF_LOG_BRIDGE=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_VLAN=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
CONFIG_VSOCKETS=y
CONFIG_VIRTIO_VSOCKETS=y
+# CONFIG_WIRELESS is not set
+CONFIG_RFKILL=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEBUG_DEVRES=y
+CONFIG_CONNECTOR=y
+CONFIG_OF=y
+CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_THIN_PROVISIONING=y
CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_IPVLAN=y
+CONFIG_IPVTAP=y
+CONFIG_VXLAN=y
+CONFIG_TUN=y
+CONFIG_VETH=y
CONFIG_VIRTIO_NET=y
+# CONFIG_ETHERNET is not set
+CONFIG_PHYLIB=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+# CONFIG_SERIAL_8250_MID is not set
CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM_AMD is not set
+# CONFIG_HW_RANDOM_VIA is not set
CONFIG_HW_RANDOM_VIRTIO=y
-# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_NVRAM=y
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL_OF is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_VGA_ARB is not set
CONFIG_DRM=y
CONFIG_DRM_VIRTIO_GPU=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VIRTIO_WL=y
+CONFIG_VIRTIO_MAGMA=y
+CONFIG_STAGING=y
+CONFIG_POWERCAP=y
+CONFIG_INTEL_RAPL=y
+CONFIG_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_FSCACHE=y
+CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V2 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_9P_FS=y
+CONFIG_9P_FS_POSIX_ACL=y
+CONFIG_9P_FS_SECURITY=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_FTRACE is not set
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_EARLY_PRINTK_DBGP=y
+CONFIG_IO_DELAY_0XED=y
+CONFIG_DEBUG_BOOT_PARAMS=y
+CONFIG_OPTIMIZE_INLINING=y
+CONFIG_PUNIT_ATOM_DEBUG=y
+CONFIG_ENCRYPTED_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PATH=y
+CONFIG_LSM_MMAP_MIN_ADDR=32768
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_YAMA=y
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_ECHAINIV is not set
+CONFIG_CRYPTO_LRW=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC_ITU_T=y
+CONFIG_XZ_DEC=y
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 3589764..e71de1f 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -83,4 +83,20 @@
If unsure, say 'N'.
+config VIRTIO_WL
+ bool "Virtio Wayland driver"
+ depends on VIRTIO
+ ---help---
+	  This driver supports proxying of a Wayland socket from host to guest.
+
+ If unsure, say 'N'.
+
+config VIRTIO_MAGMA
+ bool "Virtio Magma driver"
+ depends on VIRTIO
+ ---help---
+	  This driver supports proxying of a Magma device from host to guest.
+
+ If unsure, say 'N'.
+
endif # VIRTIO_MENU
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 3a2b5c5..a0f07695 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -6,3 +6,5 @@
virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
+obj-$(CONFIG_VIRTIO_WL) += virtio_wl.o
+obj-$(CONFIG_VIRTIO_MAGMA) += virtio_magma.o
diff --git a/drivers/virtio/virtio_magma.c b/drivers/virtio/virtio_magma.c
new file mode 100644
index 0000000..cd6da57
--- /dev/null
+++ b/drivers/virtio/virtio_magma.c
@@ -0,0 +1,1699 @@
+// Copyright 2019 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <linux/anon_inodes.h>
+#include <linux/cdev.h>
+#include <linux/compat.h>
+#include <linux/hashtable.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/virtio.h>
+#include <linux/virtio_magma.h>
+#include <linux/vmalloc.h>
+
+#define VQ_DESCRIPTOR_SIZE PAGE_SIZE
+#define CONNECTIONS_HASHTABLE_BITS 4
+#define CONNECTION_OBJECTS_HASHTABLE_BITS 12
+#define NOTIFICATION_MAX_BYTES 65536
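+/* Sanity-check that the user-supplied request and response sizes are large
+   enough for the virtio wire structs before they are dereferenced. */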
+#define COMMAND_OK(command, request_ptr, response_ptr) \
+ ((command)->request_size >= sizeof(*(request_ptr)) && \
+ (command)->response_size >= sizeof(*(response_ptr)))
+#define WAYLAND_DEVICE_PATH "/dev/wl0"
+#define MESSAGE_CACHE_OBJECT_SIZE 64
+
+struct virtmagma_info {
+ dev_t dev_num;
+ struct device *dev;
+ struct class *class;
+ struct cdev cdev;
+
+ struct mutex vq_out_lock;
+ struct virtqueue *vq_out;
+ struct work_struct out_vq_work;
+ wait_queue_head_t out_waitq;
+
+ struct mutex instances_lock;
+ struct idr instances;
+};
+
+enum virtmagma_connection_object_type {
+ MAGMA_BUFFER,
+ MAGMA_SEMAPHORE,
+ MAGMA_CONTEXT
+};
+
+static const char *virtmagma_connection_object_type_string(
+ enum virtmagma_connection_object_type type)
+{
+ switch (type) {
+ case MAGMA_BUFFER:
+ return "MAGMA_BUFFER";
+ case MAGMA_SEMAPHORE:
+ return "MAGMA_SEMAPHORE";
+ case MAGMA_CONTEXT:
+ return "MAGMA_CONTEXT";
+ default:
+ return "[UNKNOWN]";
+ }
+}
+
+struct virtmagma_buffer {
+ size_t size_requested;
+ size_t size_allocated;
+ bool is_command_buffer;
+};
+
+struct virtmagma_semaphore {
+ uint8_t dummy;
+};
+
+struct virtmagma_context {
+ uint8_t dummy;
+};
+
+struct virtmagma_connection;
+struct virtmagma_connection_object {
+ struct virtmagma_connection *parent_connection;
+ enum virtmagma_connection_object_type type;
+ uint64_t host_value;
+ union {
+ struct virtmagma_buffer buffer;
+ struct virtmagma_semaphore semaphore;
+ struct virtmagma_context context;
+ };
+ struct hlist_node node;
+};
+
+struct virtmagma_connection {
+ struct virtmagma_instance *parent_instance;
+ uint64_t host_value;
+ DECLARE_HASHTABLE(objects, CONNECTION_OBJECTS_HASHTABLE_BITS);
+ struct hlist_node node;
+};
+
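+/* Per-open state. Each open of the magma device gets an instance tracking
+   its connections, an anonymous "mmfd" used to mmap buffers, and a small
+   slab cache for short virtio request/response messages. */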
+struct virtmagma_instance {
+ struct virtmagma_info *vi;
+ int id;
+ DECLARE_HASHTABLE(connections, CONNECTIONS_HASHTABLE_BITS);
+ int mmfd;
+ struct {
+ struct virtmagma_buffer *buffer;
+ u64 phys_addr;
+ bool pending;
+ } mmap_params;
+ struct {
+ pid_t pid;
+ pid_t tgid;
+ char comm[TASK_COMM_LEN];
+ } creator;
+ struct kmem_cache *msg_cache;
+ void *wayland_device_private_data;
+};
+
+struct virtmagma_virtio_command {
+ void *request_ptr;
+ size_t request_size;
+ void *response_ptr;
+ size_t response_size;
+};
+
+static void virtmagma_cache_ctor(void *p)
+{
+ memset(p, 0, MESSAGE_CACHE_OBJECT_SIZE);
+}
+
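+/* Send one request/response pair on the out virtqueue and block until the
+   device has consumed it; the on-stack completion is used as the descriptor
+   token and is signalled from the out-queue work handler. */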
+static int vq_out_send_sync(struct virtmagma_info *vi,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ DECLARE_COMPLETION_ONSTACK(finish_completion);
+ struct scatterlist sg_out;
+ struct scatterlist sg_in;
+ struct scatterlist *sgs[] = { &sg_out, &sg_in };
+ init_completion(&finish_completion);
+ sg_init_one(&sg_out, command->request_ptr, command->request_size);
+ sg_init_one(&sg_in, command->response_ptr, command->response_size);
+
+ mutex_lock(&vi->vq_out_lock);
+ while ((ret = virtqueue_add_sgs(vi->vq_out, sgs, 1, 1,
+ &finish_completion, GFP_KERNEL)) ==
+ -ENOSPC) {
+ mutex_unlock(&vi->vq_out_lock);
+ if (!wait_event_timeout(vi->out_waitq, vi->vq_out->num_free > 0,
+ HZ))
+ return -EBUSY;
+ mutex_lock(&vi->vq_out_lock);
+ }
+	if (ret) {
+		/* nothing was queued, so do not wait for a completion */
+		mutex_unlock(&vi->vq_out_lock);
+		return ret;
+	}
+	virtqueue_kick(vi->vq_out);
+	mutex_unlock(&vi->vq_out_lock);
+
+	wait_for_completion(&finish_completion);
+
+	return 0;
+}
+
+/* Verify that the response to a virtio command has the expected type.
+ Note that a match indicates only that the proxying of the magma command
+ has succeeded, not necessarily that the magma command itself did. */
+static int virtmagma_check_expected_response_type(void *request, void *response)
+{
+ struct virtio_magma_ctrl_hdr *request_hdr = request;
+ struct virtio_magma_ctrl_hdr *response_hdr = response;
+ if (virtio_magma_expected_response_type(request_hdr->type) !=
+ response_hdr->type) {
+ pr_warn("virtmagma: unexpected virtio response %s (%d) to request %s (%d)",
+ virtio_magma_ctrl_type_string(response_hdr->type),
+ response_hdr->type,
+ virtio_magma_ctrl_type_string(request_hdr->type),
+ request_hdr->type);
+ return -EIO;
+ }
+ return 0;
+}
+
+static struct virtmagma_connection *
+ get_connection(struct virtmagma_instance *instance, uint64_t id)
+{
+ struct virtmagma_connection *connection = NULL;
+ hash_for_each_possible (instance->connections, connection, node, id) {
+ if (connection->host_value == id)
+ break;
+ }
+ if (!connection) {
+ pr_warn("virtmagma: invalid connection id %lld", id);
+ }
+ return connection;
+}
+
+static struct virtmagma_connection_object *
+ get_connection_object(struct virtmagma_connection *connection,
+ uint64_t id,
+ enum virtmagma_connection_object_type type)
+{
+ struct virtmagma_connection_object *object = NULL;
+ hash_for_each_possible (connection->objects, object, node, id) {
+ if (object->type == type && object->host_value == id)
+ break;
+ }
+ if (!object) {
+ pr_warn("virtmagma: invalid %s object id %lld",
+ virtmagma_connection_object_type_string(type), id);
+ }
+ return object;
+}
+
+static int control_type(void *p)
+{
+ return ((struct virtio_magma_ctrl_hdr *)p)->type;
+}
+
+static int release_buffer(struct virtmagma_buffer *buffer)
+{
+ int ret;
+ struct virtio_magma_release_buffer_ctrl *request;
+ struct virtio_magma_release_buffer_resp *response;
+ struct virtmagma_virtio_command command;
+ struct virtmagma_connection_object *object = container_of(
+ buffer, struct virtmagma_connection_object, buffer);
+ BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE);
+ BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE);
+
+ request = kmem_cache_alloc(
+ object->parent_connection->parent_instance->msg_cache,
+ GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ response = kmem_cache_alloc(
+ object->parent_connection->parent_instance->msg_cache,
+ GFP_KERNEL);
+ if (!response) {
+ ret = -ENOMEM;
+ goto free_request;
+ }
+
+ request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_BUFFER;
+ request->connection = object->parent_connection->host_value;
+ request->buffer = object->host_value;
+
+ command.request_ptr = request;
+ command.request_size = sizeof(*request);
+ command.response_ptr = response;
+ command.response_size = sizeof(*response);
+
+ ret = vq_out_send_sync(object->parent_connection->parent_instance->vi,
+ &command);
+ if (ret)
+ goto free_response;
+
+ if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_BUFFER)
+ ret = -EIO;
+
+free_response:
+ kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+ response);
+
+free_request:
+ kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+ request);
+
+ return ret;
+}
+
+static int release_command_buffer(struct virtmagma_buffer *buffer)
+{
+ int ret;
+ struct virtio_magma_release_command_buffer_ctrl *request;
+ struct virtio_magma_release_command_buffer_resp *response;
+ struct virtmagma_virtio_command command;
+ struct virtmagma_connection_object *object = container_of(
+ buffer, struct virtmagma_connection_object, buffer);
+ BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE);
+ BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE);
+
+ request = kmem_cache_alloc(
+ object->parent_connection->parent_instance->msg_cache,
+ GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ response = kmem_cache_alloc(
+ object->parent_connection->parent_instance->msg_cache,
+ GFP_KERNEL);
+ if (!response) {
+ ret = -ENOMEM;
+ goto free_request;
+ }
+
+ request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER;
+ request->connection = object->parent_connection->host_value;
+ request->command_buffer = object->host_value;
+
+ command.request_ptr = request;
+ command.request_size = sizeof(*request);
+ command.response_ptr = response;
+ command.response_size = sizeof(*response);
+
+ ret = vq_out_send_sync(object->parent_connection->parent_instance->vi,
+ &command);
+ if (ret)
+ goto free_response;
+
+ if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER)
+ ret = -EIO;
+
+free_response:
+ kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+ response);
+
+free_request:
+ kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+ request);
+
+ return ret;
+}
+
+static int release_semaphore(struct virtmagma_semaphore *semaphore)
+{
+ int ret;
+ struct virtio_magma_release_semaphore_ctrl *request;
+ struct virtio_magma_release_semaphore_resp *response;
+ struct virtmagma_virtio_command command;
+ struct virtmagma_connection_object *object = container_of(
+ semaphore, struct virtmagma_connection_object, semaphore);
+ BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE);
+ BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE);
+
+ request = kmem_cache_alloc(
+ object->parent_connection->parent_instance->msg_cache,
+ GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ response = kmem_cache_alloc(
+ object->parent_connection->parent_instance->msg_cache,
+ GFP_KERNEL);
+ if (!response) {
+ ret = -ENOMEM;
+ goto free_request;
+ }
+
+ request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE;
+ request->connection = object->parent_connection->host_value;
+ request->semaphore = object->host_value;
+
+ command.request_ptr = request;
+ command.request_size = sizeof(*request);
+ command.response_ptr = response;
+ command.response_size = sizeof(*response);
+
+ ret = vq_out_send_sync(object->parent_connection->parent_instance->vi,
+ &command);
+ if (ret)
+ goto free_response;
+
+ if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE)
+ ret = -EIO;
+
+free_response:
+ kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+ response);
+
+free_request:
+ kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+ request);
+
+ return ret;
+}
+
+static int release_context(struct virtmagma_context *context)
+{
+ int ret;
+ struct virtio_magma_release_context_ctrl *request;
+ struct virtio_magma_release_context_resp *response;
+ struct virtmagma_virtio_command command;
+ struct virtmagma_connection_object *object = container_of(
+ context, struct virtmagma_connection_object, context);
+ BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE);
+ BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE);
+
+ request = kmem_cache_alloc(
+ object->parent_connection->parent_instance->msg_cache,
+ GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ response = kmem_cache_alloc(
+ object->parent_connection->parent_instance->msg_cache,
+ GFP_KERNEL);
+ if (!response) {
+ ret = -ENOMEM;
+ goto free_request;
+ }
+
+ request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_CONTEXT;
+ request->connection = object->parent_connection->host_value;
+ request->context_id = object->host_value;
+
+ command.request_ptr = request;
+ command.request_size = sizeof(*request);
+ command.response_ptr = response;
+ command.response_size = sizeof(*response);
+
+ ret = vq_out_send_sync(object->parent_connection->parent_instance->vi,
+ &command);
+ if (ret)
+ goto free_response;
+
+ if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_CONTEXT)
+ ret = -EIO;
+
+free_response:
+ kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+ response);
+
+free_request:
+ kmem_cache_free(object->parent_connection->parent_instance->msg_cache,
+ request);
+
+ return ret;
+}
+
+static int release_connection(struct virtmagma_connection *connection)
+{
+ int ret;
+ int bkt;
+ struct virtmagma_connection_object *object;
+ struct virtio_magma_release_connection_ctrl *request;
+ struct virtio_magma_release_connection_resp *response;
+ struct virtmagma_virtio_command command;
+ uint64_t leaked_buffers = 0;
+ uint64_t leaked_command_buffers = 0;
+ uint64_t leaked_semaphores = 0;
+ uint64_t leaked_contexts = 0;
+
+ /* first, release any child objects */
+
+ hash_for_each (connection->objects, bkt, object, node) {
+ switch (object->type) {
+ case MAGMA_BUFFER:
+ if (object->buffer.is_command_buffer) {
+ release_command_buffer(&object->buffer);
+ ++leaked_command_buffers;
+ } else {
+ release_buffer(&object->buffer);
+ ++leaked_buffers;
+ }
+ break;
+ case MAGMA_CONTEXT:
+ release_context(&object->context);
+ ++leaked_contexts;
+ break;
+ case MAGMA_SEMAPHORE:
+ release_semaphore(&object->semaphore);
+ ++leaked_semaphores;
+ break;
+ default:
+ pr_err("virtmagma: unknown connection object (%d)",
+ object->type);
+ break;
+ }
+ }
+ if (leaked_buffers || leaked_command_buffers || leaked_semaphores ||
+ leaked_contexts) {
+ pr_info("virtmagma: connection %lld from command %s closed with leaked objects:\n",
+ connection->host_value,
+ connection->parent_instance->creator.comm);
+ pr_cont("virtmagma: buffers: %lld\n", leaked_buffers);
+ pr_cont("virtmagma: command buffers: %lld\n",
+ leaked_command_buffers);
+ pr_cont("virtmagma: semaphores: %lld\n", leaked_semaphores);
+ pr_cont("virtmagma: contexts: %lld\n", leaked_contexts);
+ }
+
+ /* now release the connection */
+
+ BUILD_BUG_ON(sizeof(*request) > MESSAGE_CACHE_OBJECT_SIZE);
+ BUILD_BUG_ON(sizeof(*response) > MESSAGE_CACHE_OBJECT_SIZE);
+
+ request = kmem_cache_alloc(connection->parent_instance->msg_cache,
+ GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ response = kmem_cache_alloc(connection->parent_instance->msg_cache,
+ GFP_KERNEL);
+ if (!response) {
+ ret = -ENOMEM;
+ goto free_request;
+ }
+
+ request->hdr.type = VIRTIO_MAGMA_CMD_RELEASE_CONNECTION;
+ request->connection = connection->host_value;
+
+ command.request_ptr = request;
+ command.request_size = sizeof(*request);
+ command.response_ptr = response;
+ command.response_size = sizeof(*response);
+
+ ret = vq_out_send_sync(connection->parent_instance->vi, &command);
+ if (ret)
+ goto free_response;
+
+ if (response->hdr.type != VIRTIO_MAGMA_RESP_RELEASE_CONNECTION)
+ ret = -EIO;
+
+free_response:
+ kmem_cache_free(connection->parent_instance->msg_cache, response);
+
+free_request:
+ kmem_cache_free(connection->parent_instance->msg_cache, request);
+
+ return ret;
+}
+
+static int destroy_connection(struct virtmagma_connection *connection)
+{
+	int bkt;
+	struct hlist_node *tmp;
+	struct virtmagma_connection_object *object;
+
+	/* _safe variant: objects are freed while walking the table */
+	hash_for_each_safe (connection->objects, bkt, tmp, object, node) {
+		hash_del(&object->node);
+		kfree(object);
+	}
+ hash_del(&connection->node);
+ kfree(connection);
+ return 0;
+}
+
+static int destroy_instance(int id, void *p, void *data)
+{
+ struct virtmagma_instance *instance = p;
+	struct virtmagma_connection *connection;
+	struct hlist_node *tmp;
+	int bkt;
+	uint64_t leaked_connections = 0;
+
+ hash_for_each (instance->connections, bkt, connection, node) {
+ ++leaked_connections;
+ }
+ if (leaked_connections) {
+ pr_info("virtmagma: command %s exited with %lld leaked connections",
+ instance->creator.comm, leaked_connections);
+ }
+	hash_for_each_safe (instance->connections, bkt, tmp, connection, node) {
+ release_connection(connection);
+ destroy_connection(connection);
+ }
+
+ kmem_cache_destroy(instance->msg_cache);
+
+ kfree(instance);
+ return 0;
+}
+
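+/* Buffer mapping is a two-step protocol: a magma_map ioctl stashes the
+   buffer and the address returned by the host in mmap_params, and userspace
+   must then mmap the instance's mmfd to actually receive the pages. */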
+static int virtmagma_mmfd_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct virtmagma_instance *instance = filp->private_data;
+ unsigned long vm_size = vma->vm_end - vma->vm_start;
+ size_t max_map_size;
+ int ret;
+
+ if (!instance)
+ return -ENODEV;
+
+ if (!instance->mmap_params.pending) {
+ pr_warn("virtmagma: user called mmap on the mmfd without first submitting a magma_map ioctl");
+ return -EINVAL;
+ }
+
+ instance->mmap_params.pending = false;
+
+ if (instance->mmap_params.buffer->is_command_buffer)
+ max_map_size = instance->mmap_params.buffer->size_requested;
+ else
+ max_map_size = instance->mmap_params.buffer->size_allocated;
+ max_map_size = PAGE_ALIGN(max_map_size);
+
+ if (vm_size > max_map_size) {
+		pr_warn("virtmagma: user tried to mmap with a size (%lu) larger than the buffer's size (%zu)",
+ vm_size, max_map_size);
+ return -EINVAL;
+ }
+
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ instance->mmap_params.phys_addr / PAGE_SIZE,
+ vm_size, vma->vm_page_prot);
+
+ if (ret)
+ return ret;
+
+ vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+ return ret;
+}
+
+static const struct file_operations virtmagma_mmfd_fops = {
+ .mmap = virtmagma_mmfd_mmap,
+};
+
+static int create_instance(struct virtmagma_info *vi,
+ struct virtmagma_instance **instance_out)
+{
+ int ret;
+ struct file *filp;
+ struct virtmagma_instance *instance;
+
+ *instance_out = NULL;
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance)
+ return -ENOMEM;
+ instance->vi = vi;
+
+ mutex_lock(&vi->instances_lock);
+ ret = idr_alloc(&vi->instances, instance, 1, -1, GFP_KERNEL);
+ mutex_unlock(&vi->instances_lock);
+ if (ret <= 0) {
+ ret = -ENOMEM;
+ goto free_instance;
+ }
+ instance->id = ret;
+
+ ret = anon_inode_getfd("[virtmagma_mmfd]", &virtmagma_mmfd_fops,
+ instance, O_RDWR);
+ if (ret < 0) {
+ goto free_instance;
+ }
+ instance->mmfd = ret;
+
+ hash_init(instance->connections);
+ instance->creator.pid = current->pid;
+ instance->creator.tgid = current->tgid;
+ memcpy(instance->creator.comm, current->comm,
+ sizeof(instance->creator.comm));
+ instance->creator.comm[sizeof(instance->creator.comm) - 1] = 0;
+
+	filp = filp_open(WAYLAND_DEVICE_PATH, O_RDWR, 0);
+	if (!IS_ERR(filp)) {
+		instance->wayland_device_private_data = filp->private_data;
+		filp_close(filp, 0);
+	} else {
+		pr_warn("virtmagma: failed to open wayland device at %s\n",
+			WAYLAND_DEVICE_PATH);
+		pr_cont("virtmagma: magma_export will not be available\n");
+	}
+
+ instance->msg_cache =
+ kmem_cache_create("virtmagma_cache", MESSAGE_CACHE_OBJECT_SIZE,
+ MESSAGE_CACHE_OBJECT_SIZE, 0,
+ virtmagma_cache_ctor);
+ if (!instance->msg_cache) {
+ pr_err("virtmagma: failed to create message cache");
+ return -ENOMEM;
+ }
+
+ *instance_out = instance;
+
+ return 0;
+
+free_instance:
+ kfree(instance);
+
+ return ret;
+}
+
+static int virtmagma_command_magma_create_connection(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection;
+ struct virtio_magma_create_connection_ctrl *request =
+ command->request_ptr;
+ struct virtio_magma_create_connection_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ /* pass on magma errors without creating a connection object */
+ if (response->result_return) {
+ pr_warn("virtmagma: magma_create_connection returned %d",
+ (int32_t)response->result_return);
+ return 0; /* the ioctl is still successful */
+ }
+
+ connection = kzalloc(sizeof(*connection), GFP_KERNEL);
+ if (!connection)
+ return -ENOMEM;
+
+ connection->parent_instance = instance;
+ connection->host_value = response->connection_out;
+ hash_init(connection->objects);
+
+ hash_add(instance->connections, &connection->node,
+ connection->host_value);
+
+ return 0;
+}
+
+static int virtmagma_command_magma_release_connection(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection = NULL;
+ struct virtio_magma_release_connection_ctrl *request =
+ command->request_ptr;
+ struct virtio_magma_release_connection_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ connection = get_connection(instance, request->connection);
+ if (!connection)
+ return -EINVAL;
+
+ ret = release_connection(connection);
+ if (ret)
+ return ret;
+
+ return destroy_connection(connection);
+}
+
+static int virtmagma_command_magma_create_context(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection;
+ struct virtmagma_connection_object *object;
+ struct virtio_magma_create_context_ctrl *request = command->request_ptr;
+ struct virtio_magma_create_context_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ connection = get_connection(instance, request->connection);
+ if (!connection)
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ /* magma_create_context does not return errors */
+
+ object = kzalloc(sizeof(*object), GFP_KERNEL);
+ if (!object)
+ return -ENOMEM;
+
+ object->parent_connection = connection;
+ object->host_value = response->context_id_out;
+ object->type = MAGMA_CONTEXT;
+
+ hash_add(connection->objects, &object->node, object->host_value);
+
+ return 0;
+}
+
+static int virtmagma_command_magma_release_context(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection = NULL;
+ struct virtmagma_connection_object *object = NULL;
+ struct virtio_magma_release_context_ctrl *request =
+ command->request_ptr;
+ struct virtio_magma_release_context_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ connection = get_connection(instance, request->connection);
+ if (!connection)
+ return -EINVAL;
+
+ object = get_connection_object(connection, request->context_id,
+ MAGMA_CONTEXT);
+ if (!object)
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ hash_del(&object->node);
+ kfree(object);
+
+ return 0;
+}
+
+static int virtmagma_command_magma_create_buffer(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection;
+ struct virtmagma_connection_object *object;
+ struct virtio_magma_create_buffer_ctrl *request = command->request_ptr;
+ struct virtio_magma_create_buffer_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ connection = get_connection(instance, request->connection);
+ if (!connection)
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ /* pass on magma errors without creating a buffer object */
+ if (response->result_return) {
+ pr_warn("virtmagma: magma_create_buffer returned %d",
+ (int32_t)response->result_return);
+ return 0; /* the ioctl is still successful */
+ }
+
+ object = kzalloc(sizeof(*object), GFP_KERNEL);
+ if (!object)
+ return -ENOMEM;
+
+ object->parent_connection = connection;
+ object->host_value = response->buffer_out;
+ object->type = MAGMA_BUFFER;
+ object->buffer.size_requested = request->size;
+ object->buffer.size_allocated = response->size_out;
+ object->buffer.is_command_buffer = false;
+
+ hash_add(connection->objects, &object->node, object->host_value);
+
+ return 0;
+}
+
+static int virtmagma_command_magma_release_buffer(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection = NULL;
+ struct virtmagma_connection_object *object = NULL;
+ struct virtio_magma_release_buffer_ctrl *request = command->request_ptr;
+ struct virtio_magma_release_buffer_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ connection = get_connection(instance, request->connection);
+ if (!connection)
+ return -EINVAL;
+
+ object = get_connection_object(connection, request->buffer,
+ MAGMA_BUFFER);
+ if (!object)
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ hash_del(&object->node);
+ kfree(object);
+
+ return 0;
+}
+
+static int virtmagma_command_magma_create_command_buffer(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection;
+ struct virtmagma_connection_object *object;
+ struct virtio_magma_create_command_buffer_ctrl *request =
+ command->request_ptr;
+ struct virtio_magma_create_command_buffer_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ connection = get_connection(instance, request->connection);
+ if (!connection)
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ /* pass on magma errors without creating a command_buffer object */
+ if (response->result_return) {
+ pr_warn("virtmagma: magma_create_command_buffer returned %d",
+ (int32_t)response->result_return);
+ return 0; /* the ioctl is still successful */
+ }
+
+ object = kzalloc(sizeof(*object), GFP_KERNEL);
+ if (!object)
+ return -ENOMEM;
+
+ object->parent_connection = connection;
+ object->host_value = response->buffer_out;
+ object->type = MAGMA_BUFFER;
+ object->buffer.size_requested = request->size;
+ object->buffer.is_command_buffer = true;
+
+ hash_add(connection->objects, &object->node, object->host_value);
+
+ return 0;
+}
+
+static int virtmagma_command_magma_release_command_buffer(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection = NULL;
+ struct virtmagma_connection_object *object = NULL;
+ struct virtio_magma_release_command_buffer_ctrl *request =
+ command->request_ptr;
+ struct virtio_magma_release_command_buffer_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ connection = get_connection(instance, request->connection);
+ if (!connection)
+ return -EINVAL;
+
+ object = get_connection_object(connection, request->command_buffer,
+ MAGMA_BUFFER);
+ if (!object)
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ hash_del(&object->node);
+ kfree(object);
+
+ return 0;
+}
+
+static int virtmagma_command_magma_submit_command_buffer(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection = NULL;
+ struct virtmagma_connection_object *object = NULL;
+ struct virtio_magma_submit_command_buffer_ctrl *request =
+ command->request_ptr;
+ struct virtio_magma_submit_command_buffer_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ connection = get_connection(instance, request->connection);
+ if (!connection)
+ return -EINVAL;
+
+ object = get_connection_object(connection, request->command_buffer,
+ MAGMA_BUFFER);
+ if (!object)
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ /* command buffers are implicitly freed on submit */
+
+ hash_del(&object->node);
+ kfree(object);
+
+ return 0;
+}
+
+static int virtmagma_command_magma_create_semaphore(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection;
+ struct virtmagma_connection_object *object;
+ struct virtio_magma_create_semaphore_ctrl *request =
+ command->request_ptr;
+ struct virtio_magma_create_semaphore_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ connection = get_connection(instance, request->connection);
+ if (!connection)
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ /* pass on magma errors without creating a semaphore object */
+ if (response->result_return) {
+ pr_warn("virtmagma: magma_create_semaphore returned %d",
+ (int32_t)response->result_return);
+ return 0; /* the ioctl is still successful */
+ }
+
+ object = kzalloc(sizeof(*object), GFP_KERNEL);
+ if (!object)
+ return -ENOMEM;
+
+ object->parent_connection = connection;
+ object->host_value = response->semaphore_out;
+ object->type = MAGMA_SEMAPHORE;
+
+ hash_add(connection->objects, &object->node, object->host_value);
+
+ return 0;
+}
+
+static int virtmagma_command_magma_release_semaphore(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection = NULL;
+ struct virtmagma_connection_object *object = NULL;
+ struct virtio_magma_release_semaphore_ctrl *request =
+ command->request_ptr;
+ struct virtio_magma_release_semaphore_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ connection = get_connection(instance, request->connection);
+ if (!connection)
+ return -EINVAL;
+
+ object = get_connection_object(connection, request->semaphore,
+ MAGMA_SEMAPHORE);
+ if (!object)
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ hash_del(&object->node);
+ kfree(object);
+
+ return 0;
+}
+
+static int virtmagma_command_magma_map(struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtmagma_connection *connection;
+ struct virtmagma_connection_object *object;
+ struct virtio_magma_map_ctrl *request = command->request_ptr;
+ /* this ioctl has a size_t output parameter appended */
+ struct {
+ struct virtio_magma_map_resp virtio_response;
+ size_t size_to_mmap_out;
+ } *response = command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ connection = get_connection(instance, request->connection);
+ if (!connection)
+ return -EINVAL;
+
+ object = get_connection_object(connection, request->buffer,
+ MAGMA_BUFFER);
+ if (!object)
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ /* store parameters for subsequent mmap call */
+
+ instance->mmap_params.buffer = &object->buffer;
+ instance->mmap_params.phys_addr = response->virtio_response.addr_out;
+ instance->mmap_params.pending = true;
+
+ /* user must use the returned size in its subsequent mmap call */
+
+ response->size_to_mmap_out = object->buffer.size_requested;
+
+ return 0;
+}
+
+static int virtmagma_command_magma_wait_semaphores(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ size_t semaphores_size;
+ struct virtio_magma_wait_semaphores_ctrl *request =
+ command->request_ptr;
+ struct virtio_magma_wait_semaphores_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ /* reallocate request buffer with enough space for the semaphores */
+ semaphores_size = request->count * sizeof(uint64_t);
+ command->request_size = sizeof(*request) + semaphores_size;
+ command->request_ptr = kzalloc(command->request_size, GFP_KERNEL);
+ if (!command->request_ptr)
+ return -ENOMEM;
+
+ memcpy(command->request_ptr, request, sizeof(*request));
+ ret = copy_from_user((char *)command->request_ptr + sizeof(*request),
+ (void *)request->semaphores, semaphores_size);
+ if (ret)
+ return ret;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ return virtmagma_check_expected_response_type(request, response);
+}
+
+static int virtmagma_command_magma_read_notification_channel(
+ struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtio_magma_read_notification_channel_ctrl *request =
+ command->request_ptr;
+ struct virtio_magma_read_notification_channel_resp *response =
+ command->response_ptr;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ /* reallocate response buffer with additional space for notification data.
+ note that the size is not modified, as we only want the response struct
+ itself to be copied back to the user by our caller */
+
+ command->response_ptr = response =
+ kzalloc(sizeof(*response) + NOTIFICATION_MAX_BYTES, GFP_KERNEL);
+ if (!command->response_ptr)
+ return -ENOMEM;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ /* pass on magma errors without writing to the buffer */
+ if (response->result_return) {
+ pr_warn("virtmagma: magma_read_notification_channel returned %d",
+ (int32_t)response->result_return);
+ return 0; /* the ioctl is still successful */
+ }
+
+ if (response->buffer_size_out > request->buffer_size) {
+ pr_err("virtmagma: magma_read_notification_channel returned buffer_size_out (%lld) larger than buffer_size (%lld)",
+ response->buffer_size_out, request->buffer_size);
+ return -EIO;
+ }
+
+ return copy_to_user((void *)request->buffer,
+ (char *)command->response_ptr + sizeof(*response),
+ response->buffer_size_out);
+}
+
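+/* magma_export hands back a vfd id from the host; turning that into a
+   guest-side file descriptor is delegated to the virtio_wl driver. */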
+#if IS_ENABLED(CONFIG_VIRTIO_WL)
+/* use the implementation in the virtio_wl module */
+extern int virtwl_create_fd_for_vfd(void *filp_private_data, uint32_t vfd_id);
+#else
+#define virtwl_create_fd_for_vfd(a, b) (-ENODEV)
+#endif
+
+static int
+ virtmagma_command_magma_export(struct virtmagma_instance *instance,
+ struct virtmagma_virtio_command *command)
+{
+ int ret;
+ struct virtio_magma_export_ctrl *request = command->request_ptr;
+ struct virtio_magma_export_resp *response = command->response_ptr;
+
+ if (!instance->wayland_device_private_data)
+ return -ENODEV;
+
+ if (!COMMAND_OK(command, request, response))
+ return -EINVAL;
+
+ ret = vq_out_send_sync(instance->vi, command);
+ if (ret)
+ return ret;
+
+ ret = virtmagma_check_expected_response_type(request, response);
+ if (ret)
+ return ret;
+
+ /* pass on magma errors without creating a vfd */
+ if (response->result_return) {
+ pr_warn("virtmagma: magma_export returned %d",
+ (int32_t)response->result_return);
+ return 0; /* the ioctl is still successful */
+ }
+
+ ret = virtwl_create_fd_for_vfd(instance->wayland_device_private_data,
+ response->buffer_handle_out);
+ if (ret < 0) {
+ pr_err("virtmagma: failed to get vfd creation info for vfd id %lld",
+ response->buffer_handle_out);
+ return ret;
+ }
+
+ response->buffer_handle_out = ret;
+
+ return 0;
+}
+
+static int virtmagma_ioctl_handshake(struct file *filp, void __user *ptr)
+{
+ struct virtmagma_ioctl_args_handshake ioctl_args;
+ int ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args));
+ if (ret)
+ return ret;
+ if (ioctl_args.handshake_inout != VIRTMAGMA_HANDSHAKE_SEND)
+ return -EINVAL;
+ ioctl_args.handshake_inout = VIRTMAGMA_HANDSHAKE_RECV;
+ ioctl_args.version_out = VIRTMAGMA_VERSION;
+ return copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args));
+}
+
+static int virtmagma_ioctl_get_mmfd(struct file *filp, void __user *ptr)
+{
+ struct virtmagma_ioctl_args_get_mmfd ioctl_args;
+ struct virtmagma_instance *instance = filp->private_data;
+ if (!instance)
+ return -ENODEV;
+ ioctl_args.fd_out = instance->mmfd;
+ return copy_to_user(ptr, &ioctl_args, sizeof(ioctl_args));
+}
+
+int virtmagma_ioctl_magma_command(struct file *filp, void __user *ptr)
+{
+ struct virtmagma_ioctl_args_magma_command ioctl_args;
+ struct virtmagma_virtio_command command;
+ void *request;
+ void *response;
+ int request_type;
+ int ret;
+ struct virtmagma_instance *instance = filp->private_data;
+ command.request_ptr = NULL;
+ command.response_ptr = NULL;
+
+ if (!instance)
+ return -ENODEV;
+
+ if (instance->mmap_params.pending) {
+ pr_warn("virtmagma: user failed to mmap on the mmfd after submitting a magma_map ioctl");
+ return -EINVAL;
+ }
+
+ /* copy in command arguments */
+
+ ret = copy_from_user(&ioctl_args, ptr, sizeof(ioctl_args));
+ if (ret)
+ return ret;
+
+ /* verify userspace-provided pointers are accessible */
+
+ ret = !access_ok(VERIFY_READ, (void *)ioctl_args.request_address,
+ ioctl_args.request_size);
+ if (ret)
+ return -EFAULT;
+ ret = !access_ok(VERIFY_WRITE, (void *)ioctl_args.response_address,
+ ioctl_args.response_size);
+ if (ret)
+ return -EFAULT;
+
+ /* allocate buffers and copy in userspace data */
+
+ if (ioctl_args.request_size <= MESSAGE_CACHE_OBJECT_SIZE)
+ request = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL);
+ else
+ request = kzalloc(ioctl_args.request_size, GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+ if (ioctl_args.response_size <= MESSAGE_CACHE_OBJECT_SIZE)
+ response = kmem_cache_alloc(instance->msg_cache, GFP_KERNEL);
+ else
+ response = kzalloc(ioctl_args.response_size, GFP_KERNEL);
+ if (!response) {
+ ret = -ENOMEM;
+ goto free_request;
+ }
+ command.request_ptr = request;
+ command.response_ptr = response;
+ command.request_size = ioctl_args.request_size;
+ command.response_size = ioctl_args.response_size;
+
+ ret = copy_from_user(command.request_ptr,
+ (void *)ioctl_args.request_address,
+ ioctl_args.request_size);
+ if (ret)
+ goto free_response;
+
+ request_type = control_type(command.request_ptr);
+ switch (request_type) {
+ case VIRTIO_MAGMA_CMD_CREATE_CONNECTION:
+ ret = virtmagma_command_magma_create_connection(instance,
+ &command);
+ break;
+ case VIRTIO_MAGMA_CMD_RELEASE_CONNECTION:
+ ret = virtmagma_command_magma_release_connection(instance,
+ &command);
+ break;
+ case VIRTIO_MAGMA_CMD_CREATE_CONTEXT:
+ ret = virtmagma_command_magma_create_context(instance,
+ &command);
+ break;
+ case VIRTIO_MAGMA_CMD_RELEASE_CONTEXT:
+ ret = virtmagma_command_magma_release_context(instance,
+ &command);
+ break;
+ case VIRTIO_MAGMA_CMD_CREATE_BUFFER:
+ ret = virtmagma_command_magma_create_buffer(instance, &command);
+ break;
+ case VIRTIO_MAGMA_CMD_RELEASE_BUFFER:
+ ret = virtmagma_command_magma_release_buffer(instance,
+ &command);
+ break;
+ case VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER:
+ ret = virtmagma_command_magma_create_command_buffer(instance,
+ &command);
+ break;
+ case VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER:
+ ret = virtmagma_command_magma_release_command_buffer(instance,
+ &command);
+ break;
+ case VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER:
+ ret = virtmagma_command_magma_submit_command_buffer(instance,
+ &command);
+ break;
+ case VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE:
+ ret = virtmagma_command_magma_create_semaphore(instance,
+ &command);
+ break;
+ case VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE:
+ ret = virtmagma_command_magma_release_semaphore(instance,
+ &command);
+ break;
+ case VIRTIO_MAGMA_CMD_MAP:
+ ret = virtmagma_command_magma_map(instance, &command);
+ break;
+ case VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES:
+ ret = virtmagma_command_magma_wait_semaphores(instance,
+ &command);
+ break;
+ case VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL:
+ ret = virtmagma_command_magma_read_notification_channel(
+ instance, &command);
+ break;
+ case VIRTIO_MAGMA_CMD_EXPORT:
+ ret = virtmagma_command_magma_export(instance, &command);
+ break;
+ /* pass-through handlers */
+ case VIRTIO_MAGMA_CMD_QUERY:
+ case VIRTIO_MAGMA_CMD_GET_ERROR:
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_ID:
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE:
+ case VIRTIO_MAGMA_CMD_CLEAN_CACHE:
+ case VIRTIO_MAGMA_CMD_SET_CACHE_POLICY:
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_CACHE_POLICY:
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_IS_MAPPABLE:
+ case VIRTIO_MAGMA_CMD_UNMAP:
+ case VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU:
+ case VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU:
+ case VIRTIO_MAGMA_CMD_COMMIT_BUFFER:
+ case VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID:
+ case VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE:
+ case VIRTIO_MAGMA_CMD_RESET_SEMAPHORE:
+ case VIRTIO_MAGMA_CMD_GET_NOTIFICATION_CHANNEL_HANDLE:
+ case VIRTIO_MAGMA_CMD_WAIT_NOTIFICATION_CHANNEL:
+ ret = vq_out_send_sync(instance->vi, &command);
+ if (!ret)
+ ret = virtmagma_check_expected_response_type(
+ command.request_ptr, command.response_ptr);
+ break;
+ default:
+ pr_warn("virtmagma: command %s (%d) not implemented",
+ virtio_magma_ctrl_type_string(request_type),
+ request_type);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret) {
+ pr_err("virtmagma: error handling command %s (%d)",
+ virtio_magma_ctrl_type_string(request_type),
+ request_type);
+ dump_stack();
+ goto free_response;
+ }
+
+ /* copy responses back to userspace */
+
+ ret = copy_to_user((void *)ioctl_args.response_address,
+ command.response_ptr, ioctl_args.response_size);
+
+free_response:
+	if (ioctl_args.response_size <= MESSAGE_CACHE_OBJECT_SIZE)
+		kmem_cache_free(instance->msg_cache, response);
+	else
+		kfree(response);
+
+free_request:
+	if (ioctl_args.request_size <= MESSAGE_CACHE_OBJECT_SIZE)
+		kmem_cache_free(instance->msg_cache, request);
+	else
+		kfree(request);
+
+ /* Some of the handlers above may override the command members and hand
+ off allocation ownership back to us. Free them now. */
+
+ if (command.request_ptr && command.request_ptr != request)
+ kfree(command.request_ptr);
+
+ if (command.response_ptr && command.response_ptr != response)
+ kfree(command.response_ptr);
+
+ return ret;
+}
+
+static int virtmagma_open(struct inode *inodep, struct file *filp)
+{
+ int ret;
+ struct virtmagma_instance *instance;
+ struct virtmagma_info *vi =
+ container_of(inodep->i_cdev, struct virtmagma_info, cdev);
+
+ ret = create_instance(vi, &instance);
+ if (ret)
+ return ret;
+
+ filp->private_data = instance;
+
+ return 0;
+}
+
+static int virtmagma_release(struct inode *inodep, struct file *filp)
+{
+ struct virtmagma_instance *instance = filp->private_data;
+ return destroy_instance(instance->id, instance, instance->vi);
+}
+
+static void vq_out_work_handler(struct work_struct *work)
+{
+ struct virtmagma_info *vi =
+ container_of(work, struct virtmagma_info, out_vq_work);
+ unsigned int len;
+ struct completion *finish_completion;
+ bool wake_waitq = false;
+
+ mutex_lock(&vi->vq_out_lock);
+ while ((finish_completion = virtqueue_get_buf(vi->vq_out, &len)) !=
+ NULL) {
+ wake_waitq = true;
+ complete(finish_completion);
+ }
+ mutex_unlock(&vi->vq_out_lock);
+
+ if (wake_waitq)
+ wake_up_interruptible_all(&vi->out_waitq);
+}
+
+static void vq_out_cb(struct virtqueue *vq)
+{
+ struct virtmagma_info *vi = vq->vdev->priv;
+ schedule_work(&vi->out_vq_work);
+}
+
+static long virtmagma_ioctl_common(struct file *filp, unsigned int cmd,
+ void __user *ptr)
+{
+ switch (cmd) {
+ case VIRTMAGMA_IOCTL_HANDSHAKE:
+ return virtmagma_ioctl_handshake(filp, ptr);
+ case VIRTMAGMA_IOCTL_GET_MMFD:
+ return virtmagma_ioctl_get_mmfd(filp, ptr);
+ case VIRTMAGMA_IOCTL_MAGMA_COMMAND:
+ return virtmagma_ioctl_magma_command(filp, ptr);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long virtmagma_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return virtmagma_ioctl_common(filp, cmd, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long virtmagma_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return virtmagma_ioctl_common(filp, cmd, compat_ptr(arg));
+}
+#else
+#define virtmagma_ioctl_compat NULL
+#endif
+
+static const struct file_operations virtmagma_fops = {
+ .open = virtmagma_open,
+ .unlocked_ioctl = virtmagma_ioctl,
+ .compat_ioctl = virtmagma_ioctl_compat,
+ .release = virtmagma_release,
+};
+
+static int virtmagma_probe(struct virtio_device *vdev)
+{
+ int ret;
+ struct virtmagma_info *vi = NULL;
+ static const char *vq_out_name = "out";
+ vq_callback_t *callback = &vq_out_cb;
+
+ vi = kzalloc(sizeof(struct virtmagma_info), GFP_KERNEL);
+ if (!vi)
+ return -ENOMEM;
+
+ vdev->priv = vi;
+
+ ret = alloc_chrdev_region(&vi->dev_num, 0, 1, "magma");
+ if (ret) {
+		pr_warn("virtmagma: failed to allocate magma chrdev region: %d\n",
+			ret);
+ goto free_vi;
+ }
+
+ vi->class = class_create(THIS_MODULE, "magma");
+ if (IS_ERR(vi->class)) {
+ ret = PTR_ERR(vi->class);
+ pr_warn("virtmagma: failed to create magma class: %d\n", ret);
+ goto unregister_region;
+ }
+
+ vi->dev = device_create(vi->class, NULL, vi->dev_num, vi, "magma%d", 0);
+ if (IS_ERR(vi->dev)) {
+ ret = PTR_ERR(vi->dev);
+ pr_warn("virtmagma: failed to create magma0 device: %d\n", ret);
+ goto destroy_class;
+ }
+
+ cdev_init(&vi->cdev, &virtmagma_fops);
+ ret = cdev_add(&vi->cdev, vi->dev_num, 1);
+ if (ret) {
+ pr_warn("virtmagma: failed to add virtio magma character device to system: %d\n",
+ ret);
+ goto destroy_device;
+ }
+
+ mutex_init(&vi->vq_out_lock);
+ mutex_init(&vi->instances_lock);
+ idr_init(&vi->instances);
+
+ ret = virtio_find_vqs(vdev, 1, &vi->vq_out, &callback, &vq_out_name,
+ NULL);
+ if (ret) {
+ pr_warn("virtmagma: failed to find virtio magma out queue: %d\n",
+ ret);
+ goto del_cdev;
+ }
+
+ INIT_WORK(&vi->out_vq_work, vq_out_work_handler);
+ init_waitqueue_head(&vi->out_waitq);
+
+ virtio_device_ready(vdev);
+
+ return 0;
+
+del_cdev:
+ cdev_del(&vi->cdev);
+destroy_device:
+ put_device(vi->dev);
+destroy_class:
+ class_destroy(vi->class);
+unregister_region:
+	unregister_chrdev_region(vi->dev_num, 1);
+free_vi:
+ kfree(vi);
+ return ret;
+}
+
+static void virtmagma_remove(struct virtio_device *vdev)
+{
+ struct virtmagma_info *vi = vdev->priv;
+
+ idr_for_each(&vi->instances, destroy_instance, vi);
+ mutex_destroy(&vi->instances_lock);
+ idr_destroy(&vi->instances);
+ cdev_del(&vi->cdev);
+ put_device(vi->dev);
+ class_destroy(vi->class);
+	unregister_chrdev_region(vi->dev_num, 1);
+ kfree(vi);
+}
+
+static void virtmagma_scan(struct virtio_device *vdev)
+{
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_MAGMA, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_magma_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtmagma_probe,
+ .remove = virtmagma_remove,
+ .scan = virtmagma_scan,
+};
+
+module_virtio_driver(virtio_magma_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio Magma driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_wl.c b/drivers/virtio/virtio_wl.c
new file mode 100644
index 0000000..3ee6225
--- /dev/null
+++ b/drivers/virtio/virtio_wl.c
@@ -0,0 +1,1425 @@
+/*
+ * Wayland Virtio Driver
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Virtio Wayland (virtio_wl or virtwl) is a virtual device that allows a guest
+ * virtual machine to use a wayland server on the host transparently (to the
+ * host). This is done by proxying the wayland protocol socket stream verbatim
+ * between the host and guest over 2 (recv and send) virtio queues. The guest
+ * can request new wayland server connections to give each guest wayland client
+ * a different server context. Each host connection's file descriptor is exposed
+ * to the guest as a virtual file descriptor (VFD). Additionally, the guest can
+ * request shared memory file descriptors which are also exposed as VFDs. These
+ * shared memory VFDs are directly writable by the guest via device memory
+ * injected by the host. Each VFD is sendable along a connection context VFD and
+ * will appear as ancillary data to the wayland server, just like a message from
+ * an ordinary wayland client. When the wayland server sends a shared memory
+ * file descriptor to the client (such as when sending a keymap), a VFD is
+ * allocated by the device automatically and its memory is injected into the
+ * guest as device memory.
+ *
+ * This driver is intended to be paired with the `virtwl_guest_proxy` program
+ * which is run in the guest system and acts like a wayland server. It accepts
+ * wayland client connections and converts their socket messages to ioctl
+ * messages exposed by this driver via the `/dev/wl` device file. While it would
+ * be possible to expose a unix stream socket from this driver, the user space
+ * helper is much cleaner to write.
+ */
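+
+/*
+ * A minimal userspace sketch (not part of this driver, and assuming the
+ * VIRTWL_IOCTL_* definitions exported to userspace by this change) of how a
+ * proxy-like client might drive the device:
+ *
+ *	int wl = open("/dev/wl0", O_RDWR | O_CLOEXEC);
+ *	struct virtwl_ioctl_new new_ctx = { .type = VIRTWL_IOCTL_NEW_CTX };
+ *
+ *	if (ioctl(wl, VIRTWL_IOCTL_NEW, &new_ctx) == 0) {
+ *		// new_ctx.fd is a connection VFD; bytes written to it are
+ *		// forwarded to the host wayland server, and replies (plus any
+ *		// VFDs) are retrieved with read() or VIRTWL_IOCTL_RECV.
+ *	}
+ */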
+
+#include <linux/anon_inodes.h>
+#include <linux/cdev.h>
+#include <linux/compat.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/scatterlist.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/virtio.h>
+#include <linux/virtio_wl.h>
+
+#include <uapi/linux/dma-buf.h>
+
+#define VFD_ILLEGAL_SIGN_BIT 0x80000000
+#define VFD_HOST_VFD_ID_BIT 0x40000000
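+
+/*
+ * Guest-allocated VFD ids come from the idr range [1, VIRTWL_MAX_ALLOC);
+ * ids announced by the host via VIRTIO_WL_CMD_VFD_NEW must have
+ * VFD_HOST_VFD_ID_BIT set and must not have the sign bit set.
+ */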
+
+struct virtwl_vfd_qentry {
+ struct list_head list;
+ struct virtio_wl_ctrl_hdr *hdr;
+ unsigned int len; /* total byte length of ctrl_vfd_* + vfds + data */
+ unsigned int vfd_offset; /* int offset into vfds */
+ unsigned int data_offset; /* byte offset into data */
+};
+
+struct virtwl_vfd {
+ struct kobject kobj;
+ struct mutex lock;
+
+ struct virtwl_info *vi;
+ uint32_t id;
+ uint32_t flags;
+ uint64_t pfn;
+ uint32_t size;
+ bool hungup;
+
+ struct list_head in_queue; /* list of virtwl_vfd_qentry */
+ wait_queue_head_t in_waitq;
+};
+
+struct virtwl_info {
+ dev_t dev_num;
+ struct device *dev;
+ struct class *class;
+ struct cdev cdev;
+
+ struct mutex vq_locks[VIRTWL_QUEUE_COUNT];
+ struct virtqueue *vqs[VIRTWL_QUEUE_COUNT];
+ struct work_struct in_vq_work;
+ struct work_struct out_vq_work;
+
+ wait_queue_head_t out_waitq;
+
+ struct mutex vfds_lock;
+ struct idr vfds;
+
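+	/* used by virtwl_create_fd_for_vfd() to wait for a late VFD_NEW */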
+ uint32_t vfd_wait_id;
+ struct completion vfd_wait_completion;
+};
+
+static struct virtwl_vfd *virtwl_vfd_alloc(struct virtwl_info *vi);
+static void virtwl_vfd_free(struct virtwl_vfd *vfd);
+
+static const struct file_operations virtwl_vfd_fops;
+
+static int virtwl_resp_err(unsigned int type)
+{
+ switch (type) {
+ case VIRTIO_WL_RESP_OK:
+ case VIRTIO_WL_RESP_VFD_NEW:
+ case VIRTIO_WL_RESP_VFD_NEW_DMABUF:
+ return 0;
+ case VIRTIO_WL_RESP_ERR:
+ return -ENODEV; /* Device is no longer reliable */
+ case VIRTIO_WL_RESP_OUT_OF_MEMORY:
+ return -ENOMEM;
+ case VIRTIO_WL_RESP_INVALID_ID:
+ return -ENOENT;
+ case VIRTIO_WL_RESP_INVALID_TYPE:
+ return -EINVAL;
+ case VIRTIO_WL_RESP_INVALID_FLAGS:
+ return -EPERM;
+ case VIRTIO_WL_RESP_INVALID_CMD:
+ return -ENOTTY;
+ default:
+ return -EPROTO;
+ }
+}
+
+static int vq_return_inbuf_locked(struct virtqueue *vq, void *buffer)
+{
+ int ret;
+ struct scatterlist sg[1];
+
+ sg_init_one(sg, buffer, PAGE_SIZE);
+
+ ret = virtqueue_add_inbuf(vq, sg, 1, buffer, GFP_KERNEL);
+ if (ret) {
+ pr_warn("virtwl: failed to give inbuf to host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
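+/*
+ * Adds one command (out_sg) plus its response buffer (in_sg) to the out
+ * virtqueue. If the ring is full, fail with -EAGAIN when nonblocking,
+ * otherwise wait for the device to drain buffers and retry, giving up with
+ * -EBUSY after about a second without progress.
+ */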
+static int vq_queue_out(struct virtwl_info *vi, struct scatterlist *out_sg,
+ struct scatterlist *in_sg,
+ struct completion *finish_completion,
+ bool nonblock)
+{
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_OUT];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_OUT];
+ struct scatterlist *sgs[] = { out_sg, in_sg };
+ int ret = 0;
+
+ mutex_lock(vq_lock);
+ while ((ret = virtqueue_add_sgs(vq, sgs, 1, 1, finish_completion,
+ GFP_KERNEL)) == -ENOSPC) {
+ mutex_unlock(vq_lock);
+ if (nonblock)
+ return -EAGAIN;
+ if (!wait_event_timeout(vi->out_waitq, vq->num_free > 0, HZ))
+ return -EBUSY;
+ mutex_lock(vq_lock);
+ }
+ if (!ret)
+ virtqueue_kick(vq);
+ mutex_unlock(vq_lock);
+
+ return ret;
+}
+
+static int vq_fill_locked(struct virtqueue *vq)
+{
+ void *buffer;
+ int ret = 0;
+
+ while (vq->num_free > 0) {
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto clear_queue;
+ }
+
+ ret = vq_return_inbuf_locked(vq, buffer);
+ if (ret)
+ goto clear_queue;
+ }
+
+ return 0;
+
+clear_queue:
+ while ((buffer = virtqueue_detach_unused_buf(vq)))
+ kfree(buffer);
+ return ret;
+}
+
+static bool vq_handle_new(struct virtwl_info *vi,
+ struct virtio_wl_ctrl_vfd_new *new, unsigned int len)
+{
+ struct virtwl_vfd *vfd;
+ u32 id = new->vfd_id;
+ int ret;
+
+ if (id == 0)
+ return true; /* return the inbuf to vq */
+
+ if (!(id & VFD_HOST_VFD_ID_BIT) || (id & VFD_ILLEGAL_SIGN_BIT)) {
+ pr_warn("virtwl: received a vfd with invalid id: %u\n", id);
+ return true; /* return the inbuf to vq */
+ }
+
+ vfd = virtwl_vfd_alloc(vi);
+ if (!vfd)
+ return true; /* return the inbuf to vq */
+
+ mutex_lock(&vi->vfds_lock);
+ ret = idr_alloc(&vi->vfds, vfd, id, id + 1, GFP_KERNEL);
+ mutex_unlock(&vi->vfds_lock);
+
+ if (ret <= 0) {
+ virtwl_vfd_free(vfd);
+ pr_warn("virtwl: failed to place received vfd: %d\n", ret);
+ return true; /* return the inbuf to vq */
+ }
+
+ vfd->id = id;
+ vfd->size = new->size;
+ vfd->pfn = new->pfn;
+ vfd->flags = new->flags;
+
+ if (vfd->id == vi->vfd_wait_id) {
+ vi->vfd_wait_id = 0;
+ complete(&vi->vfd_wait_completion);
+ }
+
+ return true; /* return the inbuf to vq */
+}
+
+static bool vq_handle_recv(struct virtwl_info *vi,
+ struct virtio_wl_ctrl_vfd_recv *recv,
+ unsigned int len)
+{
+ struct virtwl_vfd *vfd;
+ struct virtwl_vfd_qentry *qentry;
+
+ mutex_lock(&vi->vfds_lock);
+ vfd = idr_find(&vi->vfds, recv->vfd_id);
+ if (vfd)
+ mutex_lock(&vfd->lock);
+ mutex_unlock(&vi->vfds_lock);
+
+ if (!vfd) {
+ pr_warn("virtwl: recv for unknown vfd_id %u\n", recv->vfd_id);
+ return true; /* return the inbuf to vq */
+ }
+
+ qentry = kzalloc(sizeof(*qentry), GFP_KERNEL);
+ if (!qentry) {
+ mutex_unlock(&vfd->lock);
+ pr_warn("virtwl: failed to allocate qentry for vfd\n");
+ return true; /* return the inbuf to vq */
+ }
+
+ qentry->hdr = &recv->hdr;
+ qentry->len = len;
+
+ list_add_tail(&qentry->list, &vfd->in_queue);
+ wake_up_interruptible_all(&vfd->in_waitq);
+ mutex_unlock(&vfd->lock);
+
+	return false; /* do not return the inbuf to vq */
+}
+
+static bool vq_handle_hup(struct virtwl_info *vi,
+ struct virtio_wl_ctrl_vfd *vfd_hup,
+ unsigned int len)
+{
+ struct virtwl_vfd *vfd;
+
+ mutex_lock(&vi->vfds_lock);
+ vfd = idr_find(&vi->vfds, vfd_hup->vfd_id);
+ if (vfd)
+ mutex_lock(&vfd->lock);
+ mutex_unlock(&vi->vfds_lock);
+
+ if (!vfd) {
+ pr_warn("virtwl: hup for unknown vfd_id %u\n", vfd_hup->vfd_id);
+ return true; /* return the inbuf to vq */
+ }
+
+ if (vfd->hungup)
+ pr_warn("virtwl: hup for hungup vfd_id %u\n", vfd_hup->vfd_id);
+
+ vfd->hungup = true;
+ wake_up_interruptible_all(&vfd->in_waitq);
+ mutex_unlock(&vfd->lock);
+
+ return true;
+}
+
+static bool vq_dispatch_hdr(struct virtwl_info *vi, unsigned int len,
+ struct virtio_wl_ctrl_hdr *hdr)
+{
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_IN];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_IN];
+ bool return_vq = true;
+ int ret;
+
+ switch (hdr->type) {
+ case VIRTIO_WL_CMD_VFD_NEW:
+ return_vq = vq_handle_new(vi,
+ (struct virtio_wl_ctrl_vfd_new *)hdr,
+ len);
+ break;
+ case VIRTIO_WL_CMD_VFD_RECV:
+ return_vq = vq_handle_recv(vi,
+ (struct virtio_wl_ctrl_vfd_recv *)hdr, len);
+ break;
+ case VIRTIO_WL_CMD_VFD_HUP:
+ return_vq = vq_handle_hup(vi, (struct virtio_wl_ctrl_vfd *)hdr,
+ len);
+ break;
+ default:
+ pr_warn("virtwl: unhandled ctrl command: %u\n", hdr->type);
+ break;
+ }
+
+ if (!return_vq)
+		return false; /* do not kick the vq */
+
+ mutex_lock(vq_lock);
+ ret = vq_return_inbuf_locked(vq, hdr);
+ mutex_unlock(vq_lock);
+ if (ret) {
+ pr_warn("virtwl: failed to return inbuf to host: %d\n", ret);
+ kfree(hdr);
+ }
+
+ return true; /* kick the vq */
+}
+
+static void vq_in_work_handler(struct work_struct *work)
+{
+ struct virtwl_info *vi = container_of(work, struct virtwl_info,
+ in_vq_work);
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_IN];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_IN];
+ void *buffer;
+ unsigned int len;
+ bool kick_vq = false;
+
+ mutex_lock(vq_lock);
+ while ((buffer = virtqueue_get_buf(vq, &len)) != NULL) {
+ struct virtio_wl_ctrl_hdr *hdr = buffer;
+
+ mutex_unlock(vq_lock);
+ kick_vq |= vq_dispatch_hdr(vi, len, hdr);
+ mutex_lock(vq_lock);
+ }
+ mutex_unlock(vq_lock);
+
+ if (kick_vq)
+ virtqueue_kick(vq);
+}
+
+static void vq_out_work_handler(struct work_struct *work)
+{
+ struct virtwl_info *vi = container_of(work, struct virtwl_info,
+ out_vq_work);
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_OUT];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_OUT];
+ unsigned int len;
+ struct completion *finish_completion;
+ bool wake_waitq = false;
+
+ mutex_lock(vq_lock);
+ while ((finish_completion = virtqueue_get_buf(vq, &len)) != NULL) {
+ wake_waitq = true;
+ complete(finish_completion);
+ }
+ mutex_unlock(vq_lock);
+
+ if (wake_waitq)
+ wake_up_interruptible_all(&vi->out_waitq);
+}
+
+static void vq_in_cb(struct virtqueue *vq)
+{
+ struct virtwl_info *vi = vq->vdev->priv;
+
+ schedule_work(&vi->in_vq_work);
+}
+
+static void vq_out_cb(struct virtqueue *vq)
+{
+ struct virtwl_info *vi = vq->vdev->priv;
+
+ schedule_work(&vi->out_vq_work);
+}
+
+static struct virtwl_vfd *virtwl_vfd_alloc(struct virtwl_info *vi)
+{
+ struct virtwl_vfd *vfd = kzalloc(sizeof(struct virtwl_vfd), GFP_KERNEL);
+
+ if (!vfd)
+ return ERR_PTR(-ENOMEM);
+
+ vfd->vi = vi;
+
+ mutex_init(&vfd->lock);
+ INIT_LIST_HEAD(&vfd->in_queue);
+ init_waitqueue_head(&vfd->in_waitq);
+
+ return vfd;
+}
+
+static int virtwl_vfd_file_flags(struct virtwl_vfd *vfd)
+{
+ int flags = 0;
+ int rw_mask = VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
+
+ if ((vfd->flags & rw_mask) == rw_mask)
+ flags |= O_RDWR;
+ else if (vfd->flags & VIRTIO_WL_VFD_WRITE)
+ flags |= O_WRONLY;
+ else if (vfd->flags & VIRTIO_WL_VFD_READ)
+ flags |= O_RDONLY;
+ if (vfd->pfn)
+ flags |= O_RDWR;
+ return flags;
+}
+
+/* Locks the vfd and unlinks its id from vi */
+static void virtwl_vfd_lock_unlink(struct virtwl_vfd *vfd)
+{
+ struct virtwl_info *vi = vfd->vi;
+
+ /* this order is important to avoid deadlock */
+ mutex_lock(&vi->vfds_lock);
+ mutex_lock(&vfd->lock);
+ idr_remove(&vi->vfds, vfd->id);
+ mutex_unlock(&vfd->lock);
+ mutex_unlock(&vi->vfds_lock);
+}
+
+/*
+ * Only used to free a vfd that is not referenced anywhere else and has no
+ * queued virtio buffers. This must not be called while the vfd is still
+ * linked in vi->vfds.
+ */
+static void virtwl_vfd_free(struct virtwl_vfd *vfd)
+{
+ kfree(vfd);
+}
+
+/*
+ * Thread safe. Unlinks the vfd from vi, returns any of its queued inbufs to
+ * the in virtqueue, and frees the vfd.
+ */
+static void virtwl_vfd_remove(struct virtwl_vfd *vfd)
+{
+ struct virtwl_info *vi = vfd->vi;
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_IN];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_IN];
+ struct virtwl_vfd_qentry *qentry, *next;
+
+ virtwl_vfd_lock_unlink(vfd);
+
+ mutex_lock(vq_lock);
+ list_for_each_entry_safe(qentry, next, &vfd->in_queue, list) {
+ vq_return_inbuf_locked(vq, qentry->hdr);
+ list_del(&qentry->list);
+ kfree(qentry);
+ }
+ mutex_unlock(vq_lock);
+
+ virtwl_vfd_free(vfd);
+}
+
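+/*
+ * Returns the qentry's inbuf to the in virtqueue once both its payload bytes
+ * and its embedded VFD ids have been fully consumed by the guest.
+ */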
+static void vfd_qentry_free_if_empty(struct virtwl_vfd *vfd,
+ struct virtwl_vfd_qentry *qentry)
+{
+ struct virtwl_info *vi = vfd->vi;
+ struct virtqueue *vq = vi->vqs[VIRTWL_VQ_IN];
+ struct mutex *vq_lock = &vi->vq_locks[VIRTWL_VQ_IN];
+
+ if (qentry->hdr->type == VIRTIO_WL_CMD_VFD_RECV) {
+ struct virtio_wl_ctrl_vfd_recv *recv =
+ (struct virtio_wl_ctrl_vfd_recv *)qentry->hdr;
+ ssize_t data_len =
+ (ssize_t)qentry->len - (ssize_t)sizeof(*recv) -
+ (ssize_t)recv->vfd_count * (ssize_t)sizeof(__le32);
+
+ if (qentry->vfd_offset < recv->vfd_count)
+ return;
+
+ if ((s64)qentry->data_offset < data_len)
+ return;
+ }
+
+ mutex_lock(vq_lock);
+ vq_return_inbuf_locked(vq, qentry->hdr);
+ mutex_unlock(vq_lock);
+ list_del(&qentry->list);
+ kfree(qentry);
+ virtqueue_kick(vq);
+}
+
+static ssize_t vfd_out_locked(struct virtwl_vfd *vfd, char __user *buffer,
+ size_t len)
+{
+ struct virtwl_vfd_qentry *qentry, *next;
+ ssize_t read_count = 0;
+
+ list_for_each_entry_safe(qentry, next, &vfd->in_queue, list) {
+ struct virtio_wl_ctrl_vfd_recv *recv =
+ (struct virtio_wl_ctrl_vfd_recv *)qentry->hdr;
+ size_t recv_offset = sizeof(*recv) + recv->vfd_count *
+ sizeof(__le32) + qentry->data_offset;
+ u8 *buf = (u8 *)recv + recv_offset;
+ ssize_t to_read = (ssize_t)qentry->len - (ssize_t)recv_offset;
+
+ if (qentry->hdr->type != VIRTIO_WL_CMD_VFD_RECV)
+ continue;
+
+ if ((to_read + read_count) > len)
+ to_read = len - read_count;
+
+ if (copy_to_user(buffer + read_count, buf, to_read)) {
+ read_count = -EFAULT;
+ break;
+ }
+
+ read_count += to_read;
+
+ qentry->data_offset += to_read;
+ vfd_qentry_free_if_empty(vfd, qentry);
+
+ if (read_count >= len)
+ break;
+ }
+
+ return read_count;
+}
+
+/* must hold both vfd->lock and vi->vfds_lock */
+static size_t vfd_out_vfds_locked(struct virtwl_vfd *vfd,
+ struct virtwl_vfd **vfds, size_t count)
+{
+ struct virtwl_info *vi = vfd->vi;
+ struct virtwl_vfd_qentry *qentry, *next;
+ size_t i;
+ size_t read_count = 0;
+
+ list_for_each_entry_safe(qentry, next, &vfd->in_queue, list) {
+ struct virtio_wl_ctrl_vfd_recv *recv =
+ (struct virtio_wl_ctrl_vfd_recv *)qentry->hdr;
+ size_t vfd_offset = sizeof(*recv) + qentry->vfd_offset *
+ sizeof(__le32);
+ __le32 *vfds_le = (__le32 *)((void *)recv + vfd_offset);
+ ssize_t vfds_to_read = recv->vfd_count - qentry->vfd_offset;
+
+ if (read_count >= count)
+ break;
+ if (vfds_to_read <= 0)
+ continue;
+ if (qentry->hdr->type != VIRTIO_WL_CMD_VFD_RECV)
+ continue;
+
+ if ((vfds_to_read + read_count) > count)
+ vfds_to_read = count - read_count;
+
+ for (i = 0; i < vfds_to_read; i++) {
+ uint32_t vfd_id = le32_to_cpu(vfds_le[i]);
+ vfds[read_count] = idr_find(&vi->vfds, vfd_id);
+ if (vfds[read_count]) {
+ read_count++;
+ } else {
+ pr_warn("virtwl: received a vfd with unrecognized id: %u\n",
+ vfd_id);
+ }
+ qentry->vfd_offset++;
+ }
+
+ vfd_qentry_free_if_empty(vfd, qentry);
+ }
+
+ return read_count;
+}
+
+/* this can only be called if the caller has unique ownership of the vfd */
+static int do_vfd_close(struct virtwl_vfd *vfd)
+{
+ struct virtio_wl_ctrl_vfd *ctrl_close;
+ struct virtwl_info *vi = vfd->vi;
+ struct completion finish_completion;
+ struct scatterlist out_sg;
+ struct scatterlist in_sg;
+ int ret = 0;
+
+ ctrl_close = kzalloc(sizeof(*ctrl_close), GFP_KERNEL);
+ if (!ctrl_close)
+ return -ENOMEM;
+
+ ctrl_close->hdr.type = VIRTIO_WL_CMD_VFD_CLOSE;
+ ctrl_close->vfd_id = vfd->id;
+
+	sg_init_one(&out_sg, &ctrl_close->hdr,
+		    sizeof(struct virtio_wl_ctrl_vfd));
+	sg_init_one(&in_sg, &ctrl_close->hdr,
+		    sizeof(struct virtio_wl_ctrl_hdr));
+
+ init_completion(&finish_completion);
+ ret = vq_queue_out(vi, &out_sg, &in_sg, &finish_completion,
+ false /* block */);
+ if (ret) {
+ pr_warn("virtwl: failed to queue close vfd id %u: %d\n",
+ vfd->id,
+ ret);
+ goto free_ctrl_close;
+ }
+
+ wait_for_completion(&finish_completion);
+ virtwl_vfd_remove(vfd);
+
+free_ctrl_close:
+ kfree(ctrl_close);
+ return ret;
+}
+
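+/*
+ * Common receive path for read() and VIRTWL_IOCTL_RECV: copies queued payload
+ * bytes to userspace and, when the caller provides space, collects queued
+ * VFDs, sleeping until data arrives or the vfd hangs up.
+ */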
+static ssize_t virtwl_vfd_recv(struct file *filp, char __user *buffer,
+ size_t len, struct virtwl_vfd **vfds,
+ size_t *vfd_count)
+{
+ struct virtwl_vfd *vfd = filp->private_data;
+ struct virtwl_info *vi = vfd->vi;
+ ssize_t read_count = 0;
+ size_t vfd_read_count = 0;
+ bool force_to_wait = false;
+
+ mutex_lock(&vi->vfds_lock);
+ mutex_lock(&vfd->lock);
+
+ while (read_count == 0 && vfd_read_count == 0) {
+ while (force_to_wait || list_empty(&vfd->in_queue)) {
+ force_to_wait = false;
+ if (vfd->hungup)
+ goto out_unlock;
+
+ mutex_unlock(&vfd->lock);
+ mutex_unlock(&vi->vfds_lock);
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(vfd->in_waitq,
+ !list_empty(&vfd->in_queue) || vfd->hungup))
+ return -ERESTARTSYS;
+
+ mutex_lock(&vi->vfds_lock);
+ mutex_lock(&vfd->lock);
+ }
+
+ read_count = vfd_out_locked(vfd, buffer, len);
+ if (read_count < 0)
+ goto out_unlock;
+ if (vfds && vfd_count && *vfd_count)
+ vfd_read_count = vfd_out_vfds_locked(vfd, vfds,
+ *vfd_count);
+ else if (read_count == 0 && !list_empty(&vfd->in_queue))
+ /*
+ * Indicates a corner case where the in_queue has ONLY
+ * incoming VFDs but the caller has given us no space to
+ * store them. We force a wait for more activity on the
+ * in_queue to prevent busy waiting.
+ */
+ force_to_wait = true;
+ }
+
+out_unlock:
+ mutex_unlock(&vfd->lock);
+ mutex_unlock(&vi->vfds_lock);
+ if (vfd_count)
+ *vfd_count = vfd_read_count;
+ return read_count;
+}
+
+static int virtwl_vfd_send(struct file *filp, const char __user *buffer,
+ u32 len, int *vfd_fds)
+{
+ struct virtwl_vfd *vfd = filp->private_data;
+ struct virtwl_info *vi = vfd->vi;
+ struct fd vfd_files[VIRTWL_SEND_MAX_ALLOCS] = { { 0 } };
+ struct virtwl_vfd *vfds[VIRTWL_SEND_MAX_ALLOCS] = { 0 };
+ size_t vfd_count = 0;
+ size_t post_send_size;
+ struct virtio_wl_ctrl_vfd_send *ctrl_send;
+ __le32 *vfd_ids;
+ u8 *out_buffer;
+ struct completion finish_completion;
+ struct scatterlist out_sg;
+ struct scatterlist in_sg;
+ int ret;
+ int i;
+
+ if (vfd_fds) {
+ for (i = 0; i < VIRTWL_SEND_MAX_ALLOCS; i++) {
+ struct fd vfd_file;
+ int fd = vfd_fds[i];
+
+ if (fd < 0)
+ break;
+
+ vfd_file = fdget(vfd_fds[i]);
+ if (!vfd_file.file) {
+ ret = -EBADFD;
+ goto put_files;
+ }
+ vfd_files[i] = vfd_file;
+
+ vfds[i] = vfd_file.file->private_data;
+ if (!vfds[i] || !vfds[i]->id) {
+ ret = -EINVAL;
+ goto put_files;
+ }
+
+ vfd_count++;
+ }
+ }
+
+ /* Empty writes always succeed. */
+ if (len == 0 && vfd_count == 0)
+ return 0;
+
+ post_send_size = vfd_count * sizeof(__le32) + len;
+ ctrl_send = kzalloc(sizeof(*ctrl_send) + post_send_size, GFP_KERNEL);
+ if (!ctrl_send) {
+ ret = -ENOMEM;
+ goto put_files;
+ }
+
+ vfd_ids = (__le32 *)((u8 *)ctrl_send + sizeof(*ctrl_send));
+ out_buffer = (u8 *)vfd_ids + vfd_count * sizeof(__le32);
+
+ ctrl_send->hdr.type = VIRTIO_WL_CMD_VFD_SEND;
+ ctrl_send->vfd_id = vfd->id;
+ ctrl_send->vfd_count = vfd_count;
+ for (i = 0; i < vfd_count; i++)
+ vfd_ids[i] = cpu_to_le32(vfds[i]->id);
+
+ if (copy_from_user(out_buffer, buffer, len)) {
+ ret = -EFAULT;
+ goto free_ctrl_send;
+ }
+
+ init_completion(&finish_completion);
+ sg_init_one(&out_sg, ctrl_send, sizeof(*ctrl_send) + post_send_size);
+ sg_init_one(&in_sg, ctrl_send, sizeof(struct virtio_wl_ctrl_hdr));
+
+ ret = vq_queue_out(vi, &out_sg, &in_sg, &finish_completion,
+ filp->f_flags & O_NONBLOCK);
+ if (ret)
+ goto free_ctrl_send;
+
+ wait_for_completion(&finish_completion);
+
+ ret = virtwl_resp_err(ctrl_send->hdr.type);
+
+free_ctrl_send:
+ kfree(ctrl_send);
+put_files:
+ for (i = 0; i < VIRTWL_SEND_MAX_ALLOCS; i++) {
+ if (!vfd_files[i].file)
+ continue;
+ fdput(vfd_files[i]);
+ }
+ return ret;
+}
+
+static int virtwl_vfd_dmabuf_sync(struct file *filp, u32 flags)
+{
+ struct virtio_wl_ctrl_vfd_dmabuf_sync *ctrl_dmabuf_sync;
+ struct virtwl_vfd *vfd = filp->private_data;
+ struct virtwl_info *vi = vfd->vi;
+ struct completion finish_completion;
+ struct scatterlist out_sg;
+ struct scatterlist in_sg;
+ int ret = 0;
+
+ ctrl_dmabuf_sync = kzalloc(sizeof(*ctrl_dmabuf_sync), GFP_KERNEL);
+ if (!ctrl_dmabuf_sync)
+ return -ENOMEM;
+
+ ctrl_dmabuf_sync->hdr.type = VIRTIO_WL_CMD_VFD_DMABUF_SYNC;
+ ctrl_dmabuf_sync->vfd_id = vfd->id;
+ ctrl_dmabuf_sync->flags = flags;
+
+	sg_init_one(&out_sg, &ctrl_dmabuf_sync->hdr,
+		    sizeof(struct virtio_wl_ctrl_vfd_dmabuf_sync));
+	sg_init_one(&in_sg, &ctrl_dmabuf_sync->hdr,
+		    sizeof(struct virtio_wl_ctrl_hdr));
+
+ init_completion(&finish_completion);
+ ret = vq_queue_out(vi, &out_sg, &in_sg, &finish_completion,
+ false /* block */);
+ if (ret) {
+ pr_warn("virtwl: failed to queue dmabuf sync vfd id %u: %d\n",
+ vfd->id,
+ ret);
+ goto free_ctrl_dmabuf_sync;
+ }
+
+ wait_for_completion(&finish_completion);
+
+free_ctrl_dmabuf_sync:
+ kfree(ctrl_dmabuf_sync);
+ return ret;
+}
+
+static ssize_t virtwl_vfd_read(struct file *filp, char __user *buffer,
+ size_t size, loff_t *pos)
+{
+ return virtwl_vfd_recv(filp, buffer, size, NULL, NULL);
+}
+
+static ssize_t virtwl_vfd_write(struct file *filp, const char __user *buffer,
+ size_t size, loff_t *pos)
+{
+ int ret = 0;
+
+ if (size > U32_MAX)
+ size = U32_MAX;
+
+ ret = virtwl_vfd_send(filp, buffer, size, NULL);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static int virtwl_vfd_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct virtwl_vfd *vfd = filp->private_data;
+ unsigned long vm_size = vma->vm_end - vma->vm_start;
+ int ret = 0;
+
+ mutex_lock(&vfd->lock);
+
+ if (!vfd->pfn) {
+ ret = -EACCES;
+ goto out_unlock;
+ }
+
+ if (vm_size + (vma->vm_pgoff << PAGE_SHIFT) > PAGE_ALIGN(vfd->size)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = io_remap_pfn_range(vma, vma->vm_start, vfd->pfn, vm_size,
+ vma->vm_page_prot);
+ if (ret)
+ goto out_unlock;
+
+ vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+out_unlock:
+ mutex_unlock(&vfd->lock);
+ return ret;
+}
+
+static unsigned int virtwl_vfd_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct virtwl_vfd *vfd = filp->private_data;
+ struct virtwl_info *vi = vfd->vi;
+ unsigned int mask = 0;
+
+ mutex_lock(&vi->vq_locks[VIRTWL_VQ_OUT]);
+ poll_wait(filp, &vi->out_waitq, wait);
+ if (vi->vqs[VIRTWL_VQ_OUT]->num_free)
+ mask |= POLLOUT | POLLWRNORM;
+ mutex_unlock(&vi->vq_locks[VIRTWL_VQ_OUT]);
+
+ mutex_lock(&vfd->lock);
+ poll_wait(filp, &vfd->in_waitq, wait);
+ if (!list_empty(&vfd->in_queue))
+ mask |= POLLIN | POLLRDNORM;
+ if (vfd->hungup)
+ mask |= POLLHUP;
+ mutex_unlock(&vfd->lock);
+
+ return mask;
+}
+
+static int virtwl_vfd_release(struct inode *inodep, struct file *filp)
+{
+ struct virtwl_vfd *vfd = filp->private_data;
+ uint32_t vfd_id = vfd->id;
+ int ret;
+
+ /*
+ * If release is called, filp must be out of references and we have the
+ * last reference.
+ */
+ ret = do_vfd_close(vfd);
+ if (ret)
+ pr_warn("virtwl: failed to release vfd id %u: %d\n", vfd_id,
+ ret);
+ return 0;
+}
+
+/* externally visible function to create fds for existing vfds */
+int virtwl_create_fd_for_vfd(void *filp_private_data, uint32_t vfd_id)
+{
+ struct virtwl_info *vi = filp_private_data;
+ struct virtwl_vfd *vfd;
+
+ mutex_lock(&vi->vfds_lock);
+ vfd = idr_find(&vi->vfds, vfd_id);
+ mutex_unlock(&vi->vfds_lock);
+
+ if (!vfd) {
+ vi->vfd_wait_id = vfd_id;
+ reinit_completion(&vi->vfd_wait_completion);
+ if (wait_for_completion_timeout(&vi->vfd_wait_completion, HZ)) {
+ mutex_lock(&vi->vfds_lock);
+ vfd = idr_find(&vi->vfds, vfd_id);
+ mutex_unlock(&vi->vfds_lock);
+ }
+ }
+
+ if (!vfd) {
+		pr_warn("virtwl: request to create fd for non-existent vfd id %u\n",
+			vfd_id);
+ return -ENOENT;
+ }
+
+ return anon_inode_getfd("[virtwl_vfd]", &virtwl_vfd_fops, vfd,
+ virtwl_vfd_file_flags(vfd) | O_CLOEXEC);
+}
+
+static int virtwl_open(struct inode *inodep, struct file *filp)
+{
+ struct virtwl_info *vi = container_of(inodep->i_cdev,
+ struct virtwl_info, cdev);
+
+ filp->private_data = vi;
+
+ return 0;
+}
+
+static struct virtwl_vfd *do_new(struct virtwl_info *vi,
+ struct virtwl_ioctl_new *ioctl_new,
+ size_t ioctl_new_size, bool nonblock)
+{
+ struct virtio_wl_ctrl_vfd_new *ctrl_new;
+ struct virtwl_vfd *vfd;
+ struct completion finish_completion;
+ struct scatterlist out_sg;
+ struct scatterlist in_sg;
+ int ret = 0;
+
+ if (ioctl_new->type != VIRTWL_IOCTL_NEW_CTX &&
+ ioctl_new->type != VIRTWL_IOCTL_NEW_ALLOC &&
+ ioctl_new->type != VIRTWL_IOCTL_NEW_PIPE_READ &&
+ ioctl_new->type != VIRTWL_IOCTL_NEW_PIPE_WRITE &&
+ ioctl_new->type != VIRTWL_IOCTL_NEW_DMABUF)
+ return ERR_PTR(-EINVAL);
+
+ ctrl_new = kzalloc(sizeof(*ctrl_new), GFP_KERNEL);
+ if (!ctrl_new)
+ return ERR_PTR(-ENOMEM);
+
+ vfd = virtwl_vfd_alloc(vi);
+ if (!vfd) {
+ ret = -ENOMEM;
+ goto free_ctrl_new;
+ }
+
+ mutex_lock(&vi->vfds_lock);
+ /*
+ * Take the lock before adding it to the vfds list where others might
+ * reference it.
+ */
+ mutex_lock(&vfd->lock);
+ ret = idr_alloc(&vi->vfds, vfd, 1, VIRTWL_MAX_ALLOC, GFP_KERNEL);
+ mutex_unlock(&vi->vfds_lock);
+ if (ret <= 0)
+ goto remove_vfd;
+
+ vfd->id = ret;
+ ret = 0;
+
+ ctrl_new->vfd_id = vfd->id;
+ switch (ioctl_new->type) {
+ case VIRTWL_IOCTL_NEW_CTX:
+ ctrl_new->hdr.type = VIRTIO_WL_CMD_VFD_NEW_CTX;
+ ctrl_new->flags = VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
+ break;
+ case VIRTWL_IOCTL_NEW_ALLOC:
+ ctrl_new->hdr.type = VIRTIO_WL_CMD_VFD_NEW;
+ ctrl_new->size = PAGE_ALIGN(ioctl_new->size);
+ break;
+ case VIRTWL_IOCTL_NEW_PIPE_READ:
+ ctrl_new->hdr.type = VIRTIO_WL_CMD_VFD_NEW_PIPE;
+ ctrl_new->flags = VIRTIO_WL_VFD_READ;
+ break;
+ case VIRTWL_IOCTL_NEW_PIPE_WRITE:
+ ctrl_new->hdr.type = VIRTIO_WL_CMD_VFD_NEW_PIPE;
+ ctrl_new->flags = VIRTIO_WL_VFD_WRITE;
+ break;
+ case VIRTWL_IOCTL_NEW_DMABUF:
+ /* Make sure ioctl_new contains enough data for NEW_DMABUF. */
+ if (ioctl_new_size == sizeof(*ioctl_new)) {
+ ctrl_new->hdr.type = VIRTIO_WL_CMD_VFD_NEW_DMABUF;
+ /* FIXME: convert from host byte order. */
+ memcpy(&ctrl_new->dmabuf, &ioctl_new->dmabuf,
+ sizeof(ioctl_new->dmabuf));
+ break;
+ }
+ /* fall-through */
+ default:
+ ret = -EINVAL;
+ goto remove_vfd;
+ }
+
+ init_completion(&finish_completion);
+ sg_init_one(&out_sg, ctrl_new, sizeof(*ctrl_new));
+ sg_init_one(&in_sg, ctrl_new, sizeof(*ctrl_new));
+
+ ret = vq_queue_out(vi, &out_sg, &in_sg, &finish_completion, nonblock);
+ if (ret)
+ goto remove_vfd;
+
+ wait_for_completion(&finish_completion);
+
+ ret = virtwl_resp_err(ctrl_new->hdr.type);
+ if (ret)
+ goto remove_vfd;
+
+ vfd->size = ctrl_new->size;
+ vfd->pfn = ctrl_new->pfn;
+ vfd->flags = ctrl_new->flags;
+
+ mutex_unlock(&vfd->lock);
+
+ if (ioctl_new->type == VIRTWL_IOCTL_NEW_DMABUF) {
+ /* FIXME: convert to host byte order. */
+ memcpy(&ioctl_new->dmabuf, &ctrl_new->dmabuf,
+ sizeof(ctrl_new->dmabuf));
+ }
+
+ kfree(ctrl_new);
+ return vfd;
+
+remove_vfd:
+	/*
+	 * Unlock the vfd before unlinking or freeing it to avoid deadlocking
+	 * on (or freeing) a held lock.
+	 */
+ mutex_unlock(&vfd->lock);
+ /* this is safe since the id cannot change after the vfd is created */
+ if (vfd->id)
+ virtwl_vfd_lock_unlink(vfd);
+ virtwl_vfd_free(vfd);
+free_ctrl_new:
+ kfree(ctrl_new);
+ return ERR_PTR(ret);
+}
+
+static long virtwl_ioctl_send(struct file *filp, void __user *ptr)
+{
+ struct virtwl_ioctl_txn ioctl_send;
+ void __user *user_data = ptr + sizeof(struct virtwl_ioctl_txn);
+ int ret;
+
+ ret = copy_from_user(&ioctl_send, ptr, sizeof(struct virtwl_ioctl_txn));
+ if (ret)
+ return -EFAULT;
+
+	/*
+	 * Early check for user error; virtwl_vfd_send() still uses
+	 * copy_from_user.
+	 */
+ ret = !access_ok(VERIFY_READ, user_data, ioctl_send.len);
+ if (ret)
+ return -EFAULT;
+
+ return virtwl_vfd_send(filp, user_data, ioctl_send.len, ioctl_send.fds);
+}
+
+static long virtwl_ioctl_recv(struct file *filp, void __user *ptr)
+{
+ struct virtwl_ioctl_txn ioctl_recv;
+ void __user *user_data = ptr + sizeof(struct virtwl_ioctl_txn);
+ int __user *user_fds = (int __user *)ptr;
+ size_t vfd_count = VIRTWL_SEND_MAX_ALLOCS;
+ struct virtwl_vfd *vfds[VIRTWL_SEND_MAX_ALLOCS] = { 0 };
+ int fds[VIRTWL_SEND_MAX_ALLOCS];
+ size_t i;
+ int ret = 0;
+
+ for (i = 0; i < VIRTWL_SEND_MAX_ALLOCS; i++)
+ fds[i] = -1;
+
+ ret = copy_from_user(&ioctl_recv, ptr, sizeof(struct virtwl_ioctl_txn));
+ if (ret)
+ return -EFAULT;
+
+ /* Early check for user error. */
+ ret = !access_ok(VERIFY_WRITE, user_data, ioctl_recv.len);
+ if (ret)
+ return -EFAULT;
+
+ ret = virtwl_vfd_recv(filp, user_data, ioctl_recv.len, vfds,
+ &vfd_count);
+ if (ret < 0)
+ return ret;
+
+ ret = copy_to_user(&((struct virtwl_ioctl_txn __user *)ptr)->len, &ret,
+ sizeof(ioctl_recv.len));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_vfds;
+ }
+
+ for (i = 0; i < vfd_count; i++) {
+ ret = anon_inode_getfd("[virtwl_vfd]", &virtwl_vfd_fops,
+ vfds[i], virtwl_vfd_file_flags(vfds[i])
+ | O_CLOEXEC);
+ if (ret < 0)
+ goto free_vfds;
+
+ vfds[i] = NULL;
+ fds[i] = ret;
+ }
+
+ ret = copy_to_user(user_fds, fds, sizeof(int) * VIRTWL_SEND_MAX_ALLOCS);
+ if (ret) {
+ ret = -EFAULT;
+ goto free_vfds;
+ }
+
+ return 0;
+
+free_vfds:
+ for (i = 0; i < vfd_count; i++) {
+ if (vfds[i])
+ do_vfd_close(vfds[i]);
+ if (fds[i] >= 0)
+ __close_fd(current->files, fds[i]);
+ }
+ return ret;
+}
+
+static long virtwl_ioctl_dmabuf_sync(struct file *filp, void __user *ptr)
+{
+ struct virtwl_ioctl_dmabuf_sync ioctl_dmabuf_sync;
+ int ret;
+
+ ret = copy_from_user(&ioctl_dmabuf_sync, ptr,
+ sizeof(struct virtwl_ioctl_dmabuf_sync));
+ if (ret)
+ return -EFAULT;
+
+ if (ioctl_dmabuf_sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
+ return -EINVAL;
+
+ return virtwl_vfd_dmabuf_sync(filp, ioctl_dmabuf_sync.flags);
+}
+
+static long virtwl_vfd_ioctl(struct file *filp, unsigned int cmd,
+ void __user *ptr)
+{
+ switch (cmd) {
+ case VIRTWL_IOCTL_SEND:
+ return virtwl_ioctl_send(filp, ptr);
+ case VIRTWL_IOCTL_RECV:
+ return virtwl_ioctl_recv(filp, ptr);
+ case VIRTWL_IOCTL_DMABUF_SYNC:
+ return virtwl_ioctl_dmabuf_sync(filp, ptr);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long virtwl_ioctl_new(struct file *filp, void __user *ptr,
+ size_t in_size)
+{
+ struct virtwl_info *vi = filp->private_data;
+ struct virtwl_vfd *vfd;
+ struct virtwl_ioctl_new ioctl_new = {};
+ size_t size = min(in_size, sizeof(ioctl_new));
+ int ret;
+
+ /* Early check for user error. */
+ ret = !access_ok(VERIFY_WRITE, ptr, size);
+ if (ret)
+ return -EFAULT;
+
+ ret = copy_from_user(&ioctl_new, ptr, size);
+ if (ret)
+ return -EFAULT;
+
+ vfd = do_new(vi, &ioctl_new, size, filp->f_flags & O_NONBLOCK);
+ if (IS_ERR(vfd))
+ return PTR_ERR(vfd);
+
+ ret = anon_inode_getfd("[virtwl_vfd]", &virtwl_vfd_fops, vfd,
+ virtwl_vfd_file_flags(vfd) | O_CLOEXEC);
+ if (ret < 0) {
+ do_vfd_close(vfd);
+ return ret;
+ }
+
+ ioctl_new.fd = ret;
+ ret = copy_to_user(ptr, &ioctl_new, size);
+ if (ret) {
+ /* The release operation will handle freeing this alloc */
+ ksys_close(ioctl_new.fd);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static long virtwl_ioctl_ptr(struct file *filp, unsigned int cmd,
+ void __user *ptr)
+{
+ if (filp->f_op == &virtwl_vfd_fops)
+ return virtwl_vfd_ioctl(filp, cmd, ptr);
+
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(VIRTWL_IOCTL_NEW):
+ return virtwl_ioctl_new(filp, ptr, _IOC_SIZE(cmd));
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long virtwl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ return virtwl_ioctl_ptr(filp, cmd, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long virtwl_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return virtwl_ioctl_ptr(filp, cmd, compat_ptr(arg));
+}
+#else
+#define virtwl_ioctl_compat NULL
+#endif
+
+static int virtwl_release(struct inode *inodep, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations virtwl_fops = {
+ .open = virtwl_open,
+ .unlocked_ioctl = virtwl_ioctl,
+ .compat_ioctl = virtwl_ioctl_compat,
+ .release = virtwl_release,
+};
+
+static const struct file_operations virtwl_vfd_fops = {
+ .read = virtwl_vfd_read,
+ .write = virtwl_vfd_write,
+ .mmap = virtwl_vfd_mmap,
+ .poll = virtwl_vfd_poll,
+ .unlocked_ioctl = virtwl_ioctl,
+ .compat_ioctl = virtwl_ioctl_compat,
+ .release = virtwl_vfd_release,
+};
+
+static int probe_common(struct virtio_device *vdev)
+{
+ int i;
+ int ret;
+ struct virtwl_info *vi = NULL;
+ vq_callback_t *vq_callbacks[] = { vq_in_cb, vq_out_cb };
+ static const char * const vq_names[] = { "in", "out" };
+
+ vi = kzalloc(sizeof(struct virtwl_info), GFP_KERNEL);
+ if (!vi)
+ return -ENOMEM;
+
+ vdev->priv = vi;
+
+ ret = alloc_chrdev_region(&vi->dev_num, 0, 1, "wl");
+ if (ret) {
+		pr_warn("virtwl: failed to allocate wl chrdev region: %d\n",
+			ret);
+ goto free_vi;
+ }
+
+ vi->class = class_create(THIS_MODULE, "wl");
+ if (IS_ERR(vi->class)) {
+ ret = PTR_ERR(vi->class);
+ pr_warn("virtwl: failed to create wl class: %d\n", ret);
+ goto unregister_region;
+	}
+
+ vi->dev = device_create(vi->class, NULL, vi->dev_num, vi, "wl%d", 0);
+ if (IS_ERR(vi->dev)) {
+ ret = PTR_ERR(vi->dev);
+ pr_warn("virtwl: failed to create wl0 device: %d\n", ret);
+ goto destroy_class;
+ }
+
+ cdev_init(&vi->cdev, &virtwl_fops);
+ ret = cdev_add(&vi->cdev, vi->dev_num, 1);
+ if (ret) {
+ pr_warn("virtwl: failed to add virtio wayland character device to system: %d\n",
+ ret);
+ goto destroy_device;
+ }
+
+ for (i = 0; i < VIRTWL_QUEUE_COUNT; i++)
+ mutex_init(&vi->vq_locks[i]);
+
+ ret = virtio_find_vqs(vdev, VIRTWL_QUEUE_COUNT, vi->vqs, vq_callbacks,
+ vq_names, NULL);
+ if (ret) {
+ pr_warn("virtwl: failed to find virtio wayland queues: %d\n",
+ ret);
+ goto del_cdev;
+ }
+
+ INIT_WORK(&vi->in_vq_work, vq_in_work_handler);
+ INIT_WORK(&vi->out_vq_work, vq_out_work_handler);
+ init_waitqueue_head(&vi->out_waitq);
+
+ mutex_init(&vi->vfds_lock);
+ idr_init(&vi->vfds);
+ init_completion(&vi->vfd_wait_completion);
+
+ /* lock is unneeded as we have unique ownership */
+ ret = vq_fill_locked(vi->vqs[VIRTWL_VQ_IN]);
+ if (ret) {
+		pr_warn("virtwl: failed to fill in virtqueue: %d\n", ret);
+ goto del_cdev;
+ }
+
+ virtio_device_ready(vdev);
+ virtqueue_kick(vi->vqs[VIRTWL_VQ_IN]);
+
+ return 0;
+
+del_cdev:
+ cdev_del(&vi->cdev);
+destroy_device:
+ put_device(vi->dev);
+destroy_class:
+ class_destroy(vi->class);
+unregister_region:
+	unregister_chrdev_region(vi->dev_num, 1);
+free_vi:
+ kfree(vi);
+ return ret;
+}
+
+static void remove_common(struct virtio_device *vdev)
+{
+ struct virtwl_info *vi = vdev->priv;
+
+ cdev_del(&vi->cdev);
+ put_device(vi->dev);
+ class_destroy(vi->class);
+	unregister_chrdev_region(vi->dev_num, 1);
+ kfree(vi);
+}
+
+static int virtwl_probe(struct virtio_device *vdev)
+{
+ return probe_common(vdev);
+}
+
+static void virtwl_remove(struct virtio_device *vdev)
+{
+ remove_common(vdev);
+}
+
+static void virtwl_scan(struct virtio_device *vdev)
+{
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_WL, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static unsigned int features_legacy[] = {
+ VIRTIO_WL_F_TRANS_FLAGS
+};
+
+static unsigned int features[] = {
+ VIRTIO_WL_F_TRANS_FLAGS
+};
+
+static struct virtio_driver virtio_wl_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .feature_table_legacy = features_legacy,
+ .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
+ .probe = virtwl_probe,
+ .remove = virtwl_remove,
+ .scan = virtwl_scan,
+};
+
+module_virtio_driver(virtio_wl_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio wayland driver");
+MODULE_LICENSE("GPL");
diff --git a/fs/namei.c b/fs/namei.c
index 734cef5..389e48e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3711,8 +3711,7 @@
if (error)
return error;
- if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
- !ns_capable(dentry->d_sb->s_user_ns, CAP_MKNOD))
+ if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
return -EPERM;
if (!dir->i_op->mknod)
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 6d5c3b2..c5cf1b2 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -43,5 +43,7 @@
#define VIRTIO_ID_INPUT 18 /* virtio input */
#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
+#define VIRTIO_ID_WL 30 /* virtio wayland */
+#define VIRTIO_ID_MAGMA 50 /* virtio magma */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_magma.h b/include/uapi/linux/virtio_magma.h
new file mode 100644
index 0000000..64e8c42
--- /dev/null
+++ b/include/uapi/linux/virtio_magma.h
@@ -0,0 +1,771 @@
+/* Copyright 2018 The Fuchsia Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style license that can be
+ found in the LICENSE file. */
+
+/* NOTE: DO NOT EDIT THIS FILE! It is generated automatically by:
+ //garnet/lib/magma/include/virtio/virtio_magma.h.gen.py */
+
+#ifndef _LINUX_VIRTIO_MAGMA_H
+#define _LINUX_VIRTIO_MAGMA_H
+
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtmagma.h>
+
+struct virtio_magma_config {
+ __le64 dummy;
+} __attribute((packed));
+
+enum virtio_magma_ctrl_type {
+ /* magma commands */
+ VIRTIO_MAGMA_CMD_QUERY = 0x1001,
+ VIRTIO_MAGMA_CMD_QUERY_RETURNS_BUFFER = 0x1002,
+ VIRTIO_MAGMA_CMD_CREATE_CONNECTION = 0x1003,
+ VIRTIO_MAGMA_CMD_RELEASE_CONNECTION = 0x1004,
+ VIRTIO_MAGMA_CMD_GET_ERROR = 0x1005,
+ VIRTIO_MAGMA_CMD_CREATE_CONTEXT = 0x1006,
+ VIRTIO_MAGMA_CMD_RELEASE_CONTEXT = 0x1007,
+ VIRTIO_MAGMA_CMD_CREATE_BUFFER = 0x1008,
+ VIRTIO_MAGMA_CMD_RELEASE_BUFFER = 0x1009,
+ VIRTIO_MAGMA_CMD_DUPLICATE_HANDLE = 0x100A,
+ VIRTIO_MAGMA_CMD_RELEASE_BUFFER_HANDLE = 0x100B,
+ VIRTIO_MAGMA_CMD_GET_BUFFER_ID = 0x100C,
+ VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE = 0x100D,
+ VIRTIO_MAGMA_CMD_CLEAN_CACHE = 0x100E,
+ VIRTIO_MAGMA_CMD_SET_CACHE_POLICY = 0x100F,
+ VIRTIO_MAGMA_CMD_GET_BUFFER_CACHE_POLICY = 0x1010,
+ VIRTIO_MAGMA_CMD_GET_BUFFER_IS_MAPPABLE = 0x1011,
+ VIRTIO_MAGMA_CMD_SET_BUFFER_MAPPING_ADDRESS_RANGE = 0x1012,
+ VIRTIO_MAGMA_CMD_MAP = 0x1013,
+ VIRTIO_MAGMA_CMD_MAP_ALIGNED = 0x1014,
+ VIRTIO_MAGMA_CMD_MAP_SPECIFIC = 0x1015,
+ VIRTIO_MAGMA_CMD_UNMAP = 0x1016,
+ VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU = 0x1017,
+ VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU = 0x1018,
+ VIRTIO_MAGMA_CMD_COMMIT_BUFFER = 0x1019,
+ VIRTIO_MAGMA_CMD_EXPORT = 0x101A,
+ VIRTIO_MAGMA_CMD_IMPORT = 0x101B,
+ VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER = 0x101C,
+ VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER = 0x101D,
+ VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER = 0x101E,
+ VIRTIO_MAGMA_CMD_EXECUTE_COMMAND_BUFFER_WITH_RESOURCES = 0x101F,
+ VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS = 0x1020,
+ VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS2 = 0x1021,
+ VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE = 0x1022,
+ VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE = 0x1023,
+ VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID = 0x1024,
+ VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE = 0x1025,
+ VIRTIO_MAGMA_CMD_RESET_SEMAPHORE = 0x1026,
+ VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES = 0x1027,
+ VIRTIO_MAGMA_CMD_EXPORT_SEMAPHORE = 0x1028,
+ VIRTIO_MAGMA_CMD_IMPORT_SEMAPHORE = 0x1029,
+ VIRTIO_MAGMA_CMD_GET_NOTIFICATION_CHANNEL_HANDLE = 0x102A,
+ VIRTIO_MAGMA_CMD_WAIT_NOTIFICATION_CHANNEL = 0x102B,
+ VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL = 0x102C,
+ /* magma success responses */
+ VIRTIO_MAGMA_RESP_QUERY = 0x2001,
+ VIRTIO_MAGMA_RESP_QUERY_RETURNS_BUFFER = 0x2002,
+ VIRTIO_MAGMA_RESP_CREATE_CONNECTION = 0x2003,
+ VIRTIO_MAGMA_RESP_RELEASE_CONNECTION = 0x2004,
+ VIRTIO_MAGMA_RESP_GET_ERROR = 0x2005,
+ VIRTIO_MAGMA_RESP_CREATE_CONTEXT = 0x2006,
+ VIRTIO_MAGMA_RESP_RELEASE_CONTEXT = 0x2007,
+ VIRTIO_MAGMA_RESP_CREATE_BUFFER = 0x2008,
+ VIRTIO_MAGMA_RESP_RELEASE_BUFFER = 0x2009,
+ VIRTIO_MAGMA_RESP_DUPLICATE_HANDLE = 0x200A,
+ VIRTIO_MAGMA_RESP_RELEASE_BUFFER_HANDLE = 0x200B,
+ VIRTIO_MAGMA_RESP_GET_BUFFER_ID = 0x200C,
+ VIRTIO_MAGMA_RESP_GET_BUFFER_SIZE = 0x200D,
+ VIRTIO_MAGMA_RESP_CLEAN_CACHE = 0x200E,
+ VIRTIO_MAGMA_RESP_SET_CACHE_POLICY = 0x200F,
+ VIRTIO_MAGMA_RESP_GET_BUFFER_CACHE_POLICY = 0x2010,
+ VIRTIO_MAGMA_RESP_GET_BUFFER_IS_MAPPABLE = 0x2011,
+ VIRTIO_MAGMA_RESP_SET_BUFFER_MAPPING_ADDRESS_RANGE = 0x2012,
+ VIRTIO_MAGMA_RESP_MAP = 0x2013,
+ VIRTIO_MAGMA_RESP_MAP_ALIGNED = 0x2014,
+ VIRTIO_MAGMA_RESP_MAP_SPECIFIC = 0x2015,
+ VIRTIO_MAGMA_RESP_UNMAP = 0x2016,
+ VIRTIO_MAGMA_RESP_MAP_BUFFER_GPU = 0x2017,
+ VIRTIO_MAGMA_RESP_UNMAP_BUFFER_GPU = 0x2018,
+ VIRTIO_MAGMA_RESP_COMMIT_BUFFER = 0x2019,
+ VIRTIO_MAGMA_RESP_EXPORT = 0x201A,
+ VIRTIO_MAGMA_RESP_IMPORT = 0x201B,
+ VIRTIO_MAGMA_RESP_CREATE_COMMAND_BUFFER = 0x201C,
+ VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER = 0x201D,
+ VIRTIO_MAGMA_RESP_SUBMIT_COMMAND_BUFFER = 0x201E,
+ VIRTIO_MAGMA_RESP_EXECUTE_COMMAND_BUFFER_WITH_RESOURCES = 0x201F,
+ VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS = 0x2020,
+ VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS2 = 0x2021,
+ VIRTIO_MAGMA_RESP_CREATE_SEMAPHORE = 0x2022,
+ VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE = 0x2023,
+ VIRTIO_MAGMA_RESP_GET_SEMAPHORE_ID = 0x2024,
+ VIRTIO_MAGMA_RESP_SIGNAL_SEMAPHORE = 0x2025,
+ VIRTIO_MAGMA_RESP_RESET_SEMAPHORE = 0x2026,
+ VIRTIO_MAGMA_RESP_WAIT_SEMAPHORES = 0x2027,
+ VIRTIO_MAGMA_RESP_EXPORT_SEMAPHORE = 0x2028,
+ VIRTIO_MAGMA_RESP_IMPORT_SEMAPHORE = 0x2029,
+ VIRTIO_MAGMA_RESP_GET_NOTIFICATION_CHANNEL_HANDLE = 0x202A,
+ VIRTIO_MAGMA_RESP_WAIT_NOTIFICATION_CHANNEL = 0x202B,
+ VIRTIO_MAGMA_RESP_READ_NOTIFICATION_CHANNEL = 0x202C,
+ /* magma error responses */
+ VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED = 0x3001,
+ VIRTIO_MAGMA_RESP_ERR_INTERNAL = 0x3002,
+ VIRTIO_MAGMA_RESP_ERR_HOST_DISCONNECTED = 0x3003,
+ VIRTIO_MAGMA_RESP_ERR_OUT_OF_MEMORY = 0x3004,
+ VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND = 0x3005,
+ VIRTIO_MAGMA_RESP_ERR_INVALID_ARGUMENT = 0x3006,
+} __attribute((packed));
+
+inline const char* virtio_magma_ctrl_type_string(enum virtio_magma_ctrl_type type) {
+ switch (type) {
+ case VIRTIO_MAGMA_CMD_QUERY: return "VIRTIO_MAGMA_CMD_QUERY";
+ case VIRTIO_MAGMA_RESP_QUERY: return "VIRTIO_MAGMA_RESP_QUERY";
+ case VIRTIO_MAGMA_CMD_QUERY_RETURNS_BUFFER: return "VIRTIO_MAGMA_CMD_QUERY_RETURNS_BUFFER";
+ case VIRTIO_MAGMA_RESP_QUERY_RETURNS_BUFFER: return "VIRTIO_MAGMA_RESP_QUERY_RETURNS_BUFFER";
+ case VIRTIO_MAGMA_CMD_CREATE_CONNECTION: return "VIRTIO_MAGMA_CMD_CREATE_CONNECTION";
+ case VIRTIO_MAGMA_RESP_CREATE_CONNECTION: return "VIRTIO_MAGMA_RESP_CREATE_CONNECTION";
+ case VIRTIO_MAGMA_CMD_RELEASE_CONNECTION: return "VIRTIO_MAGMA_CMD_RELEASE_CONNECTION";
+ case VIRTIO_MAGMA_RESP_RELEASE_CONNECTION: return "VIRTIO_MAGMA_RESP_RELEASE_CONNECTION";
+ case VIRTIO_MAGMA_CMD_GET_ERROR: return "VIRTIO_MAGMA_CMD_GET_ERROR";
+ case VIRTIO_MAGMA_RESP_GET_ERROR: return "VIRTIO_MAGMA_RESP_GET_ERROR";
+ case VIRTIO_MAGMA_CMD_CREATE_CONTEXT: return "VIRTIO_MAGMA_CMD_CREATE_CONTEXT";
+ case VIRTIO_MAGMA_RESP_CREATE_CONTEXT: return "VIRTIO_MAGMA_RESP_CREATE_CONTEXT";
+ case VIRTIO_MAGMA_CMD_RELEASE_CONTEXT: return "VIRTIO_MAGMA_CMD_RELEASE_CONTEXT";
+ case VIRTIO_MAGMA_RESP_RELEASE_CONTEXT: return "VIRTIO_MAGMA_RESP_RELEASE_CONTEXT";
+ case VIRTIO_MAGMA_CMD_CREATE_BUFFER: return "VIRTIO_MAGMA_CMD_CREATE_BUFFER";
+ case VIRTIO_MAGMA_RESP_CREATE_BUFFER: return "VIRTIO_MAGMA_RESP_CREATE_BUFFER";
+ case VIRTIO_MAGMA_CMD_RELEASE_BUFFER: return "VIRTIO_MAGMA_CMD_RELEASE_BUFFER";
+ case VIRTIO_MAGMA_RESP_RELEASE_BUFFER: return "VIRTIO_MAGMA_RESP_RELEASE_BUFFER";
+ case VIRTIO_MAGMA_CMD_DUPLICATE_HANDLE: return "VIRTIO_MAGMA_CMD_DUPLICATE_HANDLE";
+ case VIRTIO_MAGMA_RESP_DUPLICATE_HANDLE: return "VIRTIO_MAGMA_RESP_DUPLICATE_HANDLE";
+ case VIRTIO_MAGMA_CMD_RELEASE_BUFFER_HANDLE: return "VIRTIO_MAGMA_CMD_RELEASE_BUFFER_HANDLE";
+ case VIRTIO_MAGMA_RESP_RELEASE_BUFFER_HANDLE: return "VIRTIO_MAGMA_RESP_RELEASE_BUFFER_HANDLE";
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_ID: return "VIRTIO_MAGMA_CMD_GET_BUFFER_ID";
+ case VIRTIO_MAGMA_RESP_GET_BUFFER_ID: return "VIRTIO_MAGMA_RESP_GET_BUFFER_ID";
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE: return "VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE";
+ case VIRTIO_MAGMA_RESP_GET_BUFFER_SIZE: return "VIRTIO_MAGMA_RESP_GET_BUFFER_SIZE";
+ case VIRTIO_MAGMA_CMD_CLEAN_CACHE: return "VIRTIO_MAGMA_CMD_CLEAN_CACHE";
+ case VIRTIO_MAGMA_RESP_CLEAN_CACHE: return "VIRTIO_MAGMA_RESP_CLEAN_CACHE";
+ case VIRTIO_MAGMA_CMD_SET_CACHE_POLICY: return "VIRTIO_MAGMA_CMD_SET_CACHE_POLICY";
+ case VIRTIO_MAGMA_RESP_SET_CACHE_POLICY: return "VIRTIO_MAGMA_RESP_SET_CACHE_POLICY";
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_CACHE_POLICY: return "VIRTIO_MAGMA_CMD_GET_BUFFER_CACHE_POLICY";
+ case VIRTIO_MAGMA_RESP_GET_BUFFER_CACHE_POLICY: return "VIRTIO_MAGMA_RESP_GET_BUFFER_CACHE_POLICY";
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_IS_MAPPABLE: return "VIRTIO_MAGMA_CMD_GET_BUFFER_IS_MAPPABLE";
+ case VIRTIO_MAGMA_RESP_GET_BUFFER_IS_MAPPABLE: return "VIRTIO_MAGMA_RESP_GET_BUFFER_IS_MAPPABLE";
+ case VIRTIO_MAGMA_CMD_SET_BUFFER_MAPPING_ADDRESS_RANGE: return "VIRTIO_MAGMA_CMD_SET_BUFFER_MAPPING_ADDRESS_RANGE";
+ case VIRTIO_MAGMA_RESP_SET_BUFFER_MAPPING_ADDRESS_RANGE: return "VIRTIO_MAGMA_RESP_SET_BUFFER_MAPPING_ADDRESS_RANGE";
+ case VIRTIO_MAGMA_CMD_MAP: return "VIRTIO_MAGMA_CMD_MAP";
+ case VIRTIO_MAGMA_RESP_MAP: return "VIRTIO_MAGMA_RESP_MAP";
+ case VIRTIO_MAGMA_CMD_MAP_ALIGNED: return "VIRTIO_MAGMA_CMD_MAP_ALIGNED";
+ case VIRTIO_MAGMA_RESP_MAP_ALIGNED: return "VIRTIO_MAGMA_RESP_MAP_ALIGNED";
+ case VIRTIO_MAGMA_CMD_MAP_SPECIFIC: return "VIRTIO_MAGMA_CMD_MAP_SPECIFIC";
+ case VIRTIO_MAGMA_RESP_MAP_SPECIFIC: return "VIRTIO_MAGMA_RESP_MAP_SPECIFIC";
+ case VIRTIO_MAGMA_CMD_UNMAP: return "VIRTIO_MAGMA_CMD_UNMAP";
+ case VIRTIO_MAGMA_RESP_UNMAP: return "VIRTIO_MAGMA_RESP_UNMAP";
+ case VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU: return "VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU";
+ case VIRTIO_MAGMA_RESP_MAP_BUFFER_GPU: return "VIRTIO_MAGMA_RESP_MAP_BUFFER_GPU";
+ case VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU: return "VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU";
+ case VIRTIO_MAGMA_RESP_UNMAP_BUFFER_GPU: return "VIRTIO_MAGMA_RESP_UNMAP_BUFFER_GPU";
+ case VIRTIO_MAGMA_CMD_COMMIT_BUFFER: return "VIRTIO_MAGMA_CMD_COMMIT_BUFFER";
+ case VIRTIO_MAGMA_RESP_COMMIT_BUFFER: return "VIRTIO_MAGMA_RESP_COMMIT_BUFFER";
+ case VIRTIO_MAGMA_CMD_EXPORT: return "VIRTIO_MAGMA_CMD_EXPORT";
+ case VIRTIO_MAGMA_RESP_EXPORT: return "VIRTIO_MAGMA_RESP_EXPORT";
+ case VIRTIO_MAGMA_CMD_IMPORT: return "VIRTIO_MAGMA_CMD_IMPORT";
+ case VIRTIO_MAGMA_RESP_IMPORT: return "VIRTIO_MAGMA_RESP_IMPORT";
+ case VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER: return "VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER";
+ case VIRTIO_MAGMA_RESP_CREATE_COMMAND_BUFFER: return "VIRTIO_MAGMA_RESP_CREATE_COMMAND_BUFFER";
+ case VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER: return "VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER";
+ case VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER: return "VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER";
+ case VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER: return "VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER";
+ case VIRTIO_MAGMA_RESP_SUBMIT_COMMAND_BUFFER: return "VIRTIO_MAGMA_RESP_SUBMIT_COMMAND_BUFFER";
+ case VIRTIO_MAGMA_CMD_EXECUTE_COMMAND_BUFFER_WITH_RESOURCES: return "VIRTIO_MAGMA_CMD_EXECUTE_COMMAND_BUFFER_WITH_RESOURCES";
+ case VIRTIO_MAGMA_RESP_EXECUTE_COMMAND_BUFFER_WITH_RESOURCES: return "VIRTIO_MAGMA_RESP_EXECUTE_COMMAND_BUFFER_WITH_RESOURCES";
+ case VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS: return "VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS";
+ case VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS: return "VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS";
+ case VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS2: return "VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS2";
+ case VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS2: return "VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS2";
+ case VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE: return "VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE";
+ case VIRTIO_MAGMA_RESP_CREATE_SEMAPHORE: return "VIRTIO_MAGMA_RESP_CREATE_SEMAPHORE";
+ case VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE: return "VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE";
+ case VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE: return "VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE";
+ case VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID: return "VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID";
+ case VIRTIO_MAGMA_RESP_GET_SEMAPHORE_ID: return "VIRTIO_MAGMA_RESP_GET_SEMAPHORE_ID";
+ case VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE: return "VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE";
+ case VIRTIO_MAGMA_RESP_SIGNAL_SEMAPHORE: return "VIRTIO_MAGMA_RESP_SIGNAL_SEMAPHORE";
+ case VIRTIO_MAGMA_CMD_RESET_SEMAPHORE: return "VIRTIO_MAGMA_CMD_RESET_SEMAPHORE";
+ case VIRTIO_MAGMA_RESP_RESET_SEMAPHORE: return "VIRTIO_MAGMA_RESP_RESET_SEMAPHORE";
+ case VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES: return "VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES";
+ case VIRTIO_MAGMA_RESP_WAIT_SEMAPHORES: return "VIRTIO_MAGMA_RESP_WAIT_SEMAPHORES";
+ case VIRTIO_MAGMA_CMD_EXPORT_SEMAPHORE: return "VIRTIO_MAGMA_CMD_EXPORT_SEMAPHORE";
+ case VIRTIO_MAGMA_RESP_EXPORT_SEMAPHORE: return "VIRTIO_MAGMA_RESP_EXPORT_SEMAPHORE";
+ case VIRTIO_MAGMA_CMD_IMPORT_SEMAPHORE: return "VIRTIO_MAGMA_CMD_IMPORT_SEMAPHORE";
+ case VIRTIO_MAGMA_RESP_IMPORT_SEMAPHORE: return "VIRTIO_MAGMA_RESP_IMPORT_SEMAPHORE";
+ case VIRTIO_MAGMA_CMD_GET_NOTIFICATION_CHANNEL_HANDLE: return "VIRTIO_MAGMA_CMD_GET_NOTIFICATION_CHANNEL_HANDLE";
+ case VIRTIO_MAGMA_RESP_GET_NOTIFICATION_CHANNEL_HANDLE: return "VIRTIO_MAGMA_RESP_GET_NOTIFICATION_CHANNEL_HANDLE";
+ case VIRTIO_MAGMA_CMD_WAIT_NOTIFICATION_CHANNEL: return "VIRTIO_MAGMA_CMD_WAIT_NOTIFICATION_CHANNEL";
+ case VIRTIO_MAGMA_RESP_WAIT_NOTIFICATION_CHANNEL: return "VIRTIO_MAGMA_RESP_WAIT_NOTIFICATION_CHANNEL";
+ case VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL: return "VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL";
+ case VIRTIO_MAGMA_RESP_READ_NOTIFICATION_CHANNEL: return "VIRTIO_MAGMA_RESP_READ_NOTIFICATION_CHANNEL";
+ case VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED: return "VIRTIO_MAGMA_RESP_ERR_UNIMPLEMENTED";
+ case VIRTIO_MAGMA_RESP_ERR_INTERNAL: return "VIRTIO_MAGMA_RESP_ERR_INTERNAL";
+ case VIRTIO_MAGMA_RESP_ERR_HOST_DISCONNECTED: return "VIRTIO_MAGMA_RESP_ERR_HOST_DISCONNECTED";
+ case VIRTIO_MAGMA_RESP_ERR_OUT_OF_MEMORY: return "VIRTIO_MAGMA_RESP_ERR_OUT_OF_MEMORY";
+ case VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND: return "VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND";
+ case VIRTIO_MAGMA_RESP_ERR_INVALID_ARGUMENT: return "VIRTIO_MAGMA_RESP_ERR_INVALID_ARGUMENT";
+ default: return "[invalid virtio_magma_ctrl_type]";
+ }
+}
+
+inline enum virtio_magma_ctrl_type virtio_magma_expected_response_type(enum virtio_magma_ctrl_type type) {
+ switch (type) {
+ case VIRTIO_MAGMA_CMD_QUERY: return VIRTIO_MAGMA_RESP_QUERY;
+ case VIRTIO_MAGMA_CMD_QUERY_RETURNS_BUFFER: return VIRTIO_MAGMA_RESP_QUERY_RETURNS_BUFFER;
+ case VIRTIO_MAGMA_CMD_CREATE_CONNECTION: return VIRTIO_MAGMA_RESP_CREATE_CONNECTION;
+ case VIRTIO_MAGMA_CMD_RELEASE_CONNECTION: return VIRTIO_MAGMA_RESP_RELEASE_CONNECTION;
+ case VIRTIO_MAGMA_CMD_GET_ERROR: return VIRTIO_MAGMA_RESP_GET_ERROR;
+ case VIRTIO_MAGMA_CMD_CREATE_CONTEXT: return VIRTIO_MAGMA_RESP_CREATE_CONTEXT;
+ case VIRTIO_MAGMA_CMD_RELEASE_CONTEXT: return VIRTIO_MAGMA_RESP_RELEASE_CONTEXT;
+ case VIRTIO_MAGMA_CMD_CREATE_BUFFER: return VIRTIO_MAGMA_RESP_CREATE_BUFFER;
+ case VIRTIO_MAGMA_CMD_RELEASE_BUFFER: return VIRTIO_MAGMA_RESP_RELEASE_BUFFER;
+ case VIRTIO_MAGMA_CMD_DUPLICATE_HANDLE: return VIRTIO_MAGMA_RESP_DUPLICATE_HANDLE;
+ case VIRTIO_MAGMA_CMD_RELEASE_BUFFER_HANDLE: return VIRTIO_MAGMA_RESP_RELEASE_BUFFER_HANDLE;
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_ID: return VIRTIO_MAGMA_RESP_GET_BUFFER_ID;
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_SIZE: return VIRTIO_MAGMA_RESP_GET_BUFFER_SIZE;
+ case VIRTIO_MAGMA_CMD_CLEAN_CACHE: return VIRTIO_MAGMA_RESP_CLEAN_CACHE;
+ case VIRTIO_MAGMA_CMD_SET_CACHE_POLICY: return VIRTIO_MAGMA_RESP_SET_CACHE_POLICY;
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_CACHE_POLICY: return VIRTIO_MAGMA_RESP_GET_BUFFER_CACHE_POLICY;
+ case VIRTIO_MAGMA_CMD_GET_BUFFER_IS_MAPPABLE: return VIRTIO_MAGMA_RESP_GET_BUFFER_IS_MAPPABLE;
+ case VIRTIO_MAGMA_CMD_SET_BUFFER_MAPPING_ADDRESS_RANGE: return VIRTIO_MAGMA_RESP_SET_BUFFER_MAPPING_ADDRESS_RANGE;
+ case VIRTIO_MAGMA_CMD_MAP: return VIRTIO_MAGMA_RESP_MAP;
+ case VIRTIO_MAGMA_CMD_MAP_ALIGNED: return VIRTIO_MAGMA_RESP_MAP_ALIGNED;
+ case VIRTIO_MAGMA_CMD_MAP_SPECIFIC: return VIRTIO_MAGMA_RESP_MAP_SPECIFIC;
+ case VIRTIO_MAGMA_CMD_UNMAP: return VIRTIO_MAGMA_RESP_UNMAP;
+ case VIRTIO_MAGMA_CMD_MAP_BUFFER_GPU: return VIRTIO_MAGMA_RESP_MAP_BUFFER_GPU;
+ case VIRTIO_MAGMA_CMD_UNMAP_BUFFER_GPU: return VIRTIO_MAGMA_RESP_UNMAP_BUFFER_GPU;
+ case VIRTIO_MAGMA_CMD_COMMIT_BUFFER: return VIRTIO_MAGMA_RESP_COMMIT_BUFFER;
+ case VIRTIO_MAGMA_CMD_EXPORT: return VIRTIO_MAGMA_RESP_EXPORT;
+ case VIRTIO_MAGMA_CMD_IMPORT: return VIRTIO_MAGMA_RESP_IMPORT;
+ case VIRTIO_MAGMA_CMD_CREATE_COMMAND_BUFFER: return VIRTIO_MAGMA_RESP_CREATE_COMMAND_BUFFER;
+ case VIRTIO_MAGMA_CMD_RELEASE_COMMAND_BUFFER: return VIRTIO_MAGMA_RESP_RELEASE_COMMAND_BUFFER;
+ case VIRTIO_MAGMA_CMD_SUBMIT_COMMAND_BUFFER: return VIRTIO_MAGMA_RESP_SUBMIT_COMMAND_BUFFER;
+ case VIRTIO_MAGMA_CMD_EXECUTE_COMMAND_BUFFER_WITH_RESOURCES: return VIRTIO_MAGMA_RESP_EXECUTE_COMMAND_BUFFER_WITH_RESOURCES;
+ case VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS: return VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS;
+ case VIRTIO_MAGMA_CMD_EXECUTE_IMMEDIATE_COMMANDS2: return VIRTIO_MAGMA_RESP_EXECUTE_IMMEDIATE_COMMANDS2;
+ case VIRTIO_MAGMA_CMD_CREATE_SEMAPHORE: return VIRTIO_MAGMA_RESP_CREATE_SEMAPHORE;
+ case VIRTIO_MAGMA_CMD_RELEASE_SEMAPHORE: return VIRTIO_MAGMA_RESP_RELEASE_SEMAPHORE;
+ case VIRTIO_MAGMA_CMD_GET_SEMAPHORE_ID: return VIRTIO_MAGMA_RESP_GET_SEMAPHORE_ID;
+ case VIRTIO_MAGMA_CMD_SIGNAL_SEMAPHORE: return VIRTIO_MAGMA_RESP_SIGNAL_SEMAPHORE;
+ case VIRTIO_MAGMA_CMD_RESET_SEMAPHORE: return VIRTIO_MAGMA_RESP_RESET_SEMAPHORE;
+ case VIRTIO_MAGMA_CMD_WAIT_SEMAPHORES: return VIRTIO_MAGMA_RESP_WAIT_SEMAPHORES;
+ case VIRTIO_MAGMA_CMD_EXPORT_SEMAPHORE: return VIRTIO_MAGMA_RESP_EXPORT_SEMAPHORE;
+ case VIRTIO_MAGMA_CMD_IMPORT_SEMAPHORE: return VIRTIO_MAGMA_RESP_IMPORT_SEMAPHORE;
+ case VIRTIO_MAGMA_CMD_GET_NOTIFICATION_CHANNEL_HANDLE: return VIRTIO_MAGMA_RESP_GET_NOTIFICATION_CHANNEL_HANDLE;
+ case VIRTIO_MAGMA_CMD_WAIT_NOTIFICATION_CHANNEL: return VIRTIO_MAGMA_RESP_WAIT_NOTIFICATION_CHANNEL;
+ case VIRTIO_MAGMA_CMD_READ_NOTIFICATION_CHANNEL: return VIRTIO_MAGMA_RESP_READ_NOTIFICATION_CHANNEL;
+ default: return VIRTIO_MAGMA_RESP_ERR_INVALID_COMMAND;
+ }
+}
+
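A minimal guest-side sketch of how this helper can be used to check a device reply against the command that was sent. The function name below and the use of the kernel's le32_to_cpu() are illustrative, not part of this header:

static bool virtio_magma_response_matches(__le32 cmd_type, __le32 resp_type)
{
	/* Compare the reply type against the one implied by the command. */
	return le32_to_cpu(resp_type) ==
	       virtio_magma_expected_response_type(le32_to_cpu(cmd_type));
}

A driver would apply this to the type fields of the control header it queued and the response it received, and treat a mismatch as a protocol error.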
+struct virtio_magma_ctrl_hdr {
+ __le32 type;
+ __le32 flags;
+} __attribute((packed));
+
+struct virtio_magma_query_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le32 file_descriptor;
+ __le64 id;
+} __attribute((packed));
+
+struct virtio_magma_query_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 value_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_query_returns_buffer_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le32 file_descriptor;
+ __le64 id;
+} __attribute((packed));
+
+struct virtio_magma_query_returns_buffer_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 handle_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_create_connection_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le32 file_descriptor;
+} __attribute((packed));
+
+struct virtio_magma_create_connection_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_release_connection_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+} __attribute((packed));
+
+struct virtio_magma_release_connection_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_get_error_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+} __attribute((packed));
+
+struct virtio_magma_get_error_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_create_context_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+} __attribute((packed));
+
+struct virtio_magma_create_context_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 context_id_out;
+} __attribute((packed));
+
+struct virtio_magma_release_context_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le32 context_id;
+} __attribute((packed));
+
+struct virtio_magma_release_context_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_create_buffer_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 size;
+} __attribute((packed));
+
+struct virtio_magma_create_buffer_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 size_out;
+ __le64 buffer_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_release_buffer_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 buffer;
+} __attribute((packed));
+
+struct virtio_magma_release_buffer_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_duplicate_handle_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le32 buffer_handle;
+} __attribute((packed));
+
+struct virtio_magma_duplicate_handle_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer_handle_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_release_buffer_handle_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le32 buffer_handle;
+} __attribute((packed));
+
+struct virtio_magma_release_buffer_handle_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_id_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_id_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_size_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_size_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_clean_cache_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer;
+ __le64 offset;
+ __le64 size;
+ __le64 operation;
+} __attribute((packed));
+
+struct virtio_magma_clean_cache_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_set_cache_policy_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer;
+ __le64 policy;
+} __attribute((packed));
+
+struct virtio_magma_set_cache_policy_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_cache_policy_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_cache_policy_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 cache_policy_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_is_mappable_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer;
+ __le32 flags;
+} __attribute((packed));
+
+struct virtio_magma_get_buffer_is_mappable_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 is_mappable_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_set_buffer_mapping_address_range_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer;
+ __le32 handle;
+} __attribute((packed));
+
+struct virtio_magma_set_buffer_mapping_address_range_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_map_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 buffer;
+} __attribute((packed));
+
+struct virtio_magma_map_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 addr_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_map_aligned_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 buffer;
+ __le64 alignment;
+} __attribute((packed));
+
+struct virtio_magma_map_aligned_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 addr_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_map_specific_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 buffer;
+ __le64 addr;
+ __le64 offset;
+ __le64 length;
+} __attribute((packed));
+
+struct virtio_magma_map_specific_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_unmap_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 buffer;
+} __attribute((packed));
+
+struct virtio_magma_unmap_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_map_buffer_gpu_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 buffer;
+ __le64 page_offset;
+ __le64 page_count;
+ __le64 gpu_va;
+ __le64 map_flags;
+} __attribute((packed));
+
+struct virtio_magma_map_buffer_gpu_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_unmap_buffer_gpu_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 buffer;
+ __le64 gpu_va;
+} __attribute((packed));
+
+struct virtio_magma_unmap_buffer_gpu_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_commit_buffer_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 buffer;
+ __le64 page_offset;
+ __le64 page_count;
+} __attribute((packed));
+
+struct virtio_magma_commit_buffer_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_export_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 buffer;
+} __attribute((packed));
+
+struct virtio_magma_export_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer_handle_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_import_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le32 buffer_handle;
+} __attribute((packed));
+
+struct virtio_magma_import_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_create_command_buffer_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 size;
+} __attribute((packed));
+
+struct virtio_magma_create_command_buffer_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_release_command_buffer_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 command_buffer;
+} __attribute((packed));
+
+struct virtio_magma_release_command_buffer_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_submit_command_buffer_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 command_buffer;
+ __le32 context_id;
+} __attribute((packed));
+
+struct virtio_magma_submit_command_buffer_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_execute_command_buffer_with_resources_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le32 context_id;
+ __le64 command_buffer;
+ __le64 resources;
+ __le64 semaphore_ids;
+} __attribute((packed));
+
+struct virtio_magma_execute_command_buffer_with_resources_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_execute_immediate_commands_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le32 context_id;
+ __le64 command_count;
+ __le64 command_buffers;
+} __attribute((packed));
+
+struct virtio_magma_execute_immediate_commands_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_execute_immediate_commands2_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le32 context_id;
+ __le64 command_count;
+ __le64 command_buffers;
+} __attribute((packed));
+
+struct virtio_magma_execute_immediate_commands2_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_create_semaphore_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+} __attribute((packed));
+
+struct virtio_magma_create_semaphore_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 semaphore_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_release_semaphore_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 semaphore;
+} __attribute((packed));
+
+struct virtio_magma_release_semaphore_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_get_semaphore_id_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 semaphore;
+} __attribute((packed));
+
+struct virtio_magma_get_semaphore_id_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_signal_semaphore_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 semaphore;
+} __attribute((packed));
+
+struct virtio_magma_signal_semaphore_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_reset_semaphore_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 semaphore;
+} __attribute((packed));
+
+struct virtio_magma_reset_semaphore_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+} __attribute((packed));
+
+struct virtio_magma_wait_semaphores_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 semaphores;
+ __le32 count;
+ __le64 timeout_ms;
+ u8 wait_all;
+} __attribute((packed));
+
+struct virtio_magma_wait_semaphores_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_export_semaphore_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 semaphore;
+} __attribute((packed));
+
+struct virtio_magma_export_semaphore_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 semaphore_handle_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_import_semaphore_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le32 semaphore_handle;
+} __attribute((packed));
+
+struct virtio_magma_import_semaphore_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 semaphore_out;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_get_notification_channel_handle_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+} __attribute((packed));
+
+struct virtio_magma_get_notification_channel_handle_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le32 result_return;
+} __attribute((packed));
+
+struct virtio_magma_wait_notification_channel_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 timeout_ns;
+} __attribute((packed));
+
+struct virtio_magma_wait_notification_channel_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 result_return;
+} __attribute((packed));
+
+struct virtio_magma_read_notification_channel_ctrl {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 connection;
+ __le64 buffer;
+ __le64 buffer_size;
+} __attribute((packed));
+
+struct virtio_magma_read_notification_channel_resp {
+ struct virtio_magma_ctrl_hdr hdr;
+ __le64 buffer_size_out;
+ __le64 result_return;
+} __attribute((packed));
+
+#endif /* _LINUX_VIRTIO_MAGMA_H */
diff --git a/include/uapi/linux/virtio_wl.h b/include/uapi/linux/virtio_wl.h
new file mode 100644
index 0000000..b8cbb4f
--- /dev/null
+++ b/include/uapi/linux/virtio_wl.h
@@ -0,0 +1,118 @@
+#ifndef _LINUX_VIRTIO_WL_H
+#define _LINUX_VIRTIO_WL_H
+/*
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ */
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtwl.h>
+
+#define VIRTWL_IN_BUFFER_SIZE 4096
+#define VIRTWL_OUT_BUFFER_SIZE 4096
+#define VIRTWL_VQ_IN 0
+#define VIRTWL_VQ_OUT 1
+#define VIRTWL_QUEUE_COUNT 2
+#define VIRTWL_MAX_QUEUES 4
+#define VIRTWL_MAX_ALLOC 0x800
+#define VIRTWL_PFN_SHIFT 12
+
+/* Enables the transition to new flag semantics */
+#define VIRTIO_WL_F_TRANS_FLAGS 1
+
+/* Enables virtio-magma child device */
+#define VIRTIO_WL_F_MAGMA 2
+
+struct virtio_wl_config {
+};
+
+/*
+ * Each control message is a struct virtio_wl_ctrl_hdr, or one of the
+ * extended structures noted in the comments below.
+ */
+enum virtio_wl_ctrl_type {
+ VIRTIO_WL_CMD_VFD_NEW = 0x100, /* virtio_wl_ctrl_vfd_new */
+ VIRTIO_WL_CMD_VFD_CLOSE, /* virtio_wl_ctrl_vfd */
+ VIRTIO_WL_CMD_VFD_SEND, /* virtio_wl_ctrl_vfd_send + data */
+ VIRTIO_WL_CMD_VFD_RECV, /* virtio_wl_ctrl_vfd_recv + data */
+ VIRTIO_WL_CMD_VFD_NEW_CTX, /* virtio_wl_ctrl_vfd_new */
+ VIRTIO_WL_CMD_VFD_NEW_PIPE, /* virtio_wl_ctrl_vfd_new */
+ VIRTIO_WL_CMD_VFD_HUP, /* virtio_wl_ctrl_vfd */
+ VIRTIO_WL_CMD_VFD_NEW_DMABUF, /* virtio_wl_ctrl_vfd_new */
+ VIRTIO_WL_CMD_VFD_DMABUF_SYNC, /* virtio_wl_ctrl_vfd_dmabuf_sync */
+
+ VIRTIO_WL_RESP_OK = 0x1000,
+ VIRTIO_WL_RESP_VFD_NEW = 0x1001, /* virtio_wl_ctrl_vfd_new */
+ VIRTIO_WL_RESP_VFD_NEW_DMABUF = 0x1002, /* virtio_wl_ctrl_vfd_new */
+
+ VIRTIO_WL_RESP_ERR = 0x1100,
+ VIRTIO_WL_RESP_OUT_OF_MEMORY,
+ VIRTIO_WL_RESP_INVALID_ID,
+ VIRTIO_WL_RESP_INVALID_TYPE,
+ VIRTIO_WL_RESP_INVALID_FLAGS,
+ VIRTIO_WL_RESP_INVALID_CMD,
+};
+
+struct virtio_wl_ctrl_hdr {
+ __le32 type; /* one of virtio_wl_ctrl_type */
+ __le32 flags; /* always 0 */
+};
+
+enum virtio_wl_vfd_flags {
+ VIRTIO_WL_VFD_WRITE = 0x1, /* intended to be written by guest */
+ VIRTIO_WL_VFD_READ = 0x2, /* intended to be read by guest */
+};
+
+struct virtio_wl_ctrl_vfd {
+ struct virtio_wl_ctrl_hdr hdr;
+ __le32 vfd_id;
+};
+
+/*
+ * If this command is sent to the guest, it indicates that the VFD has been
+ * created and the fields indicate the properties of the VFD being offered.
+ *
+ * If this command is sent to the host, it represents a request to create a VFD
+ * of the given properties. The pfn field is ignored by the host.
+ */
+struct virtio_wl_ctrl_vfd_new {
+ struct virtio_wl_ctrl_hdr hdr;
+ __le32 vfd_id; /* MSB indicates device allocated vfd */
+ __le32 flags; /* virtio_wl_vfd_flags */
+ __le64 pfn; /* first guest physical page frame number if VFD_MAP */
+ __le32 size; /* size in bytes if VIRTIO_WL_CMD_VFD_NEW* */
+ /* buffer description if VIRTIO_WL_CMD_VFD_NEW_DMABUF */
+ struct {
+ __le32 width; /* width in pixels */
+ __le32 height; /* height in pixels */
+ __le32 format; /* fourcc format */
+ __le32 stride0; /* return stride0 */
+ __le32 stride1; /* return stride1 */
+ __le32 stride2; /* return stride2 */
+ __le32 offset0; /* return offset0 */
+ __le32 offset1; /* return offset1 */
+ __le32 offset2; /* return offset2 */
+ } dmabuf;
+};
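As a sketch of the guest-to-host direction described in the comment above, a driver might fill the request like this before queueing it on VIRTWL_VQ_OUT. The helper name and the fixed read/write flags are assumptions; per the comment, pfn is ignored by the host and supplied in its reply:

static void virtwl_fill_new_vfd_request(struct virtio_wl_ctrl_vfd_new *req,
					u32 vfd_id, u32 size)
{
	memset(req, 0, sizeof(*req));
	req->hdr.type = cpu_to_le32(VIRTIO_WL_CMD_VFD_NEW);
	req->vfd_id = cpu_to_le32(vfd_id);
	req->flags = cpu_to_le32(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ);
	req->size = cpu_to_le32(size);
	/* req->pfn stays 0; the device fills it in the VIRTIO_WL_RESP_VFD_NEW reply. */
}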
+
+struct virtio_wl_ctrl_vfd_send {
+ struct virtio_wl_ctrl_hdr hdr;
+ __le32 vfd_id;
+ __le32 vfd_count; /* struct is followed by this many IDs */
+ /* the remainder is raw data */
+};
+
+struct virtio_wl_ctrl_vfd_recv {
+ struct virtio_wl_ctrl_hdr hdr;
+ __le32 vfd_id;
+ __le32 vfd_count; /* struct is followed by this many IDs */
+ /* the remainder is raw data */
+};
+
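The send and receive messages above share the same wire layout: the fixed header, then vfd_count 32-bit VFD IDs, then the raw payload bytes. A small sketch of the buffer size a driver would allocate for such a message (the helper name is illustrative):

static size_t virtwl_vfd_msg_size(u32 vfd_count, size_t data_len)
{
	/* header + array of vfd IDs + trailing raw data */
	return sizeof(struct virtio_wl_ctrl_vfd_send) +
	       vfd_count * sizeof(__le32) + data_len;
}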
+struct virtio_wl_ctrl_vfd_dmabuf_sync {
+ struct virtio_wl_ctrl_hdr hdr;
+ __le32 vfd_id;
+ __le32 flags;
+};
+
+#endif /* _LINUX_VIRTIO_WL_H */
diff --git a/include/uapi/linux/virtmagma.h b/include/uapi/linux/virtmagma.h
new file mode 100644
index 0000000..5da7a3a
--- /dev/null
+++ b/include/uapi/linux/virtmagma.h
@@ -0,0 +1,46 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _LINUX_VIRTMAGMA_H
+#define _LINUX_VIRTMAGMA_H
+
+#include <asm/ioctl.h>
+#include <linux/types.h>
+
+#define VIRTMAGMA_IOCTL_BASE 'm'
+#define VIRTMAGMA_IO(nr) _IO(VIRTMAGMA_IOCTL_BASE, nr)
+#define VIRTMAGMA_IOR(nr, type) _IOR(VIRTMAGMA_IOCTL_BASE, nr, type)
+#define VIRTMAGMA_IOW(nr, type) _IOW(VIRTMAGMA_IOCTL_BASE, nr, type)
+#define VIRTMAGMA_IOWR(nr, type) _IOWR(VIRTMAGMA_IOCTL_BASE, nr, type)
+#define VIRTMAGMA_MAKE_VERSION(major, minor, patch) \
+ (((major) << 24) | ((minor) << 12) | (patch))
+#define VIRTMAGMA_GET_VERSION(version, major, minor, patch) (\
+ (major = ((version) >> 24)), \
+ (minor = ((version) >> 12) & 0x3FF), \
+ (patch = (version) & 0x3FF), (version))
+
+#define VIRTMAGMA_HANDSHAKE_SEND 0x46434853
+#define VIRTMAGMA_HANDSHAKE_RECV 0x474F4F47
+#define VIRTMAGMA_VERSION VIRTMAGMA_MAKE_VERSION(0,1,0)
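A worked example of the version packing above: VIRTMAGMA_VERSION encodes 0.1.0 as (0 << 24) | (1 << 12) | 0 = 0x1000, and VIRTMAGMA_GET_VERSION recovers the components:

	__u32 version = VIRTMAGMA_VERSION;	/* 0x1000 */
	__u32 major, minor, patch;

	VIRTMAGMA_GET_VERSION(version, major, minor, patch);
	/* major == 0, minor == 1, patch == 0 */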
+struct virtmagma_ioctl_args_handshake {
+ __u32 handshake_inout;
+ __u32 version_out;
+};
+
+struct virtmagma_ioctl_args_get_mmfd {
+ __s32 fd_out;
+};
+
+struct virtmagma_ioctl_args_magma_command {
+ __u64 request_address;
+ __u64 request_size;
+ __u64 response_address;
+ __u64 response_size;
+};
+
+#define VIRTMAGMA_IOCTL_HANDSHAKE VIRTMAGMA_IOWR(0x00, struct virtmagma_ioctl_args_handshake)
+#define VIRTMAGMA_IOCTL_GET_MMFD VIRTMAGMA_IOWR(0x01, struct virtmagma_ioctl_args_get_mmfd)
+#define VIRTMAGMA_IOCTL_MAGMA_COMMAND VIRTMAGMA_IOWR(0x02, struct virtmagma_ioctl_args_magma_command)
+
+#endif /* _LINUX_VIRTMAGMA_H */
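An illustrative userspace use of the handshake ioctl defined above; the /dev/magma0 device node name is an assumption, everything else comes from this header:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/virtmagma.h>

int main(void)
{
	struct virtmagma_ioctl_args_handshake args = {
		.handshake_inout = VIRTMAGMA_HANDSHAKE_SEND,
	};
	int fd = open("/dev/magma0", O_RDWR);	/* device node name assumed */

	if (fd < 0 || ioctl(fd, VIRTMAGMA_IOCTL_HANDSHAKE, &args))
		return 1;
	if (args.handshake_inout != VIRTMAGMA_HANDSHAKE_RECV)
		return 1;	/* device did not answer with the expected magic */
	printf("virtmagma version 0x%x\n", args.version_out);
	return 0;
}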
diff --git a/include/uapi/linux/virtwl.h b/include/uapi/linux/virtwl.h
new file mode 100644
index 0000000..9390413
--- /dev/null
+++ b/include/uapi/linux/virtwl.h
@@ -0,0 +1,64 @@
+#ifndef _LINUX_VIRTWL_H
+#define _LINUX_VIRTWL_H
+
+#include <asm/ioctl.h>
+#include <linux/types.h>
+
+#define VIRTWL_SEND_MAX_ALLOCS 28
+
+#define VIRTWL_IOCTL_BASE 'w'
+#define VIRTWL_IO(nr) _IO(VIRTWL_IOCTL_BASE, nr)
+#define VIRTWL_IOR(nr, type) _IOR(VIRTWL_IOCTL_BASE, nr, type)
+#define VIRTWL_IOW(nr, type) _IOW(VIRTWL_IOCTL_BASE, nr, type)
+#define VIRTWL_IOWR(nr, type) _IOWR(VIRTWL_IOCTL_BASE, nr, type)
+
+enum virtwl_ioctl_new_type {
+ VIRTWL_IOCTL_NEW_CTX, /* open a new wayland connection context */
+ VIRTWL_IOCTL_NEW_ALLOC, /* create a new virtwl shm allocation */
+ /* create a new virtwl pipe that is readable via the returned fd */
+ VIRTWL_IOCTL_NEW_PIPE_READ,
+ /* create a new virtwl pipe that is writable via the returned fd */
+ VIRTWL_IOCTL_NEW_PIPE_WRITE,
+ /* create a new virtwl dmabuf that is writable via the returned fd */
+ VIRTWL_IOCTL_NEW_DMABUF,
+};
+
+struct virtwl_ioctl_new {
+ __u32 type; /* VIRTWL_IOCTL_NEW_* */
+ int fd; /* return fd */
+ __u32 flags; /* currently always 0 */
+ union {
+ /* size of allocation if type == VIRTWL_IOCTL_NEW_ALLOC */
+ __u32 size;
+ /* buffer description if type == VIRTWL_IOCTL_NEW_DMABUF */
+ struct {
+ __u32 width; /* width in pixels */
+ __u32 height; /* height in pixels */
+ __u32 format; /* fourcc format */
+ __u32 stride0; /* return stride0 */
+ __u32 stride1; /* return stride1 */
+ __u32 stride2; /* return stride2 */
+ __u32 offset0; /* return offset0 */
+ __u32 offset1; /* return offset1 */
+ __u32 offset2; /* return offset2 */
+ } dmabuf;
+ };
+};
+
+struct virtwl_ioctl_txn {
+ int fds[VIRTWL_SEND_MAX_ALLOCS];
+ __u32 len;
+ __u8 data[0];
+};
+
+struct virtwl_ioctl_dmabuf_sync {
+ __u32 flags; /* synchronization flags (see dma-buf.h) */
+};
+
+#define VIRTWL_IOCTL_NEW VIRTWL_IOWR(0x00, struct virtwl_ioctl_new)
+#define VIRTWL_IOCTL_SEND VIRTWL_IOR(0x01, struct virtwl_ioctl_txn)
+#define VIRTWL_IOCTL_RECV VIRTWL_IOW(0x02, struct virtwl_ioctl_txn)
+#define VIRTWL_IOCTL_DMABUF_SYNC VIRTWL_IOR(0x03, \
+ struct virtwl_ioctl_dmabuf_sync)
+
+#endif /* _LINUX_VIRTWL_H */
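For comparison, an illustrative userspace request for a shared-memory allocation through VIRTWL_IOCTL_NEW; the caller is assumed to already hold an open fd on the virtio-wl device node, whose name this header does not define:

#include <sys/ioctl.h>
#include <linux/virtwl.h>

/* Returns an fd backing a new size-byte allocation, or -1 on error. */
int virtwl_new_alloc(int wl_fd, __u32 size)
{
	struct virtwl_ioctl_new req = {
		.type = VIRTWL_IOCTL_NEW_ALLOC,
		.fd = -1,
		.size = size,
	};

	if (ioctl(wl_fd, VIRTWL_IOCTL_NEW, &req))
		return -1;
	return req.fd;
}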