Merge 4.15-rc6 into char-misc-next

We want the fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/Documentation/ABI/testing/sysfs-bus-siox b/Documentation/ABI/testing/sysfs-bus-siox
new file mode 100644
index 0000000..fed7c37
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-siox
@@ -0,0 +1,87 @@
+What:		/sys/bus/siox/devices/siox-X/active
+KernelVersion:	4.16
+Contact:	Gavin Schenk <g.schenk@eckelmann.de>, Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Description:
+		On reading, represents the current state of the bus. If it
+		contains a "0" the bus is stopped and connected devices are
+		expected to do nothing because their watchdog has triggered.
+		When the file contains a "1" the bus is running and
+		periodically does a push-pull cycle to write data to and read
+		data from the connected devices.
+		Writing a "0" or "1" moves the bus to the described state.
+
+What:		/sys/bus/siox/devices/siox-X/device_add
+KernelVersion:	4.16
+Contact:	Gavin Schenk <g.schenk@eckelmann.de>, Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Description:
+		Write-only file. Write
+
+			<type> <inbytes> <outbytes> <statustype>
+
+		to add a new device dynamically. <type> is the name used to match
+		to a driver (similar to the platform bus). <inbytes> and <outbytes> define
+		the length of the input and output shift registers in bytes respectively.
+		<statustype> defines the 4 bit device type that is checked to identify
+		connection problems.
+		The new device is added to the end of the existing chain.
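+
+		For example, writing
+
+			example-dev 2 2 4
+
+		(where "example-dev" is a hypothetical device type) adds a
+		device with 2-byte input and output shift registers and
+		status type 4 to the end of the chain.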
+
+What:		/sys/bus/siox/devices/siox-X/device_remove
+KernelVersion:	4.16
+Contact:	Gavin Schenk <g.schenk@eckelmann.de>, Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Description:
+		Write-only file. A single write removes the last device in the siox chain.
+
+What:		/sys/bus/siox/devices/siox-X/poll_interval_ns
+KernelVersion:	4.16
+Contact:	Gavin Schenk <g.schenk@eckelmann.de>, Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Description:
+		Defines the interval between two poll cycles in nanoseconds.
+		Note that the value is rounded to jiffies on writing. On
+		reading the current value is returned.
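+		For example, writing 100000000 requests a poll cycle every
+		100 ms; the effective interval is this value rounded to the
+		jiffy resolution of the running kernel.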
+
+What:		/sys/bus/siox/devices/siox-X-Y/connected
+KernelVersion:	4.16
+Contact:	Gavin Schenk <g.schenk@eckelmann.de>, Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Description:
+		Read-only value. "0" means the Yth device on siox bus X isn't "connected",
+		i.e. communication with it is not ensured. "1" signals a working connection.
+
+What:		/sys/bus/siox/devices/siox-X-Y/inbytes
+KernelVersion:	4.16
+Contact:	Gavin Schenk <g.schenk@eckelmann.de>, Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Description:
+		Read-only value reporting the inbytes value provided to siox-X/device_add.
+
+What:		/sys/bus/siox/devices/siox-X-Y/status_errors
+KernelVersion:	4.16
+Contact:	Gavin Schenk <g.schenk@eckelmann.de>, Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Description:
+		Counts the number of time intervals when the read status byte doesn't yield the
+		expected value.
+
+What:		/sys/bus/siox/devices/siox-X-Y/type
+KernelVersion:	4.16
+Contact:	Gavin Schenk <g.schenk@eckelmann.de>, Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Description:
+		Read-only value reporting the type value provided to siox-X/device_add.
+
+What:		/sys/bus/siox/devices/siox-X-Y/watchdog
+KernelVersion:	4.16
+Contact:	Gavin Schenk <g.schenk@eckelmann.de>, Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Description:
+		Read-only value reporting if the watchdog of the siox device is
+		active. "0" means the watchdog is not active and the device is expected to
+		be operational. "1" means the watchdog keeps the device in reset.
+
+What:		/sys/bus/siox/devices/siox-X-Y/watchdog_errors
+KernelVersion:	4.16
+Contact:	Gavin Schenk <g.schenk@eckelmann.de>, Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Description:
+		Read-only value reporting the number of time intervals during
+		which the watchdog was active.
+
+What:		/sys/bus/siox/devices/siox-X-Y/outbytes
+KernelVersion:	4.16
+Contact:	Gavin Schenk <g.schenk@eckelmann.de>, Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Description:
+		Read-only value reporting the outbytes value provided to siox-X/device_add.
diff --git a/Documentation/devicetree/bindings/eeprom/at25.txt b/Documentation/devicetree/bindings/eeprom/at25.txt
index e823d90..b3bde97 100644
--- a/Documentation/devicetree/bindings/eeprom/at25.txt
+++ b/Documentation/devicetree/bindings/eeprom/at25.txt
@@ -11,7 +11,9 @@
 - spi-max-frequency : max spi frequency to use
 - pagesize : size of the eeprom page
 - size : total eeprom size in bytes
-- address-width : number of address bits (one of 8, 16, or 24)
+- address-width : number of address bits (one of 8, 9, 16, or 24).
+  For 9 bits, the MSB of the address is sent as bit 3 of the instruction
+  byte, before the address byte.
 
 Optional properties:
 - spi-cpha : SPI shifted clock phase, as per spi-bus bindings.
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
index 60bec47..265bdb7 100644
--- a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
@@ -6,12 +6,17 @@
   - "rockchip,rk3188-efuse" - for RK3188 SoCs.
   - "rockchip,rk3228-efuse" - for RK3228 SoCs.
   - "rockchip,rk3288-efuse" - for RK3288 SoCs.
+  - "rockchip,rk3328-efuse" - for RK3328 SoCs.
   - "rockchip,rk3368-efuse" - for RK3368 SoCs.
   - "rockchip,rk3399-efuse" - for RK3399 SoCs.
 - reg: Should contain the registers location and exact eFuse size
 - clocks: Should be the clock id of eFuse
 - clock-names: Should be "pclk_efuse"
 
+Optional properties:
+- rockchip,efuse-size: Should be the exact eFuse size in bytes. If this
+  property is defined, the eFuse size given in the <reg> property is ignored.
+
 Deprecated properties:
 - compatible: "rockchip,rockchip-efuse"
   Old efuse compatible value compatible to rk3066a, rk3188 and rk3288
diff --git a/Documentation/devicetree/bindings/siox/eckelmann,siox-gpio.txt b/Documentation/devicetree/bindings/siox/eckelmann,siox-gpio.txt
new file mode 100644
index 0000000..55259cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/siox/eckelmann,siox-gpio.txt
@@ -0,0 +1,19 @@
+Eckelmann SIOX GPIO bus
+
+Required properties:
+- compatible : "eckelmann,siox-gpio"
+- din-gpios, dout-gpios, dclk-gpios, dld-gpios: GPIO references for the
+    corresponding bus signals.
+
+Examples:
+
+        siox {
+                compatible = "eckelmann,siox-gpio";
+                pinctrl-names = "default";
+                pinctrl-0 = <&pinctrl_siox>;
+
+                din-gpios = <&gpio6 11 0>;
+                dout-gpios = <&gpio6 8 0>;
+                dclk-gpios = <&gpio6 9 0>;
+                dld-gpios = <&gpio6 10 0>;
+        };
diff --git a/Documentation/devicetree/bindings/slimbus/bus.txt b/Documentation/devicetree/bindings/slimbus/bus.txt
new file mode 100644
index 0000000..52fa642
--- /dev/null
+++ b/Documentation/devicetree/bindings/slimbus/bus.txt
@@ -0,0 +1,50 @@
+SLIM (Serial Low Power Interchip Media Bus) bus
+
+SLIMbus is a 2-wire bus, and is used to communicate with peripheral
+components like audio codecs.
+
+Required property for SLIMbus controller node:
+- compatible	- name of SLIMbus controller
+
+Child nodes:
+Every SLIMbus controller node can contain zero or more child nodes
+representing slave devices on the bus. Every SLIMbus slave device is
+uniquely determined by the enumeration address containing 4 fields:
+Manufacturer ID, Product code, Device index, and Instance value for
+the device.
+If a child node is not present, the device is instantiated after device
+discovery (i.e. after the slave device reports itself present).
+
+In some cases it may be necessary to describe non-probeable device
+details such as non-standard ways of powering up a device. In
+such cases, child nodes for those devices will be present as
+slaves of the SLIMbus controller, as detailed below.
+
+Required property for SLIMbus child node if it is present:
+- reg		- Should be ('Device index', 'Instance ID') from the SLIMbus
+		  Enumeration Address.
+		  'Device index' uniquely identifies multiple Devices within
+		  a single Component.
+		  'Instance ID' is for the cases where multiple Devices of the
+		  same type or Class are attached to the bus.
+
+- compatible	- "slimMID,PID". The textual representation of Manufacturer ID
+		  and Product Code shall be in lower case hexadecimal with
+		  leading zeroes suppressed.
+
+SLIMbus example for Qualcomm's slimbus manager component:
+
+	slim@28080000 {
+		compatible = "qcom,apq8064-slim", "qcom,slim";
+		reg = <0x28080000 0x2000>;
+		interrupts = <0 33 0>;
+		clocks = <&lcc SLIMBUS_SRC>, <&lcc AUDIO_SLIMBUS_CLK>;
+		clock-names = "iface", "core";
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		codec: wcd9310@1,0 {
+			compatible = "slim217,60";
+			reg = <1 0>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/slimbus/slim-qcom-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-qcom-ctrl.txt
new file mode 100644
index 0000000..922dcb8
--- /dev/null
+++ b/Documentation/devicetree/bindings/slimbus/slim-qcom-ctrl.txt
@@ -0,0 +1,39 @@
+Qualcomm SLIMbus controller
+This controller is used when the application processor's driver controls the
+SLIMbus master component.
+
+Required properties:
+
+ - #address-cells - refer to Documentation/devicetree/bindings/slimbus/bus.txt
+ - #size-cells	- refer to Documentation/devicetree/bindings/slimbus/bus.txt
+
+ - reg : Offset and length of the register region(s) for the device
+ - reg-names : Register region name(s) referenced in reg above
+	 Required register resource entries are:
+	 "ctrl": Physical address of controller register blocks
+	 "slew": required for the "qcom,apq8064-slim" SoC.
+ - compatible : should be "qcom,<SOC-NAME>-slim" for the SoC-specific compatible,
+		followed by "qcom,slim" as a fallback.
+ - interrupts : Interrupt number used by this controller
+ - clocks : Interface and core clocks used by this SLIMbus controller
+ - clock-names : Required clock-name entries are:
+	"iface" : Interface clock for this controller
+	"core" : Clock for the controller core's BAM
+
+Example:
+
+	slim@28080000 {
+		compatible = "qcom,apq8064-slim", "qcom,slim";
+		reg = <0x28080000 0x2000>, <0x80207C 4>;
+		reg-names = "ctrl", "slew";
+		interrupts = <0 33 0>;
+		clocks = <&lcc SLIMBUS_SRC>, <&lcc AUDIO_SLIMBUS_CLK>;
+		clock-names = "iface", "core";
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		wcd9310: audio-codec@1,0 {
+			compatible = "slim217,60";
+			reg = <1 0>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 0994bdd..889d1c0 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -97,6 +97,7 @@
 dragino	Dragino Technology Co., Limited
 ea	Embedded Artists AB
 ebv	EBV Elektronik
+eckelmann	Eckelmann AG
 edt	Emerging Display Technologies
 eeti	eGalax_eMPIA Technology Inc
 elan	Elan Microelectronic Corp.
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index d17a987..e9b41b1 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -47,6 +47,8 @@
    gpio
    misc_devices
    dmaengine/index
+   slimbus
+   soundwire/index
 
 .. only::  subproject and html
 
diff --git a/Documentation/driver-api/slimbus.rst b/Documentation/driver-api/slimbus.rst
new file mode 100644
index 0000000..7555ecd
--- /dev/null
+++ b/Documentation/driver-api/slimbus.rst
@@ -0,0 +1,127 @@
+============================
+Linux kernel SLIMbus support
+============================
+
+Overview
+========
+
+What is SLIMbus?
+----------------
+SLIMbus (Serial Low Power Interchip Media Bus) is a specification developed by
+the MIPI (Mobile Industry Processor Interface) Alliance. The bus uses a
+master/slave configuration, and is a 2-wire multi-drop implementation
+(clock, and data).
+
+Currently, SLIMbus is used to interface between application processors of SoCs
+(System-on-Chip) and peripheral components (typically codec). SLIMbus uses
+Time-Division-Multiplexing to accommodate multiple data channels, and
+a control channel.
+
+The control channel is used for various control functions such as bus
+management, configuration and status updates. These messages can be unicast
+(e.g. reading/writing device-specific values) or multicast (e.g. a data
+channel reconfiguration sequence is a broadcast message announced to all
+devices).
+
+A data channel is used for data transfer between two SLIMbus devices. A data
+channel uses dedicated ports on the device.
+
+Hardware description:
+---------------------
+The SLIMbus specification defines different types of devices, classified by
+their capabilities.
+A manager device is responsible for enumeration, configuration, and dynamic
+channel allocation. Every bus has 1 active manager.
+
+A generic device is a device providing application functionality (e.g. codec).
+
+A framer device is responsible for clocking the bus, and transmitting
+frame-sync and framing information on the bus.
+
+Each SLIMbus component has an interface device for monitoring the physical
+layer.
+
+Typically each SoC contains a SLIMbus component with 1 manager, 1 framer
+device, 1 generic device (for data channel support), and 1 interface device.
+An external peripheral SLIMbus component usually has 1 generic device (for
+functionality/data channel support), and an associated interface device.
+The generic device's registers are mapped as 'value elements' so that they can
+be written/read using the SLIMbus control channel, exchanging control/status
+information.
+In case there are multiple framer devices on the same bus, the manager device
+is responsible for selecting the active framer for clocking the bus.
+
+Per specification, SLIMbus uses "clock gears" to do power management based on
+current frequency and bandwidth requirements. There are 10 clock gears and each
+gear changes the SLIMbus frequency to be twice its previous gear.
+
+Each device has a 6-byte enumeration-address and the manager assigns every
+device with a 1-byte logical address after the devices report presence on the
+bus.
+
+Software description:
+---------------------
+There are 2 types of SLIMbus drivers:
+
+slim_controller represents a 'controller' for SLIMbus. This driver should
+implement duties needed by the SoC (manager device, associated
+interface device for monitoring the layers and reporting errors, default
+framer device).
+
+slim_device represents the 'generic device/component' for SLIMbus, and a
+slim_driver should implement driver for that slim_device.
+
+Device notifications to the driver:
+-----------------------------------
+Since SLIMbus devices have mechanisms for reporting their presence, the
+framework allows drivers to bind when corresponding devices report their
+presence on the bus.
+However, it is possible that the driver needs to be probed
+first so that it can enable the corresponding SLIMbus device (e.g. power it up
+and/or take it out of reset). To support that behavior, the framework allows
+drivers to probe first as well (e.g. using the standard DeviceTree compatible
+field). This creates the necessity for the driver to know when the device is
+functional (i.e. reported present). The device_up callback is used for that
+purpose; it is invoked when the device reports present and is assigned a
+logical address by the controller.
+
+Similarly, SLIMbus devices 'report absent' when they go down. A 'device_down'
+callback notifies the driver when the device reports absent and its logical
+address assignment is invalidated by the controller.
+
+Another notification, "boot_device", is used to notify the slim_driver when the
+controller resets the bus. This notification allows the driver to take the
+necessary steps to boot the device so that it's functional after the bus has
+been reset.
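+
+As a minimal sketch, a driver using these notifications could look like the
+following (this assumes the device_up/device_down callbacks described above
+are members of struct slim_driver; the codec helpers are hypothetical):
+
+.. code-block:: c
+
+	static int codec_probe(struct slim_device *sdev)
+	{
+		/* Allocate resources; the device may not be present yet. */
+		return codec_enable_supplies(sdev);	/* hypothetical */
+	}
+
+	static int codec_device_up(struct slim_device *sdev)
+	{
+		/* Reported present and assigned a logical address. */
+		return codec_init_hw(sdev);		/* hypothetical */
+	}
+
+	static int codec_device_down(struct slim_device *sdev)
+	{
+		/* Reported absent; the logical address is invalidated. */
+		return 0;
+	}
+
+	static struct slim_driver codec_driver = {
+		.driver = {
+			.name = "example-codec",
+		},
+		.probe = codec_probe,
+		.device_up = codec_device_up,
+		.device_down = codec_device_down,
+	};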
+
+Driver and Controller APIs:
+---------------------------
+.. kernel-doc:: include/linux/slimbus.h
+   :internal:
+
+.. kernel-doc:: drivers/slimbus/slimbus.h
+   :internal:
+
+.. kernel-doc:: drivers/slimbus/core.c
+   :export:
+
+Clock-pause:
+------------
+SLIMbus mandates that a reconfiguration sequence (known as clock-pause) be
+broadcast to all active devices on the bus before the bus can enter low-power
+mode. The controller uses this sequence when it decides to enter low-power mode
+so that the corresponding clocks and/or power-rails can be turned off to save
+power. Clock-pause is exited by waking up the framer device (if the controller
+driver initiates exiting low-power mode), or by toggling the data line (if a
+slave device wants to initiate it).
+
+Clock-pause APIs:
+~~~~~~~~~~~~~~~~~
+.. kernel-doc:: drivers/slimbus/sched.c
+   :export:
+
+Messaging:
+----------
+The framework supports regmap and read/write APIs to exchange control
+information with a SLIMbus device. The APIs can be synchronous or asynchronous.
+The header file <linux/slimbus.h> has more documentation about messaging APIs.
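+
+For example, given a bound struct slim_device (sdev), a minimal synchronous
+read-modify-write of a byte-sized value element (REG_CTRL is a hypothetical
+register address) could look like:
+
+.. code-block:: c
+
+	int ret;
+
+	/* slim_readb() returns the byte value or a negative error code. */
+	ret = slim_readb(sdev, REG_CTRL);
+	if (ret < 0)
+		return ret;
+
+	return slim_writeb(sdev, REG_CTRL, ret | BIT(0));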
+
+Messaging APIs:
+~~~~~~~~~~~~~~~
+.. kernel-doc:: drivers/slimbus/messaging.c
+   :export:
diff --git a/Documentation/driver-api/soundwire/index.rst b/Documentation/driver-api/soundwire/index.rst
new file mode 100644
index 0000000..647e946
--- /dev/null
+++ b/Documentation/driver-api/soundwire/index.rst
@@ -0,0 +1,15 @@
+=======================
+SoundWire Documentation
+=======================
+
+.. toctree::
+   :maxdepth: 1
+
+   summary
+
+.. only::  subproject
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/driver-api/soundwire/summary.rst b/Documentation/driver-api/soundwire/summary.rst
new file mode 100644
index 0000000..8193125
--- /dev/null
+++ b/Documentation/driver-api/soundwire/summary.rst
@@ -0,0 +1,207 @@
+===========================
+SoundWire Subsystem Summary
+===========================
+
+SoundWire is a new interface ratified in 2015 by the MIPI Alliance.
+SoundWire is used for transporting data typically related to audio
+functions. The SoundWire interface is optimized to integrate audio devices in
+mobile or mobile-inspired systems.
+
+SoundWire is a 2-pin multi-drop interface with a data line and a clock line. It
+facilitates development of low-cost, efficient, high-performance systems.
+Broad level key features of SoundWire interface include:
+
+ (1) Transporting all of payload data channels, control information, and setup
+     commands over a single two-pin interface.
+
+ (2) Lower clock frequency, and hence lower power consumption, by use of DDR
+     (Dual Data Rate) data transmission.
+
+ (3) Clock scaling and optional multiple data lanes to give wide flexibility
+     in data rate to match system requirements.
+
+ (4) Device status monitoring, including interrupt-style alerts to the Master.
+
+The SoundWire protocol supports up to eleven Slave interfaces. All the
+interfaces share a common Bus containing the data and clock lines. Each of the
+Slaves can support up to 14 Data Ports. 13 Data Ports are dedicated to audio
+transport. Data Port0 is dedicated to transport of Bulk control information;
+each of the audio Data Ports (1..14) can support up to 8 Channels in
+transmit or receive mode (typically a fixed direction, but configurable
+direction is enabled by the specification). Bandwidth restrictions to
+~19.2..24.576 Mbits/s do not, however, allow for 11*13*8 channels to be
+transmitted simultaneously.
+
+The figure below shows an example of connectivity between a SoundWire Master
+and two Slave devices. ::
+
+        +---------------+                                       +---------------+
+        |               |                       Clock Signal    |               |
+        |    Master     |-------+-------------------------------|    Slave      |
+        |   Interface   |       |               Data Signal     |  Interface 1  |
+        |               |-------|-------+-----------------------|               |
+        +---------------+       |       |                       +---------------+
+                                |       |
+                                |       |
+                                |       |
+                             +--+-------+--+
+                             |             |
+                             |   Slave     |
+                             | Interface 2 |
+                             |             |
+                             +-------------+
+
+
+Terminology
+===========
+
+The MIPI SoundWire specification uses the term 'device' to refer to a Master
+or Slave interface, which of course can be confusing. In this summary and
+code we use the term interface only to refer to the hardware. We follow the
+Linux device model by mapping each Slave interface connected on the bus as a
+device managed by a specific driver. The Linux SoundWire subsystem provides
+a framework to implement a SoundWire Slave driver with an API allowing
+3rd-party vendors to enable implementation-defined functionality while
+common setup/configuration tasks are handled by the bus.
+
+Bus:
+Implements SoundWire Linux Bus which handles the SoundWire protocol.
+Programs all the MIPI-defined Slave registers. Represents a SoundWire
+Master. Multiple instances of Bus may be present in a system.
+
+Slave:
+Registers as SoundWire Slave device (Linux Device). Multiple Slave devices
+can register to a Bus instance.
+
+Slave driver:
+Driver controlling the Slave device. MIPI-specified registers are controlled
+directly by the Bus (and transmitted through the Master driver/interface).
+Any implementation-defined Slave register is controlled by the Slave driver. In
+practice, it is expected that the Slave driver relies on regmap and does not
+request direct register access.
+
+Programming interfaces (SoundWire Master interface Driver)
+==========================================================
+
+SoundWire Bus supports programming interfaces for the SoundWire Master
+implementation and SoundWire Slave devices. All the code uses the "sdw"
+prefix commonly used by SoC designers and 3rd party vendors.
+
+Each of the SoundWire Master interfaces needs to be registered to the Bus.
+The Bus implements an API to read standard Master MIPI properties and also
+provides a callback in Master ops so the Master driver can implement its own
+functions that provide capabilities information. DT support is not implemented
+at this time but should be trivial to add since capabilities are enabled with
+the ``device_property_`` API.
+
+The Master interface, along with the Master interface capabilities, is
+registered based on a board file, DT or ACPI.
+
+Following is the Bus API to register the SoundWire Bus:
+
+.. code-block:: c
+
+	int sdw_add_bus_master(struct sdw_bus *bus)
+	{
+		if (!bus->dev)
+			return -ENODEV;
+
+		mutex_init(&bus->lock);
+		INIT_LIST_HEAD(&bus->slaves);
+
+		/* Check ACPI for Slave devices */
+		sdw_acpi_find_slaves(bus);
+
+		/* Check DT for Slave devices */
+		sdw_of_find_slaves(bus);
+
+		return 0;
+	}
+
+This will initialize the sdw_bus object for the Master device. The
+"sdw_master_ops" and "sdw_master_port_ops" callback functions are provided
+to the Bus.
+
+"sdw_master_ops" is used by the Bus to control the bus in a hardware-specific
+way. It includes Bus control functions such as sending the SoundWire
+read/write messages on the Bus, and setting up the clock frequency & Stream
+Synchronization Point (SSP). The "sdw_master_ops" structure abstracts the
+hardware details of the Master from the Bus.
+
+"sdw_master_port_ops" is used by the Bus to set up the Port parameters of the
+Master interface Port. The Master interface Port register map is not defined
+by the MIPI specification, so the Bus calls the "sdw_master_port_ops" callback
+functions to do Port operations like "Port Prepare", "Port Transport params
+set", and "Port enable and disable". The implementation of the Master driver
+can then perform hardware-specific configurations.
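+
+As a sketch, a Master driver might wire these up before registering the Bus.
+The callback member names below are illustrative of the roles described above;
+refer to the SoundWire headers for the exact structure layouts.
+
+.. code-block:: c
+
+	static struct sdw_master_ops my_master_ops = {
+		.read_prop = my_read_prop,	/* fill in Master properties */
+		.xfer_msg = my_xfer_msg,	/* send a read/write message */
+	};
+
+	static struct sdw_master_port_ops my_master_port_ops = {
+		.dpn_set_port_params = my_port_params,
+		.dpn_port_enable_ch = my_port_enable_ch,
+	};
+
+	static struct sdw_bus my_bus;
+
+	static int my_master_probe(struct platform_device *pdev)
+	{
+		my_bus.dev = &pdev->dev;
+		my_bus.ops = &my_master_ops;
+		my_bus.port_ops = &my_master_port_ops;
+
+		return sdw_add_bus_master(&my_bus);
+	}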
+
+Programming interfaces (SoundWire Slave Driver)
+===============================================
+
+The MIPI specification requires each Slave interface to expose a unique
+48-bit identifier, stored in 6 read-only dev_id registers. This dev_id
+identifier contains vendor and part information, as well as a field that makes
+it possible to differentiate between identical components. An additional class
+field is currently unused. A Slave driver is written for a specific vendor and
+part identifier; the Bus enumerates the Slave device and matches it to a
+driver based on these two ids. The probe of the Slave driver is called by the
+Bus on a successful match between the device and driver ids. A parent/child
+relationship is enforced between Master and Slave
+devices (the logical representation is aligned with the physical
+connectivity).
+
+The information on Master/Slave dependencies is stored in platform data,
+board-file, ACPI or DT. The MIPI Software specification defines additional
+link_id parameters for controllers that have multiple Master interfaces. The
+dev_id registers are only unique in the scope of a link, and the link_id
+unique in the scope of a controller. Both dev_id and link_id are not
+necessarily unique at the system level but the parent/child information is
+used to avoid ambiguity.
+
+.. code-block:: c
+
+	static const struct sdw_device_id slave_id[] = {
+	        SDW_SLAVE_ENTRY(0x025d, 0x700, 0),
+	        {},
+	};
+	MODULE_DEVICE_TABLE(sdw, slave_id);
+
+	static struct sdw_driver slave_sdw_driver = {
+	        .driver = {
+	                   .name = "slave_xxx",
+	                   .pm = &slave_runtime_pm,
+	                   },
+		.probe = slave_sdw_probe,
+		.remove = slave_sdw_remove,
+		.ops = &slave_slave_ops,
+		.id_table = slave_id,
+	};
+
+
+For capabilities, the Bus implements an API to read standard Slave MIPI
+properties and also provides a callback in the Slave ops so the Slave driver
+can implement its own function that provides capabilities information. The Bus
+needs to know a set of Slave capabilities to program Slave registers and to
+control Bus reconfigurations.
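+
+As a sketch, the "slave_slave_ops" referenced in the example above could
+provide such a callback (the member name is illustrative; refer to the
+SoundWire headers for the exact structure layout):
+
+.. code-block:: c
+
+	static int slave_read_prop(struct sdw_slave *slave)
+	{
+		/* Fill in implementation-defined capabilities here. */
+		return 0;
+	}
+
+	static struct sdw_slave_ops slave_slave_ops = {
+		.read_prop = slave_read_prop,
+	};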
+
+Future enhancements to be done
+==============================
+
+ (1) Bulk Register Access (BRA) transfers.
+
+ (2) Multiple data lane support.
+
+Links
+=====
+
+SoundWire MIPI specification 1.1 is available at:
+https://members.mipi.org/wg/All-Members/document/70290
+
+SoundWire MIPI DisCo (Discovery and Configuration) specification is
+available at:
+https://www.mipi.org/specifications/mipi-disco-soundwire
+
+(publicly accessible with registration or directly accessible to MIPI
+members)
+
+MIPI Alliance Manufacturer ID Page: mid.mipi.org
diff --git a/Documentation/fpga/fpga-mgr.txt b/Documentation/fpga/fpga-mgr.txt
index 78f197f..cc6413e 100644
--- a/Documentation/fpga/fpga-mgr.txt
+++ b/Documentation/fpga/fpga-mgr.txt
@@ -11,61 +11,65 @@
 The FPGA image data itself is very manufacturer specific, but for our purposes
 it's just binary data.  The FPGA manager core won't parse it.
 
+The FPGA image to be programmed can be in a scatter gather list, a single
+contiguous buffer, or a firmware file.  Because allocating contiguous kernel
+memory for the buffer should be avoided, users are encouraged to use a scatter
+gather list instead if possible.
+
+The particulars for programming the image are presented in a structure (struct
+fpga_image_info).  This struct contains parameters such as pointers to the
+FPGA image as well as image-specific particulars such as whether the image was
+built for full or partial reconfiguration.
 
 API Functions:
 ==============
 
-To program the FPGA from a file or from a buffer:
--------------------------------------------------
+To program the FPGA:
+--------------------
 
-	int fpga_mgr_buf_load(struct fpga_manager *mgr,
-			      struct fpga_image_info *info,
-		              const char *buf, size_t count);
+	int fpga_mgr_load(struct fpga_manager *mgr,
+			  struct fpga_image_info *info);
 
-Load the FPGA from an image which exists as a contiguous buffer in
-memory. Allocating contiguous kernel memory for the buffer should be avoided,
-users are encouraged to use the _sg interface instead of this.
-
-        int fpga_mgr_buf_load_sg(struct fpga_manager *mgr,
-				 struct fpga_image_info *info,
-				 struct sg_table *sgt);
-
-Load the FPGA from an image from non-contiguous in memory. Callers can
-construct a sg_table using alloc_page backed memory.
-
-	int fpga_mgr_firmware_load(struct fpga_manager *mgr,
-				   struct fpga_image_info *info,
-		                   const char *image_name);
-
-Load the FPGA from an image which exists as a file.  The image file must be on
-the firmware search path (see the firmware class documentation).  If successful,
+Load the FPGA from an image which is indicated in the info.  If successful,
 the FPGA ends up in operating mode.  Return 0 on success or a negative error
 code.
 
-A FPGA design contained in a FPGA image file will likely have particulars that
-affect how the image is programmed to the FPGA.  These are contained in struct
-fpga_image_info.  Currently the only such particular is a single flag bit
-indicating whether the image is for full or partial reconfiguration.
+To allocate or free a struct fpga_image_info:
+---------------------------------------------
+
+	struct fpga_image_info *fpga_image_info_alloc(struct device *dev);
+
+	void fpga_image_info_free(struct fpga_image_info *info);
 
 To get/put a reference to a FPGA manager:
 -----------------------------------------
 
 	struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
 	struct fpga_manager *fpga_mgr_get(struct device *dev);
-
-Given a DT node or device, get an exclusive reference to a FPGA manager.
-
 	void fpga_mgr_put(struct fpga_manager *mgr);
 
-Release the reference.
+Given a DT node or device, get a reference to a FPGA manager.  This pointer
+can be saved until you are ready to program the FPGA.  fpga_mgr_put releases
+the reference.
+
+
+To get exclusive control of a FPGA manager:
+-------------------------------------------
+
+	int fpga_mgr_lock(struct fpga_manager *mgr);
+	void fpga_mgr_unlock(struct fpga_manager *mgr);
+
+The user should call fpga_mgr_lock and verify that it returns 0 before
+attempting to program the FPGA.  Likewise, the user should call
+fpga_mgr_unlock when done programming the FPGA.
 
 
 To register or unregister the low level FPGA-specific driver:
 -------------------------------------------------------------
 
 	int fpga_mgr_register(struct device *dev, const char *name,
-		              const struct fpga_manager_ops *mops,
-		              void *priv);
+			      const struct fpga_manager_ops *mops,
+			      void *priv);
 
 	void fpga_mgr_unregister(struct device *dev);
 
@@ -75,62 +79,58 @@
 
 How to write an image buffer to a supported FPGA
 ================================================
-/* Include to get the API */
 #include <linux/fpga/fpga-mgr.h>
 
-/* device node that specifies the FPGA manager to use */
-struct device_node *mgr_node = ...
-
-/* FPGA image is in this buffer.  count is size of the buffer. */
-char *buf = ...
-int count = ...
-
-/* struct with information about the FPGA image to program. */
-struct fpga_image_info info;
-
-/* flags indicates whether to do full or partial reconfiguration */
-info.flags = 0;
-
+struct fpga_manager *mgr;
+struct fpga_image_info *info;
 int ret;
 
+/*
+ * Get a reference to FPGA manager.  The manager is not locked, so you can
+ * hold onto this reference without it preventing programming.
+ *
+ * This example uses the device node of the manager.  Alternatively, use
+ * fpga_mgr_get(dev) instead if you have the device.
+ */
+mgr = of_fpga_mgr_get(mgr_node);
+
+/* struct with information about the FPGA image to program. */
+info = fpga_image_info_alloc(dev);
+
+/* flags indicates whether to do full or partial reconfiguration */
+info->flags = FPGA_MGR_PARTIAL_RECONFIG;
+
+/*
+ * At this point, indicate where the image is. This is pseudo-code; you're
+ * going to use one of these three.
+ */
+if (image is in a scatter gather table) {
+
+	info->sgt = [your scatter gather table]
+
+} else if (image is in a buffer) {
+
+	info->buf = [your image buffer]
+	info->count = [image buffer size]
+
+} else if (image is in a firmware file) {
+
+	info->firmware_name = devm_kstrdup(dev, firmware_name, GFP_KERNEL);
+
+}
+
 /* Get exclusive control of FPGA manager */
-struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node);
+ret = fpga_mgr_lock(mgr);
 
 /* Load the image info to the FPGA */
 ret = fpga_mgr_load(mgr, info);
 
 /* Release the FPGA manager */
+fpga_mgr_unlock(mgr);
 fpga_mgr_put(mgr);
 
-
-How to write an image file to a supported FPGA
-==============================================
-/* Include to get the API */
-#include <linux/fpga/fpga-mgr.h>
-
-/* device node that specifies the FPGA manager to use */
-struct device_node *mgr_node = ...
-
-/* FPGA image is in this file which is in the firmware search path */
-const char *path = "fpga-image-9.rbf"
-
-/* struct with information about the FPGA image to program. */
-struct fpga_image_info info;
-
-/* flags indicates whether to do full or partial reconfiguration */
-info.flags = 0;
-
-int ret;
-
-/* Get exclusive control of FPGA manager */
-struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node);
-
-/* Get the firmware image (path) and load it to the FPGA */
-ret = fpga_mgr_firmware_load(mgr, &info, path);
-
-/* Release the FPGA manager */
-fpga_mgr_put(mgr);
-
+/* Deallocate the image info if you're done with it */
+fpga_image_info_free(info);
 
 How to support a new FPGA device
 ================================
diff --git a/Documentation/fpga/fpga-region.txt b/Documentation/fpga/fpga-region.txt
new file mode 100644
index 0000000..139a02b
--- /dev/null
+++ b/Documentation/fpga/fpga-region.txt
@@ -0,0 +1,95 @@
+FPGA Regions
+
+Alan Tull 2017
+
+CONTENTS
+ - Introduction
+ - The FPGA region API
+ - Usage example
+
+Introduction
+============
+
+This document is meant to be a brief overview of the FPGA region API usage.  A
+more conceptual look at regions can be found in [1].
+
+For the purposes of this API document, let's just say that a region associates
+an FPGA Manager and a bridge (or bridges) with a reprogrammable region of an
+FPGA or the whole FPGA.  The API provides a way to register a region and to
+program a region.
+
+Currently the only layer above fpga-region.c in the kernel is the Device Tree
+support (of-fpga-region.c) described in [1].  The DT support layer uses regions
+to program the FPGA and then DT to handle enumeration.  The common region code
+is intended to be used by other schemes that have other ways of accomplishing
+enumeration after programming.
+
+An fpga-region can be set up to know the following things:
+* which FPGA manager to use to do the programming
+* which bridges to disable before programming and enable afterwards.
+
+Additional info needed to program the FPGA image is passed in the struct
+fpga_image_info [2] including:
+* pointers to the image as either a scatter-gather buffer, a contiguous
+  buffer, or the name of a firmware file
+* flags indicating specifics such as whether the image is for partial
+  reconfiguration.
+
+===================
+The FPGA region API
+===================
+
+To register or unregister a region:
+-----------------------------------
+
+	int fpga_region_register(struct device *dev,
+				 struct fpga_region *region);
+	int fpga_region_unregister(struct fpga_region *region);
+
+An example of usage can be seen in the probe function of [3].
+
+To program an FPGA:
+-------------------
+	int fpga_region_program_fpga(struct fpga_region *region);
+
+This function operates on info passed in the fpga_image_info
+(region->info).
+
+This function will attempt to:
+ * lock the region's mutex
+ * lock the region's FPGA manager
+ * build a list of FPGA bridges if a method has been specified to do so
+ * disable the bridges
+ * program the FPGA
+ * re-enable the bridges
+ * release the locks
+
+=============
+Usage example
+=============
+
+First, allocate the info struct:
+
+	info = fpga_image_info_alloc(dev);
+	if (!info)
+		return -ENOMEM;
+
+Set flags as needed, e.g.
+
+	info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+Point to your FPGA image, such as:
+
+	info->sgt = &sgt;
+
+Add info to region and do the programming:
+
+	region->info = info;
+	ret = fpga_region_program_fpga(region);
+
+Then enumerate whatever hardware has appeared in the FPGA.
+
+--
+[1] ../devicetree/bindings/fpga/fpga-region.txt
+[2] ./fpga-mgr.txt
+[3] ../../drivers/fpga/of-fpga-region.c
diff --git a/Documentation/fpga/overview.txt b/Documentation/fpga/overview.txt
new file mode 100644
index 0000000..0f1236e
--- /dev/null
+++ b/Documentation/fpga/overview.txt
@@ -0,0 +1,23 @@
+Linux kernel FPGA support
+
+Alan Tull 2017
+
+The main point of this project has been to separate out the upper layers that
+know when to reprogram a FPGA from the lower layers that know how to reprogram
+a specific FPGA device.  The intention is to make this manufacturer-agnostic,
+understanding that of course the FPGA images are very device specific
+themselves.
+
+The framework in the kernel includes:
+* low level FPGA manager drivers that know how to program a specific device
+* the fpga-mgr framework they are registered with
+* low level FPGA bridge drivers for hard/soft bridges which are intended to
+  be disabled during FPGA programming
+* the fpga-bridge framework they are registered with
+* the fpga-region framework which associates and controls managers and bridges
+  as reconfigurable regions
+* the of-fpga-region support for reprogramming FPGAs when device tree overlays
+  are applied.
+
+I would encourage you, the user, to add code that creates FPGA regions rather
+than trying to control managers and bridges separately.
diff --git a/MAINTAINERS b/MAINTAINERS
index b46c9ce..e3ed91e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3398,8 +3398,8 @@
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 S:	Supported
-F:	drivers/char/*
-F:	drivers/misc/*
+F:	drivers/char/
+F:	drivers/misc/
 F:	include/linux/miscdevice.h
 
 CHECKPATCH
@@ -12460,6 +12460,13 @@
 F:	lib/test_siphash.c
 F:	include/linux/siphash.h
 
+SIOX
+M:	Gavin Schenk <g.schenk@eckelmann.de>
+M:	Uwe Kleine-König <kernel@pengutronix.de>
+S:	Supported
+F:	drivers/siox/*
+F:	include/trace/events/siox.h
+
 SIS 190 ETHERNET DRIVER
 M:	Francois Romieu <romieu@fr.zoreil.com>
 L:	netdev@vger.kernel.org
@@ -12511,6 +12518,14 @@
 F:	include/linux/srcu.h
 F:	kernel/rcu/srcu.c
 
+SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
+M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:	Maintained
+F:	drivers/slimbus/
+F:	Documentation/devicetree/bindings/slimbus/
+F:	include/linux/slimbus.h
+
 SMACK SECURITY MODULE
 M:	Casey Schaufler <casey@schaufler-ca.com>
 L:	linux-security-module@vger.kernel.org
@@ -12714,6 +12729,15 @@
 F:	sound/soc/
 F:	include/sound/soc*
 
+SOUNDWIRE SUBSYSTEM
+M:	Vinod Koul <vinod.koul@intel.com>
+M:	Sanyog Kale <sanyog.r.kale@intel.com>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:	Supported
+F:	Documentation/driver-api/soundwire/
+F:	drivers/soundwire/
+F:	include/linux/soundwire/
+
 SP2 MEDIA DRIVER
 M:	Olli Salonen <olli.salonen@iki.fi>
 L:	linux-media@vger.kernel.org
@@ -14557,6 +14581,15 @@
 F:	drivers/virtio/virtio_input.c
 F:	include/uapi/linux/virtio_input.h
 
+VIRTUAL BOX GUEST DEVICE DRIVER
+M:	Hans de Goede <hdegoede@redhat.com>
+M:	Arnd Bergmann <arnd@arndb.de>
+M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+S:	Maintained
+F:	include/linux/vbox_utils.h
+F:	include/uapi/linux/vbox*.h
+F:	drivers/virt/vboxguest/
+
 VIRTUAL SERIO DEVICE DRIVER
 M:	Stephen Chandler Paul <thatslyude@gmail.com>
 S:	Maintained
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 152744c..02b6fd0 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -153,6 +153,8 @@
 
 source "drivers/rpmsg/Kconfig"
 
+source "drivers/soundwire/Kconfig"
+
 source "drivers/soc/Kconfig"
 
 source "drivers/devfreq/Kconfig"
@@ -211,4 +213,8 @@
 
 source "drivers/opp/Kconfig"
 
+source "drivers/siox/Kconfig"
+
+source "drivers/slimbus/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index e06f7f6..e096637 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -87,6 +87,7 @@
 obj-$(CONFIG_SPI)		+= spi/
 obj-$(CONFIG_SPMI)		+= spmi/
 obj-$(CONFIG_HSI)		+= hsi/
+obj-$(CONFIG_SLIMBUS)		+= slimbus/
 obj-y				+= net/
 obj-$(CONFIG_ATM)		+= atm/
 obj-$(CONFIG_FUSION)		+= message/
@@ -157,6 +158,7 @@
 obj-$(CONFIG_HWSPINLOCK)	+= hwspinlock/
 obj-$(CONFIG_REMOTEPROC)	+= remoteproc/
 obj-$(CONFIG_RPMSG)		+= rpmsg/
+obj-$(CONFIG_SOUNDWIRE)		+= soundwire/
 
 # Virtualization drivers
 obj-$(CONFIG_VIRT_DRIVERS)	+= virt/
@@ -184,3 +186,4 @@
 obj-$(CONFIG_FSI)		+= fsi/
 obj-$(CONFIG_TEE)		+= tee/
 obj-$(CONFIG_MULTIPLEXER)	+= mux/
+obj-$(CONFIG_SIOX)		+= siox/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a7ecfde..778caed 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -579,6 +579,8 @@ enum {
  *                        (protected by @proc->inner_lock)
  * @todo:                 list of work to do for this thread
  *                        (protected by @proc->inner_lock)
+ * @process_todo:         whether work in @todo should be processed
+ *                        (protected by @proc->inner_lock)
  * @return_error:         transaction errors reported by this thread
  *                        (only accessed by this thread)
  * @reply_error:          transaction errors reported by target thread
@@ -604,6 +606,7 @@ struct binder_thread {
 	bool looper_need_return; /* can be written by other thread */
 	struct binder_transaction *transaction_stack;
 	struct list_head todo;
+	bool process_todo;
 	struct binder_error return_error;
 	struct binder_error reply_error;
 	wait_queue_head_t wait;
@@ -789,6 +792,16 @@ static bool binder_worklist_empty(struct binder_proc *proc,
 	return ret;
 }
 
+/**
+ * binder_enqueue_work_ilocked() - Add an item to the work list
+ * @work:         struct binder_work to add to list
+ * @target_list:  list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
 static void
 binder_enqueue_work_ilocked(struct binder_work *work,
 			   struct list_head *target_list)
@@ -799,22 +812,56 @@ binder_enqueue_work_ilocked(struct binder_work *work,
 }
 
 /**
- * binder_enqueue_work() - Add an item to the work list
- * @proc:         binder_proc associated with list
+ * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
+ * @thread:       thread to queue work to
  * @work:         struct binder_work to add to list
- * @target_list:  list to add work to
  *
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
+ * Adds the work to the todo list of the thread. Doesn't set the process_todo
+ * flag, which means that (if it wasn't already set) the thread will go to
+ * sleep without handling this work when it calls read.
+ *
+ * Requires the proc->inner_lock to be held.
  */
 static void
-binder_enqueue_work(struct binder_proc *proc,
-		    struct binder_work *work,
-		    struct list_head *target_list)
+binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
+					    struct binder_work *work)
 {
-	binder_inner_proc_lock(proc);
-	binder_enqueue_work_ilocked(work, target_list);
-	binder_inner_proc_unlock(proc);
+	binder_enqueue_work_ilocked(work, &thread->todo);
+}
+
+/**
+ * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
+ * @thread:       thread to queue work to
+ * @work:         struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static void
+binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
+				   struct binder_work *work)
+{
+	binder_enqueue_work_ilocked(work, &thread->todo);
+	thread->process_todo = true;
+}
+
+/**
+ * binder_enqueue_thread_work() - Add an item to the thread work list
+ * @thread:       thread to queue work to
+ * @work:         struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ */
+static void
+binder_enqueue_thread_work(struct binder_thread *thread,
+			   struct binder_work *work)
+{
+	binder_inner_proc_lock(thread->proc);
+	binder_enqueue_thread_work_ilocked(thread, work);
+	binder_inner_proc_unlock(thread->proc);
 }
 
 static void
@@ -940,7 +987,7 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 static bool binder_has_work_ilocked(struct binder_thread *thread,
 				    bool do_proc_work)
 {
-	return !binder_worklist_empty_ilocked(&thread->todo) ||
+	return thread->process_todo ||
 		thread->looper_need_return ||
 		(do_proc_work &&
 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1228,6 +1275,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
 			node->local_strong_refs++;
 		if (!node->has_strong_ref && target_list) {
 			binder_dequeue_work_ilocked(&node->work);
+			/*
+			 * Note: this function is the only place where we queue
+			 * directly to a thread->todo without using the
+			 * corresponding binder_enqueue_thread_work() helper
+			 * functions; in this case it's ok to not set the
+			 * process_todo flag, since we know this node work will
+			 * always be followed by other work that starts queue
+			 * processing: in case of synchronous transactions, a
+			 * BR_REPLY or BR_ERROR; in case of oneway
+			 * transactions, a BR_TRANSACTION_COMPLETE.
+			 */
 			binder_enqueue_work_ilocked(&node->work, target_list);
 		}
 	} else {
@@ -1239,6 +1297,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
 					node->debug_id);
 				return -EINVAL;
 			}
+			/*
+			 * See comment above
+			 */
 			binder_enqueue_work_ilocked(&node->work, target_list);
 		}
 	}
@@ -1928,9 +1989,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 			binder_pop_transaction_ilocked(target_thread, t);
 			if (target_thread->reply_error.cmd == BR_OK) {
 				target_thread->reply_error.cmd = error_code;
-				binder_enqueue_work_ilocked(
-					&target_thread->reply_error.work,
-					&target_thread->todo);
+				binder_enqueue_thread_work_ilocked(
+					target_thread,
+					&target_thread->reply_error.work);
 				wake_up_interruptible(&target_thread->wait);
 			} else {
 				WARN(1, "Unexpected reply error: %u\n",
@@ -2569,18 +2630,16 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 				    struct binder_proc *proc,
 				    struct binder_thread *thread)
 {
-	struct list_head *target_list = NULL;
 	struct binder_node *node = t->buffer->target_node;
 	bool oneway = !!(t->flags & TF_ONE_WAY);
-	bool wakeup = true;
+	bool pending_async = false;
 
 	BUG_ON(!node);
 	binder_node_lock(node);
 	if (oneway) {
 		BUG_ON(thread);
 		if (node->has_async_transaction) {
-			target_list = &node->async_todo;
-			wakeup = false;
+			pending_async = true;
 		} else {
 			node->has_async_transaction = 1;
 		}
@@ -2594,19 +2653,17 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 		return false;
 	}
 
-	if (!thread && !target_list)
+	if (!thread && !pending_async)
 		thread = binder_select_thread_ilocked(proc);
 
 	if (thread)
-		target_list = &thread->todo;
-	else if (!target_list)
-		target_list = &proc->todo;
+		binder_enqueue_thread_work_ilocked(thread, &t->work);
+	else if (!pending_async)
+		binder_enqueue_work_ilocked(&t->work, &proc->todo);
 	else
-		BUG_ON(target_list != &node->async_todo);
+		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
 
-	binder_enqueue_work_ilocked(&t->work, target_list);
-
-	if (wakeup)
+	if (!pending_async)
 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
 
 	binder_inner_proc_unlock(proc);
@@ -3101,10 +3158,10 @@ static void binder_transaction(struct binder_proc *proc,
 		}
 	}
 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
-	binder_enqueue_work(proc, tcomplete, &thread->todo);
 	t->work.type = BINDER_WORK_TRANSACTION;
 
 	if (reply) {
+		binder_enqueue_thread_work(thread, tcomplete);
 		binder_inner_proc_lock(target_proc);
 		if (target_thread->is_dead) {
 			binder_inner_proc_unlock(target_proc);
@@ -3112,13 +3169,21 @@ static void binder_transaction(struct binder_proc *proc,
 		}
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
-		binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
 		binder_inner_proc_unlock(target_proc);
 		wake_up_interruptible_sync(&target_thread->wait);
 		binder_free_transaction(in_reply_to);
 	} else if (!(t->flags & TF_ONE_WAY)) {
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_inner_proc_lock(proc);
+		/*
+		 * Defer the TRANSACTION_COMPLETE, so we don't return to
+		 * userspace immediately; this allows the target process to
+		 * immediately start processing this transaction, reducing
+		 * latency. We will then return the TRANSACTION_COMPLETE when
+		 * the target replies (or there is an error).
+		 */
+		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
 		t->need_reply = 1;
 		t->from_parent = thread->transaction_stack;
 		thread->transaction_stack = t;
@@ -3132,6 +3197,7 @@ static void binder_transaction(struct binder_proc *proc,
 	} else {
 		BUG_ON(target_node == NULL);
 		BUG_ON(t->buffer->async_transaction != 1);
+		binder_enqueue_thread_work(thread, tcomplete);
 		if (!binder_proc_transaction(t, target_proc, NULL))
 			goto err_dead_proc_or_thread;
 	}
@@ -3210,15 +3276,11 @@ static void binder_transaction(struct binder_proc *proc,
 	BUG_ON(thread->return_error.cmd != BR_OK);
 	if (in_reply_to) {
 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
-		binder_enqueue_work(thread->proc,
-				    &thread->return_error.work,
-				    &thread->todo);
+		binder_enqueue_thread_work(thread, &thread->return_error.work);
 		binder_send_failed_reply(in_reply_to, return_error);
 	} else {
 		thread->return_error.cmd = return_error;
-		binder_enqueue_work(thread->proc,
-				    &thread->return_error.work,
-				    &thread->todo);
+		binder_enqueue_thread_work(thread, &thread->return_error.work);
 	}
 }
 
@@ -3522,10 +3584,9 @@ static int binder_thread_write(struct binder_proc *proc,
 					WARN_ON(thread->return_error.cmd !=
 						BR_OK);
 					thread->return_error.cmd = BR_ERROR;
-					binder_enqueue_work(
-						thread->proc,
-						&thread->return_error.work,
-						&thread->todo);
+					binder_enqueue_thread_work(
+						thread,
+						&thread->return_error.work);
 					binder_debug(
 						BINDER_DEBUG_FAILED_TRANSACTION,
 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3605,9 +3666,9 @@ static int binder_thread_write(struct binder_proc *proc,
 					if (thread->looper &
 					    (BINDER_LOOPER_STATE_REGISTERED |
 					     BINDER_LOOPER_STATE_ENTERED))
-						binder_enqueue_work_ilocked(
-								&death->work,
-								&thread->todo);
+						binder_enqueue_thread_work_ilocked(
+								thread,
+								&death->work);
 					else {
 						binder_enqueue_work_ilocked(
 								&death->work,
@@ -3662,8 +3723,8 @@ static int binder_thread_write(struct binder_proc *proc,
 				if (thread->looper &
 					(BINDER_LOOPER_STATE_REGISTERED |
 					 BINDER_LOOPER_STATE_ENTERED))
-					binder_enqueue_work_ilocked(
-						&death->work, &thread->todo);
+					binder_enqueue_thread_work_ilocked(
+						thread, &death->work);
 				else {
 					binder_enqueue_work_ilocked(
 							&death->work,
@@ -3837,6 +3898,8 @@ static int binder_thread_read(struct binder_proc *proc,
 			break;
 		}
 		w = binder_dequeue_work_head_ilocked(list);
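+		/* once thread->todo is drained, allow this thread to sleep */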
+		if (binder_worklist_empty_ilocked(&thread->todo))
+			thread->process_todo = false;
 
 		switch (w->type) {
 		case BINDER_WORK_TRANSACTION: {
@@ -5524,7 +5587,9 @@ static int __init binder_init(void)
 	struct binder_device *device;
 	struct hlist_node *tmp;
 
-	binder_alloc_shrinker_init();
+	ret = binder_alloc_shrinker_init();
+	if (ret)
+		return ret;
 
 	atomic_set(&binder_transaction_log.cur, ~0U);
 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6f6f745..07b866a 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -281,6 +281,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 			goto err_vm_insert_page_failed;
 		}
 
+		if (index + 1 > alloc->pages_high)
+			alloc->pages_high = index + 1;
+
 		trace_binder_alloc_page_end(alloc, index);
 		/* vm_insert_page does not seem to increment the refcount */
 	}
@@ -324,11 +327,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	return vma ? -ENOMEM : -ESRCH;
 }
 
-struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
-						  size_t data_size,
-						  size_t offsets_size,
-						  size_t extra_buffers_size,
-						  int is_async)
+static struct binder_buffer *binder_alloc_new_buf_locked(
+				struct binder_alloc *alloc,
+				size_t data_size,
+				size_t offsets_size,
+				size_t extra_buffers_size,
+				int is_async)
 {
 	struct rb_node *n = alloc->free_buffers.rb_node;
 	struct binder_buffer *buffer;
@@ -853,6 +857,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 	}
 	mutex_unlock(&alloc->mutex);
 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
+	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
 }
 
 /**
@@ -1002,8 +1007,14 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	INIT_LIST_HEAD(&alloc->buffers);
 }
 
-void binder_alloc_shrinker_init(void)
+int binder_alloc_shrinker_init(void)
 {
-	list_lru_init(&binder_alloc_lru);
-	register_shrinker(&binder_shrinker);
+	int ret = list_lru_init(&binder_alloc_lru);
+
+	if (ret == 0) {
+		ret = register_shrinker(&binder_shrinker);
+		if (ret)
+			list_lru_destroy(&binder_alloc_lru);
+	}
+	return ret;
 }
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 2dd33b6..9ef64e5 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -92,6 +92,7 @@ struct binder_lru_page {
  * @pages:              array of binder_lru_page
  * @buffer_size:        size of address space specified via mmap
  * @pid:                pid for associated binder_proc (invariant after init)
+ * @pages_high:         high watermark of offset in @pages
  *
  * Bookkeeping structure for per-proc address space management for binder
  * buffers. It is normally initialized during binder_init() and binder_mmap()
@@ -112,6 +113,7 @@ struct binder_alloc {
 	size_t buffer_size;
 	uint32_t buffer_free;
 	int pid;
+	size_t pages_high;
 };
 
 #ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
@@ -128,7 +130,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 						  size_t extra_buffers_size,
 						  int is_async);
 extern void binder_alloc_init(struct binder_alloc *alloc);
-void binder_alloc_shrinker_init(void);
+extern int binder_alloc_shrinker_init(void);
 extern void binder_alloc_vma_close(struct binder_alloc *alloc);
 extern struct binder_buffer *
 binder_alloc_prepare_to_free(struct binder_alloc *alloc,
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 3a1535d..cc162b4 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -21,6 +21,10 @@
 	tristate
 	depends on I2C
 
+config REGMAP_SLIMBUS
+	tristate
+	depends on SLIMBUS
+
 config REGMAP_SPI
 	tristate
 	depends on SPI
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 0d298c4..63dec92 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
 obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
 obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
+obj-$(CONFIG_REGMAP_SLIMBUS) += regmap-slimbus.o
 obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
 obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
 obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
new file mode 100644
index 0000000..c90bee8
--- /dev/null
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017, Linaro Ltd.
+
+#include <linux/regmap.h>
+#include <linux/slimbus.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static int regmap_slimbus_byte_reg_read(void *context, unsigned int reg,
+					unsigned int *val)
+{
+	struct slim_device *sdev = context;
+	int v;
+
+	v = slim_readb(sdev, reg);
+
+	if (v < 0)
+		return v;
+
+	*val = v;
+
+	return 0;
+}
+
+static int regmap_slimbus_byte_reg_write(void *context, unsigned int reg,
+					 unsigned int val)
+{
+	struct slim_device *sdev = context;
+
+	return slim_writeb(sdev, reg, val);
+}
+
+static struct regmap_bus regmap_slimbus_bus = {
+	.reg_write = regmap_slimbus_byte_reg_write,
+	.reg_read = regmap_slimbus_byte_reg_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct regmap_bus *regmap_get_slimbus(struct slim_device *slim,
+					const struct regmap_config *config)
+{
+	if (config->val_bits == 8 && config->reg_bits == 8)
+		return &regmap_slimbus_bus;
+
+	return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
+				     const struct regmap_config *config,
+				     struct lock_class_key *lock_key,
+				     const char *lock_name)
+{
+	const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
+			     lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
+
+struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
+					  const struct regmap_config *config,
+					  struct lock_class_key *lock_key,
+					  const char *lock_name)
+{
+	const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
+
+MODULE_LICENSE("GPL v2");
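For context, a SLIMbus client driver would reach this code through the devm_regmap_init_slimbus() wrapper declared in <linux/regmap.h>. The driver and config below are hypothetical; note that regmap_get_slimbus() only accepts 8-bit registers and values, so anything else gets -ENOTSUPP:

static const struct regmap_config demo_regmap_config = {
	.reg_bits = 8,		/* must be 8 for this bus */
	.val_bits = 8,		/* must be 8 for this bus */
	.max_register = 0xff,
};

static int demo_codec_probe(struct slim_device *sdev)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_slimbus(sdev, &demo_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* regmap_read()/regmap_write() now route through slim_readb()/slim_writeb() */
	return 0;
}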
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 8249762..be14abf 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -659,17 +659,31 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
 	return retval;
 }
 
-static int lp_set_timeout(unsigned int minor, struct timeval *par_timeout)
+static int lp_set_timeout(unsigned int minor, s64 tv_sec, long tv_usec)
 {
 	long to_jiffies;
 
 	/* Convert to jiffies, place in lp_table */
-	if ((par_timeout->tv_sec < 0) ||
-	    (par_timeout->tv_usec < 0)) {
+	if (tv_sec < 0 || tv_usec < 0)
 		return -EINVAL;
+
+	/*
+	 * we used to not check, so let's not make this fatal,
+	 * but deal with user space passing a 32-bit tv_nsec in
+	 * a 64-bit field, capping the timeout to 1 second
+	 * worth of microseconds, and capping the total at
+	 * MAX_JIFFY_OFFSET.
+	 */
+	if (tv_usec > 999999)
+		tv_usec = 999999;
+
+	if (tv_sec >= MAX_SEC_IN_JIFFIES - 1) {
+		to_jiffies = MAX_JIFFY_OFFSET;
+	} else {
+		to_jiffies = DIV_ROUND_UP(tv_usec, 1000000/HZ);
+		to_jiffies += tv_sec * (long) HZ;
 	}
-	to_jiffies = DIV_ROUND_UP(par_timeout->tv_usec, 1000000/HZ);
-	to_jiffies += par_timeout->tv_sec * (long) HZ;
+
 	if (to_jiffies <= 0) {
 		return -EINVAL;
 	}
@@ -677,23 +691,43 @@ static int lp_set_timeout(unsigned int minor, struct timeval *par_timeout)
 	return 0;
 }
 
+static int lp_set_timeout32(unsigned int minor, void __user *arg)
+{
+	s32 karg[2];
+
+	if (copy_from_user(karg, arg, sizeof(karg)))
+		return -EFAULT;
+
+	return lp_set_timeout(minor, karg[0], karg[1]);
+}
+
+static int lp_set_timeout64(unsigned int minor, void __user *arg)
+{
+	s64 karg[2];
+
+	if (copy_from_user(karg, arg, sizeof(karg)))
+		return -EFAULT;
+
+	return lp_set_timeout(minor, karg[0], karg[1]);
+}
+
 static long lp_ioctl(struct file *file, unsigned int cmd,
 			unsigned long arg)
 {
 	unsigned int minor;
-	struct timeval par_timeout;
 	int ret;
 
 	minor = iminor(file_inode(file));
 	mutex_lock(&lp_mutex);
 	switch (cmd) {
-	case LPSETTIMEOUT:
-		if (copy_from_user(&par_timeout, (void __user *)arg,
-					sizeof (struct timeval))) {
-			ret = -EFAULT;
+	case LPSETTIMEOUT_OLD:
+		if (BITS_PER_LONG == 32) {
+			ret = lp_set_timeout32(minor, (void __user *)arg);
 			break;
 		}
-		ret = lp_set_timeout(minor, &par_timeout);
+		/* fallthrough for 64-bit */
+	case LPSETTIMEOUT_NEW:
+		ret = lp_set_timeout64(minor, (void __user *)arg);
 		break;
 	default:
 		ret = lp_do_ioctl(minor, cmd, arg, (void __user *)arg);
@@ -709,18 +743,19 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
 			unsigned long arg)
 {
 	unsigned int minor;
-	struct timeval par_timeout;
 	int ret;
 
 	minor = iminor(file_inode(file));
 	mutex_lock(&lp_mutex);
 	switch (cmd) {
-	case LPSETTIMEOUT:
-		if (compat_get_timeval(&par_timeout, compat_ptr(arg))) {
-			ret = -EFAULT;
+	case LPSETTIMEOUT_OLD:
+		if (!COMPAT_USE_64BIT_TIME) {
+			ret = lp_set_timeout32(minor, (void __user *)arg);
 			break;
 		}
-		ret = lp_set_timeout(minor, &par_timeout);
+		/* fallthrough for x32 mode */
+	case LPSETTIMEOUT_NEW:
+		ret = lp_set_timeout64(minor, (void __user *)arg);
 		break;
 #ifdef LP_STATS
 	case LPGETSTATS:
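Both ioctls carry a {seconds, microseconds} pair: LPSETTIMEOUT_OLD keeps the native struct timeval layout (two 32-bit words on 32-bit user space), while LPSETTIMEOUT_NEW always passes two 64-bit values, matching the s32 karg[2] and s64 karg[2] reads above. A hypothetical user-space call, assuming the LPSETTIMEOUT_NEW definition that accompanies this change in <linux/lp.h>:

#include <linux/lp.h>
#include <sys/ioctl.h>

/* set a 5.5 second timeout using the year-2038-safe layout */
static int set_lp_timeout(int fd)
{
	long long karg[2] = { 5, 500000 };	/* tv_sec, tv_usec */

	return ioctl(fd, LPSETTIMEOUT_NEW, karg);
}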
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6aefe53..052011b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -107,6 +107,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 	phys_addr_t p = *ppos;
 	ssize_t read, sz;
 	void *ptr;
+	char *bounce;
+	int err;
 
 	if (p != *ppos)
 		return 0;
@@ -129,15 +131,22 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 	}
 #endif
 
+	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!bounce)
+		return -ENOMEM;
+
 	while (count > 0) {
 		unsigned long remaining;
 		int allowed;
 
 		sz = size_inside_page(p, count);
 
+		err = -EPERM;
 		allowed = page_is_allowed(p >> PAGE_SHIFT);
 		if (!allowed)
-			return -EPERM;
+			goto failed;
+
+		err = -EFAULT;
 		if (allowed == 2) {
 			/* Show zeros for restricted memory. */
 			remaining = clear_user(buf, sz);
@@ -149,24 +158,32 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 			 */
 			ptr = xlate_dev_mem_ptr(p);
 			if (!ptr)
-				return -EFAULT;
+				goto failed;
 
-			remaining = copy_to_user(buf, ptr, sz);
-
+			err = probe_kernel_read(bounce, ptr, sz);
 			unxlate_dev_mem_ptr(p, ptr);
+			if (err)
+				goto failed;
+
+			remaining = copy_to_user(buf, bounce, sz);
 		}
 
 		if (remaining)
-			return -EFAULT;
+			goto failed;
 
 		buf += sz;
 		p += sz;
 		count -= sz;
 		read += sz;
 	}
+	kfree(bounce);
 
 	*ppos += read;
 	return read;
+
+failed:
+	kfree(bounce);
+	return err;
 }
 
 static ssize_t write_mem(struct file *file, const char __user *buf,
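The change above reads /dev/mem through a bounce page: probe_kernel_read() returns an error instead of oopsing if the mapped region turns out to be unreadable, and only verified bytes are copied to user space. A stripped-down sketch of the same pattern (the helper name and the sz <= PAGE_SIZE assumption are mine, not the driver's):

static ssize_t bounce_read(char __user *buf, const void *src, size_t sz)
{
	char *bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);	/* sz must be <= PAGE_SIZE */
	long err;

	if (!bounce)
		return -ENOMEM;

	err = probe_kernel_read(bounce, src, sz);	/* fails gracefully on bad memory */
	if (!err && copy_to_user(buf, bounce, sz))
		err = -EFAULT;

	kfree(bounce);
	return err ? err : sz;
}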
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index ad5448f..f47ef84 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -11,33 +11,6 @@
 
 if FPGA
 
-config FPGA_REGION
-	tristate "FPGA Region"
-	depends on OF && FPGA_BRIDGE
-	help
-	  FPGA Regions allow loading FPGA images under control of
-	  the Device Tree.
-
-config FPGA_MGR_ICE40_SPI
-	tristate "Lattice iCE40 SPI"
-	depends on OF && SPI
-	help
-	  FPGA manager driver support for Lattice iCE40 FPGAs over SPI.
-
-config FPGA_MGR_ALTERA_CVP
-	tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager"
-	depends on PCI
-	help
-	  FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V
-	  and Arria 10 Altera FPGAs using the CvP interface over PCIe.
-
-config FPGA_MGR_ALTERA_PS_SPI
-	tristate "Altera FPGA Passive Serial over SPI"
-	depends on SPI
-	help
-	  FPGA manager driver support for Altera Arria/Cyclone/Stratix
-	  using the passive serial interface over SPI.
-
 config FPGA_MGR_SOCFPGA
 	tristate "Altera SOCFPGA FPGA Manager"
 	depends on ARCH_SOCFPGA || COMPILE_TEST
@@ -51,19 +24,31 @@
 	help
 	  FPGA manager driver support for Altera Arria10 SoCFPGA.
 
-config FPGA_MGR_TS73XX
-	tristate "Technologic Systems TS-73xx SBC FPGA Manager"
-	depends on ARCH_EP93XX && MACH_TS72XX
-	help
-	  FPGA manager driver support for the Altera Cyclone II FPGA
-	  present on the TS-73xx SBC boards.
+config ALTERA_PR_IP_CORE
+	tristate "Altera Partial Reconfiguration IP Core"
+	help
+	  Core driver support for Altera Partial Reconfiguration IP component
 
-config FPGA_MGR_XILINX_SPI
-	tristate "Xilinx Configuration over Slave Serial (SPI)"
+config ALTERA_PR_IP_CORE_PLAT
+	tristate "Platform support of Altera Partial Reconfiguration IP Core"
+	depends on ALTERA_PR_IP_CORE && OF && HAS_IOMEM
+	help
+	  Platform driver support for Altera Partial Reconfiguration IP
+	  component
+
+config FPGA_MGR_ALTERA_PS_SPI
+	tristate "Altera FPGA Passive Serial over SPI"
 	depends on SPI
 	help
-	  FPGA manager driver support for Xilinx FPGA configuration
-	  over slave serial interface.
+	  FPGA manager driver support for Altera Arria/Cyclone/Stratix
+	  using the passive serial interface over SPI.
+
+config FPGA_MGR_ALTERA_CVP
+	tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager"
+	depends on PCI
+	help
+	  FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V
+	  and Arria 10 Altera FPGAs using the CvP interface over PCIe.
 
 config FPGA_MGR_ZYNQ_FPGA
 	tristate "Xilinx Zynq FPGA"
@@ -72,9 +57,28 @@
 	help
 	  FPGA manager driver support for Xilinx Zynq FPGAs.
 
+config FPGA_MGR_XILINX_SPI
+	tristate "Xilinx Configuration over Slave Serial (SPI)"
+	depends on SPI
+	help
+	  FPGA manager driver support for Xilinx FPGA configuration
+	  over slave serial interface.
+
+config FPGA_MGR_ICE40_SPI
+	tristate "Lattice iCE40 SPI"
+	depends on OF && SPI
+	help
+	  FPGA manager driver support for Lattice iCE40 FPGAs over SPI.
+
+config FPGA_MGR_TS73XX
+	tristate "Technologic Systems TS-73xx SBC FPGA Manager"
+	depends on ARCH_EP93XX && MACH_TS72XX
+	help
+	  FPGA manager driver support for the Altera Cyclone II FPGA
+	  present on the TS-73xx SBC boards.
+
 config FPGA_BRIDGE
 	tristate "FPGA Bridge Framework"
-	depends on OF
 	help
 	  Say Y here if you want to support bridges connected between host
 	  processors and FPGAs or between FPGAs.
@@ -95,18 +99,6 @@
 	  isolate one region of the FPGA from the busses while that
 	  region is being reprogrammed.
 
-config ALTERA_PR_IP_CORE
-        tristate "Altera Partial Reconfiguration IP Core"
-        help
-          Core driver support for Altera Partial Reconfiguration IP component
-
-config ALTERA_PR_IP_CORE_PLAT
-	tristate "Platform support of Altera Partial Reconfiguration IP Core"
-	depends on ALTERA_PR_IP_CORE && OF && HAS_IOMEM
-	help
-	  Platform driver support for Altera Partial Reconfiguration IP
-	  component
-
 config XILINX_PR_DECOUPLER
 	tristate "Xilinx LogiCORE PR Decoupler"
 	depends on FPGA_BRIDGE
@@ -117,4 +109,19 @@
 	  region of the FPGA from the busses while that region is
 	  being reprogrammed during partial reconfig.
 
+config FPGA_REGION
+	tristate "FPGA Region"
+	depends on FPGA_BRIDGE
+	help
+	  FPGA Region common code.  A FPGA Region controls a FPGA Manager
+	  and the FPGA Bridges associated with either a reconfigurable
+	  region of an FPGA or a whole FPGA.
+
+config OF_FPGA_REGION
+	tristate "FPGA Region Device Tree Overlay Support"
+	depends on OF && FPGA_REGION
+	help
+	  Support for loading FPGA images by applying a Device Tree
+	  overlay.
+
 endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index f98dcf1..3cb276a 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -26,3 +26,4 @@
 
 # High Level Interfaces
 obj-$(CONFIG_FPGA_REGION)		+= fpga-region.o
+obj-$(CONFIG_OF_FPGA_REGION)		+= of-fpga-region.o
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 9651aa56..31bd2c5 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -2,6 +2,7 @@
  * FPGA Bridge Framework Driver
  *
  *  Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *  Copyright (C) 2017 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -70,32 +71,13 @@ int fpga_bridge_disable(struct fpga_bridge *bridge)
 }
 EXPORT_SYMBOL_GPL(fpga_bridge_disable);
 
-/**
- * of_fpga_bridge_get - get an exclusive reference to a fpga bridge
- *
- * @np: node pointer of a FPGA bridge
- * @info: fpga image specific information
- *
- * Return fpga_bridge struct if successful.
- * Return -EBUSY if someone already has a reference to the bridge.
- * Return -ENODEV if @np is not a FPGA Bridge.
- */
-struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
-				       struct fpga_image_info *info)
-
+static struct fpga_bridge *__fpga_bridge_get(struct device *dev,
+					     struct fpga_image_info *info)
 {
-	struct device *dev;
 	struct fpga_bridge *bridge;
 	int ret = -ENODEV;
 
-	dev = class_find_device(fpga_bridge_class, NULL, np,
-				fpga_bridge_of_node_match);
-	if (!dev)
-		goto err_dev;
-
 	bridge = to_fpga_bridge(dev);
-	if (!bridge)
-		goto err_dev;
 
 	bridge->info = info;
 
@@ -117,8 +99,58 @@ struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
 	put_device(dev);
 	return ERR_PTR(ret);
 }
+
+/**
+ * of_fpga_bridge_get - get an exclusive reference to a fpga bridge
+ *
+ * @np: node pointer of a FPGA bridge
+ * @info: fpga image specific information
+ *
+ * Return fpga_bridge struct if successful.
+ * Return -EBUSY if someone already has a reference to the bridge.
+ * Return -ENODEV if @np is not a FPGA Bridge.
+ */
+struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
+				       struct fpga_image_info *info)
+{
+	struct device *dev;
+
+	dev = class_find_device(fpga_bridge_class, NULL, np,
+				fpga_bridge_of_node_match);
+	if (!dev)
+		return ERR_PTR(-ENODEV);
+
+	return __fpga_bridge_get(dev, info);
+}
 EXPORT_SYMBOL_GPL(of_fpga_bridge_get);
 
+static int fpga_bridge_dev_match(struct device *dev, const void *data)
+{
+	return dev->parent == data;
+}
+
+/**
+ * fpga_bridge_get - get an exclusive reference to a fpga bridge
+ * @dev:	parent device that fpga bridge was registered with
+ * @info:	fpga image specific information
+ *
+ * Given a device, get an exclusive reference to a fpga bridge.
+ *
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
+ */
+struct fpga_bridge *fpga_bridge_get(struct device *dev,
+				    struct fpga_image_info *info)
+{
+	struct device *bridge_dev;
+
+	bridge_dev = class_find_device(fpga_bridge_class, NULL, dev,
+				       fpga_bridge_dev_match);
+	if (!bridge_dev)
+		return ERR_PTR(-ENODEV);
+
+	return __fpga_bridge_get(bridge_dev, info);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_get);
+
 /**
  * fpga_bridge_put - release a reference to a bridge
  *
@@ -206,7 +238,7 @@ void fpga_bridges_put(struct list_head *bridge_list)
 EXPORT_SYMBOL_GPL(fpga_bridges_put);
 
 /**
- * fpga_bridges_get_to_list - get a bridge, add it to a list
+ * of_fpga_bridge_get_to_list - get a bridge, add it to a list
  *
  * @np: node pointer of a FPGA bridge
  * @info: fpga image specific information
@@ -216,14 +248,44 @@ EXPORT_SYMBOL_GPL(fpga_bridges_put);
  *
  * Return 0 for success, error code from of_fpga_bridge_get() otherwise.
  */
-int fpga_bridge_get_to_list(struct device_node *np,
+int of_fpga_bridge_get_to_list(struct device_node *np,
+			       struct fpga_image_info *info,
+			       struct list_head *bridge_list)
+{
+	struct fpga_bridge *bridge;
+	unsigned long flags;
+
+	bridge = of_fpga_bridge_get(np, info);
+	if (IS_ERR(bridge))
+		return PTR_ERR(bridge);
+
+	spin_lock_irqsave(&bridge_list_lock, flags);
+	list_add(&bridge->node, bridge_list);
+	spin_unlock_irqrestore(&bridge_list_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_fpga_bridge_get_to_list);
+
+/**
+ * fpga_bridge_get_to_list - given device, get a bridge, add it to a list
+ *
+ * @dev: FPGA bridge device
+ * @info: fpga image specific information
+ * @bridge_list: list of FPGA bridges
+ *
+ * Get an exclusive reference to the bridge and add it to the list.
+ *
+ * Return 0 for success, error code from fpga_bridge_get() otherwise.
+ */
+int fpga_bridge_get_to_list(struct device *dev,
 			    struct fpga_image_info *info,
 			    struct list_head *bridge_list)
 {
 	struct fpga_bridge *bridge;
 	unsigned long flags;
 
-	bridge = of_fpga_bridge_get(np, info);
+	bridge = fpga_bridge_get(dev, info);
 	if (IS_ERR(bridge))
 		return PTR_ERR(bridge);
 
@@ -303,6 +365,7 @@ int fpga_bridge_register(struct device *dev, const char *name,
 	bridge->priv = priv;
 
 	device_initialize(&bridge->dev);
+	bridge->dev.groups = br_ops->groups;
 	bridge->dev.class = fpga_bridge_class;
 	bridge->dev.parent = dev;
 	bridge->dev.of_node = dev->of_node;
@@ -381,7 +444,7 @@ static void __exit fpga_bridge_dev_exit(void)
 }
 
 MODULE_DESCRIPTION("FPGA Bridge Driver");
-MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
 MODULE_LICENSE("GPL v2");
 
 subsys_initcall(fpga_bridge_dev_init);
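fpga_bridge_get() mirrors of_fpga_bridge_get() but keys off the parent device the bridge was registered with, so non-device-tree code can take the same exclusive reference. A hedged usage sketch (the caller and the NULL image info are assumptions):

static int demo_toggle_bridge(struct device *bridge_parent)
{
	struct fpga_bridge *br;
	int ret;

	br = fpga_bridge_get(bridge_parent, NULL);	/* -ENODEV if absent, -EBUSY if held */
	if (IS_ERR(br))
		return PTR_ERR(br);

	ret = fpga_bridge_disable(br);
	fpga_bridge_put(br);	/* releases the exclusive reference */

	return ret;
}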
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index 188ffef..9939d2c 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -2,6 +2,7 @@
  * FPGA Manager Core
  *
  *  Copyright (C) 2013-2015 Altera Corporation
+ *  Copyright (C) 2017 Intel Corporation
  *
  * With code from the mailing list:
  * Copyright (C) 2013 Xilinx, Inc.
@@ -31,6 +32,40 @@
 static DEFINE_IDA(fpga_mgr_ida);
 static struct class *fpga_mgr_class;
 
+struct fpga_image_info *fpga_image_info_alloc(struct device *dev)
+{
+	struct fpga_image_info *info;
+
+	get_device(dev);
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		put_device(dev);
+		return NULL;
+	}
+
+	info->dev = dev;
+
+	return info;
+}
+EXPORT_SYMBOL_GPL(fpga_image_info_alloc);
+
+void fpga_image_info_free(struct fpga_image_info *info)
+{
+	struct device *dev;
+
+	if (!info)
+		return;
+
+	dev = info->dev;
+	if (info->firmware_name)
+		devm_kfree(dev, info->firmware_name);
+
+	devm_kfree(dev, info);
+	put_device(dev);
+}
+EXPORT_SYMBOL_GPL(fpga_image_info_free);
+
 /*
  * Call the low level driver's write_init function.  This will do the
  * device-specific things to get the FPGA into the state where it is ready to
@@ -137,8 +172,9 @@ static int fpga_mgr_write_complete(struct fpga_manager *mgr,
  *
  * Return: 0 on success, negative error code otherwise.
  */
-int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
-			 struct sg_table *sgt)
+static int fpga_mgr_buf_load_sg(struct fpga_manager *mgr,
+				struct fpga_image_info *info,
+				struct sg_table *sgt)
 {
 	int ret;
 
@@ -170,7 +206,6 @@ int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
 
 	return fpga_mgr_write_complete(mgr, info);
 }
-EXPORT_SYMBOL_GPL(fpga_mgr_buf_load_sg);
 
 static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr,
 				    struct fpga_image_info *info,
@@ -210,8 +245,9 @@ static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr,
  *
  * Return: 0 on success, negative error code otherwise.
  */
-int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
-		      const char *buf, size_t count)
+static int fpga_mgr_buf_load(struct fpga_manager *mgr,
+			     struct fpga_image_info *info,
+			     const char *buf, size_t count)
 {
 	struct page **pages;
 	struct sg_table sgt;
@@ -266,7 +302,6 @@ int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
 
 	return rc;
 }
-EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
 
 /**
  * fpga_mgr_firmware_load - request firmware and load to fpga
@@ -282,9 +317,9 @@ EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
  *
  * Return: 0 on success, negative error code otherwise.
  */
-int fpga_mgr_firmware_load(struct fpga_manager *mgr,
-			   struct fpga_image_info *info,
-			   const char *image_name)
+static int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+				  struct fpga_image_info *info,
+				  const char *image_name)
 {
 	struct device *dev = &mgr->dev;
 	const struct firmware *fw;
@@ -307,7 +342,18 @@ int fpga_mgr_firmware_load(struct fpga_manager *mgr,
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(fpga_mgr_firmware_load);
+
+int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info)
+{
+	if (info->sgt)
+		return fpga_mgr_buf_load_sg(mgr, info, info->sgt);
+	if (info->buf && info->count)
+		return fpga_mgr_buf_load(mgr, info, info->buf, info->count);
+	if (info->firmware_name)
+		return fpga_mgr_firmware_load(mgr, info, info->firmware_name);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_load);
 
 static const char * const state_str[] = {
 	[FPGA_MGR_STATE_UNKNOWN] =		"unknown",
@@ -364,28 +410,17 @@ ATTRIBUTE_GROUPS(fpga_mgr);
 static struct fpga_manager *__fpga_mgr_get(struct device *dev)
 {
 	struct fpga_manager *mgr;
-	int ret = -ENODEV;
 
 	mgr = to_fpga_manager(dev);
-	if (!mgr)
-		goto err_dev;
-
-	/* Get exclusive use of fpga manager */
-	if (!mutex_trylock(&mgr->ref_mutex)) {
-		ret = -EBUSY;
-		goto err_dev;
-	}
 
 	if (!try_module_get(dev->parent->driver->owner))
-		goto err_ll_mod;
+		goto err_dev;
 
 	return mgr;
 
-err_ll_mod:
-	mutex_unlock(&mgr->ref_mutex);
 err_dev:
 	put_device(dev);
-	return ERR_PTR(ret);
+	return ERR_PTR(-ENODEV);
 }
 
 static int fpga_mgr_dev_match(struct device *dev, const void *data)
@@ -394,10 +429,10 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data)
 }
 
 /**
- * fpga_mgr_get - get an exclusive reference to a fpga mgr
+ * fpga_mgr_get - get a reference to a fpga mgr
  * @dev:	parent device that fpga mgr was registered with
  *
- * Given a device, get an exclusive reference to a fpga mgr.
+ * Given a device, get a reference to a fpga mgr.
  *
  * Return: fpga manager struct or IS_ERR() condition containing error code.
  */
@@ -418,10 +453,10 @@ static int fpga_mgr_of_node_match(struct device *dev, const void *data)
 }
 
 /**
- * of_fpga_mgr_get - get an exclusive reference to a fpga mgr
+ * of_fpga_mgr_get - get a reference to a fpga mgr
  * @node:	device node
  *
- * Given a device node, get an exclusive reference to a fpga mgr.
+ * Given a device node, get a reference to a fpga mgr.
  *
  * Return: fpga manager struct or IS_ERR() condition containing error code.
  */
@@ -445,12 +480,41 @@ EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
 void fpga_mgr_put(struct fpga_manager *mgr)
 {
 	module_put(mgr->dev.parent->driver->owner);
-	mutex_unlock(&mgr->ref_mutex);
 	put_device(&mgr->dev);
 }
 EXPORT_SYMBOL_GPL(fpga_mgr_put);
 
 /**
+ * fpga_mgr_lock - Lock FPGA manager for exclusive use
+ * @mgr:	fpga manager
+ *
+ * Given a pointer to FPGA Manager (from fpga_mgr_get() or
+ * of_fpga_mgr_get()), attempt to get the mutex.
+ *
+ * Return: 0 for success or -EBUSY
+ */
+int fpga_mgr_lock(struct fpga_manager *mgr)
+{
+	if (!mutex_trylock(&mgr->ref_mutex)) {
+		dev_err(&mgr->dev, "FPGA manager is in use.\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_lock);
+
+/**
+ * fpga_mgr_unlock - Unlock FPGA manager
+ * @mgr:	fpga manager
+ */
+void fpga_mgr_unlock(struct fpga_manager *mgr)
+{
+	mutex_unlock(&mgr->ref_mutex);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
+
+/**
  * fpga_mgr_register - register a low level fpga manager driver
  * @dev:	fpga manager device from pdev
  * @name:	fpga manager name
@@ -503,6 +567,7 @@ int fpga_mgr_register(struct device *dev, const char *name,
 
 	device_initialize(&mgr->dev);
 	mgr->dev.class = fpga_mgr_class;
+	mgr->dev.groups = mops->groups;
 	mgr->dev.parent = dev;
 	mgr->dev.of_node = dev->of_node;
 	mgr->dev.id = id;
@@ -578,7 +643,7 @@ static void __exit fpga_mgr_class_exit(void)
 	ida_destroy(&fpga_mgr_ida);
 }
 
-MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
 MODULE_DESCRIPTION("FPGA manager framework");
 MODULE_LICENSE("GPL v2");
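Since exclusivity has moved from fpga_mgr_get() into the new lock/unlock pair, a load sequence now takes a reference, locks, loads, and unlocks. A sketch under the assumption that info came from fpga_image_info_alloc() with exactly one of sgt, buf/count, or firmware_name filled in (which is what fpga_mgr_load() dispatches on):

static int demo_program(struct device *mgr_dev, struct fpga_image_info *info)
{
	struct fpga_manager *mgr;
	int ret;

	mgr = fpga_mgr_get(mgr_dev);	/* reference only, no longer exclusive */
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	ret = fpga_mgr_lock(mgr);	/* -EBUSY if another user is programming */
	if (!ret) {
		ret = fpga_mgr_load(mgr, info);	/* sgt, buf or firmware path */
		fpga_mgr_unlock(mgr);
	}

	fpga_mgr_put(mgr);
	return ret;
}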
 
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index d9ab7c7..edab2a2 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -2,6 +2,7 @@
  * FPGA Region - Device Tree support for FPGA programming under Linux
  *
  *  Copyright (C) 2013-2016 Altera Corporation
+ *  Copyright (C) 2017 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -18,61 +19,30 @@
 
 #include <linux/fpga/fpga-bridge.h>
 #include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-region.h>
 #include <linux/idr.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/of_platform.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
-/**
- * struct fpga_region - FPGA Region structure
- * @dev: FPGA Region device
- * @mutex: enforces exclusive reference to region
- * @bridge_list: list of FPGA bridges specified in region
- * @info: fpga image specific information
- */
-struct fpga_region {
-	struct device dev;
-	struct mutex mutex; /* for exclusive reference to region */
-	struct list_head bridge_list;
-	struct fpga_image_info *info;
-};
-
-#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
-
 static DEFINE_IDA(fpga_region_ida);
 static struct class *fpga_region_class;
 
-static const struct of_device_id fpga_region_of_match[] = {
-	{ .compatible = "fpga-region", },
-	{},
-};
-MODULE_DEVICE_TABLE(of, fpga_region_of_match);
-
-static int fpga_region_of_node_match(struct device *dev, const void *data)
-{
-	return dev->of_node == data;
-}
-
-/**
- * fpga_region_find - find FPGA region
- * @np: device node of FPGA Region
- * Caller will need to put_device(&region->dev) when done.
- * Returns FPGA Region struct or NULL
- */
-static struct fpga_region *fpga_region_find(struct device_node *np)
+struct fpga_region *fpga_region_class_find(
+	struct device *start, const void *data,
+	int (*match)(struct device *, const void *))
 {
 	struct device *dev;
 
-	dev = class_find_device(fpga_region_class, NULL, np,
-				fpga_region_of_node_match);
+	dev = class_find_device(fpga_region_class, start, data, match);
 	if (!dev)
 		return NULL;
 
 	return to_fpga_region(dev);
 }
+EXPORT_SYMBOL_GPL(fpga_region_class_find);
 
 /**
  * fpga_region_get - get an exclusive reference to a fpga region
@@ -94,15 +64,13 @@ static struct fpga_region *fpga_region_get(struct fpga_region *region)
 	}
 
 	get_device(dev);
-	of_node_get(dev->of_node);
 	if (!try_module_get(dev->parent->driver->owner)) {
-		of_node_put(dev->of_node);
 		put_device(dev);
 		mutex_unlock(&region->mutex);
 		return ERR_PTR(-ENODEV);
 	}
 
-	dev_dbg(&region->dev, "get\n");
+	dev_dbg(dev, "get\n");
 
 	return region;
 }
@@ -116,403 +84,99 @@ static void fpga_region_put(struct fpga_region *region)
 {
 	struct device *dev = &region->dev;
 
-	dev_dbg(&region->dev, "put\n");
+	dev_dbg(dev, "put\n");
 
 	module_put(dev->parent->driver->owner);
-	of_node_put(dev->of_node);
 	put_device(dev);
 	mutex_unlock(&region->mutex);
 }
 
 /**
- * fpga_region_get_manager - get exclusive reference for FPGA manager
- * @region: FPGA region
- *
- * Get FPGA Manager from "fpga-mgr" property or from ancestor region.
- *
- * Caller should call fpga_mgr_put() when done with manager.
- *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
- */
-static struct fpga_manager *fpga_region_get_manager(struct fpga_region *region)
-{
-	struct device *dev = &region->dev;
-	struct device_node *np = dev->of_node;
-	struct device_node  *mgr_node;
-	struct fpga_manager *mgr;
-
-	of_node_get(np);
-	while (np) {
-		if (of_device_is_compatible(np, "fpga-region")) {
-			mgr_node = of_parse_phandle(np, "fpga-mgr", 0);
-			if (mgr_node) {
-				mgr = of_fpga_mgr_get(mgr_node);
-				of_node_put(np);
-				return mgr;
-			}
-		}
-		np = of_get_next_parent(np);
-	}
-	of_node_put(np);
-
-	return ERR_PTR(-EINVAL);
-}
-
-/**
- * fpga_region_get_bridges - create a list of bridges
- * @region: FPGA region
- * @overlay: device node of the overlay
- *
- * Create a list of bridges including the parent bridge and the bridges
- * specified by "fpga-bridges" property.  Note that the
- * fpga_bridges_enable/disable/put functions are all fine with an empty list
- * if that happens.
- *
- * Caller should call fpga_bridges_put(&region->bridge_list) when
- * done with the bridges.
- *
- * Return 0 for success (even if there are no bridges specified)
- * or -EBUSY if any of the bridges are in use.
- */
-static int fpga_region_get_bridges(struct fpga_region *region,
-				   struct device_node *overlay)
-{
-	struct device *dev = &region->dev;
-	struct device_node *region_np = dev->of_node;
-	struct device_node *br, *np, *parent_br = NULL;
-	int i, ret;
-
-	/* If parent is a bridge, add to list */
-	ret = fpga_bridge_get_to_list(region_np->parent, region->info,
-				      &region->bridge_list);
-	if (ret == -EBUSY)
-		return ret;
-
-	if (!ret)
-		parent_br = region_np->parent;
-
-	/* If overlay has a list of bridges, use it. */
-	if (of_parse_phandle(overlay, "fpga-bridges", 0))
-		np = overlay;
-	else
-		np = region_np;
-
-	for (i = 0; ; i++) {
-		br = of_parse_phandle(np, "fpga-bridges", i);
-		if (!br)
-			break;
-
-		/* If parent bridge is in list, skip it. */
-		if (br == parent_br)
-			continue;
-
-		/* If node is a bridge, get it and add to list */
-		ret = fpga_bridge_get_to_list(br, region->info,
-					      &region->bridge_list);
-
-		/* If any of the bridges are in use, give up */
-		if (ret == -EBUSY) {
-			fpga_bridges_put(&region->bridge_list);
-			return -EBUSY;
-		}
-	}
-
-	return 0;
-}
-
-/**
  * fpga_region_program_fpga - program FPGA
  * @region: FPGA region
- * @firmware_name: name of FPGA image firmware file
- * @overlay: device node of the overlay
- * Program an FPGA using information in the device tree.
- * Function assumes that there is a firmware-name property.
+ * Program an FPGA using fpga image info (region->info).
  * Return 0 for success or negative error code.
  */
-static int fpga_region_program_fpga(struct fpga_region *region,
-				    const char *firmware_name,
-				    struct device_node *overlay)
+int fpga_region_program_fpga(struct fpga_region *region)
 {
-	struct fpga_manager *mgr;
+	struct device *dev = &region->dev;
+	struct fpga_image_info *info = region->info;
 	int ret;
 
 	region = fpga_region_get(region);
 	if (IS_ERR(region)) {
-		pr_err("failed to get fpga region\n");
+		dev_err(dev, "failed to get FPGA region\n");
 		return PTR_ERR(region);
 	}
 
-	mgr = fpga_region_get_manager(region);
-	if (IS_ERR(mgr)) {
-		pr_err("failed to get fpga region manager\n");
-		ret = PTR_ERR(mgr);
+	ret = fpga_mgr_lock(region->mgr);
+	if (ret) {
+		dev_err(dev, "FPGA manager is busy\n");
 		goto err_put_region;
 	}
 
-	ret = fpga_region_get_bridges(region, overlay);
-	if (ret) {
-		pr_err("failed to get fpga region bridges\n");
-		goto err_put_mgr;
+	/*
+	 * In some cases, we already have a list of bridges in the
+	 * fpga region struct.  Or we don't have any bridges.
+	 */
+	if (region->get_bridges) {
+		ret = region->get_bridges(region);
+		if (ret) {
+			dev_err(dev, "failed to get fpga region bridges\n");
+			goto err_unlock_mgr;
+		}
 	}
 
 	ret = fpga_bridges_disable(&region->bridge_list);
 	if (ret) {
-		pr_err("failed to disable region bridges\n");
+		dev_err(dev, "failed to disable bridges\n");
 		goto err_put_br;
 	}
 
-	ret = fpga_mgr_firmware_load(mgr, region->info, firmware_name);
+	ret = fpga_mgr_load(region->mgr, info);
 	if (ret) {
-		pr_err("failed to load fpga image\n");
+		dev_err(dev, "failed to load FPGA image\n");
 		goto err_put_br;
 	}
 
 	ret = fpga_bridges_enable(&region->bridge_list);
 	if (ret) {
-		pr_err("failed to enable region bridges\n");
+		dev_err(dev, "failed to enable region bridges\n");
 		goto err_put_br;
 	}
 
-	fpga_mgr_put(mgr);
+	fpga_mgr_unlock(region->mgr);
 	fpga_region_put(region);
 
 	return 0;
 
 err_put_br:
-	fpga_bridges_put(&region->bridge_list);
-err_put_mgr:
-	fpga_mgr_put(mgr);
+	if (region->get_bridges)
+		fpga_bridges_put(&region->bridge_list);
+err_unlock_mgr:
+	fpga_mgr_unlock(region->mgr);
 err_put_region:
 	fpga_region_put(region);
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(fpga_region_program_fpga);
 
-/**
- * child_regions_with_firmware
- * @overlay: device node of the overlay
- *
- * If the overlay adds child FPGA regions, they are not allowed to have
- * firmware-name property.
- *
- * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name.
- */
-static int child_regions_with_firmware(struct device_node *overlay)
+int fpga_region_register(struct device *dev, struct fpga_region *region)
 {
-	struct device_node *child_region;
-	const char *child_firmware_name;
-	int ret = 0;
-
-	of_node_get(overlay);
-
-	child_region = of_find_matching_node(overlay, fpga_region_of_match);
-	while (child_region) {
-		if (!of_property_read_string(child_region, "firmware-name",
-					     &child_firmware_name)) {
-			ret = -EINVAL;
-			break;
-		}
-		child_region = of_find_matching_node(child_region,
-						     fpga_region_of_match);
-	}
-
-	of_node_put(child_region);
-
-	if (ret)
-		pr_err("firmware-name not allowed in child FPGA region: %pOF",
-		       child_region);
-
-	return ret;
-}
-
-/**
- * fpga_region_notify_pre_apply - pre-apply overlay notification
- *
- * @region: FPGA region that the overlay was applied to
- * @nd: overlay notification data
- *
- * Called after when an overlay targeted to a FPGA Region is about to be
- * applied.  Function will check the properties that will be added to the FPGA
- * region.  If the checks pass, it will program the FPGA.
- *
- * The checks are:
- * The overlay must add either firmware-name or external-fpga-config property
- * to the FPGA Region.
- *
- *   firmware-name         : program the FPGA
- *   external-fpga-config  : FPGA is already programmed
- *   encrypted-fpga-config : FPGA bitstream is encrypted
- *
- * The overlay can add other FPGA regions, but child FPGA regions cannot have a
- * firmware-name property since those regions don't exist yet.
- *
- * If the overlay that breaks the rules, notifier returns an error and the
- * overlay is rejected before it goes into the main tree.
- *
- * Returns 0 for success or negative error code for failure.
- */
-static int fpga_region_notify_pre_apply(struct fpga_region *region,
-					struct of_overlay_notify_data *nd)
-{
-	const char *firmware_name = NULL;
-	struct fpga_image_info *info;
-	int ret;
-
-	info = devm_kzalloc(&region->dev, sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	region->info = info;
-
-	/* Reject overlay if child FPGA Regions have firmware-name property */
-	ret = child_regions_with_firmware(nd->overlay);
-	if (ret)
-		return ret;
-
-	/* Read FPGA region properties from the overlay */
-	if (of_property_read_bool(nd->overlay, "partial-fpga-config"))
-		info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
-
-	if (of_property_read_bool(nd->overlay, "external-fpga-config"))
-		info->flags |= FPGA_MGR_EXTERNAL_CONFIG;
-
-	if (of_property_read_bool(nd->overlay, "encrypted-fpga-config"))
-		info->flags |= FPGA_MGR_ENCRYPTED_BITSTREAM;
-
-	of_property_read_string(nd->overlay, "firmware-name", &firmware_name);
-
-	of_property_read_u32(nd->overlay, "region-unfreeze-timeout-us",
-			     &info->enable_timeout_us);
-
-	of_property_read_u32(nd->overlay, "region-freeze-timeout-us",
-			     &info->disable_timeout_us);
-
-	of_property_read_u32(nd->overlay, "config-complete-timeout-us",
-			     &info->config_complete_timeout_us);
-
-	/* If FPGA was externally programmed, don't specify firmware */
-	if ((info->flags & FPGA_MGR_EXTERNAL_CONFIG) && firmware_name) {
-		pr_err("error: specified firmware and external-fpga-config");
-		return -EINVAL;
-	}
-
-	/* FPGA is already configured externally.  We're done. */
-	if (info->flags & FPGA_MGR_EXTERNAL_CONFIG)
-		return 0;
-
-	/* If we got this far, we should be programming the FPGA */
-	if (!firmware_name) {
-		pr_err("should specify firmware-name or external-fpga-config\n");
-		return -EINVAL;
-	}
-
-	return fpga_region_program_fpga(region, firmware_name, nd->overlay);
-}
-
-/**
- * fpga_region_notify_post_remove - post-remove overlay notification
- *
- * @region: FPGA region that was targeted by the overlay that was removed
- * @nd: overlay notification data
- *
- * Called after an overlay has been removed if the overlay's target was a
- * FPGA region.
- */
-static void fpga_region_notify_post_remove(struct fpga_region *region,
-					   struct of_overlay_notify_data *nd)
-{
-	fpga_bridges_disable(&region->bridge_list);
-	fpga_bridges_put(&region->bridge_list);
-	devm_kfree(&region->dev, region->info);
-	region->info = NULL;
-}
-
-/**
- * of_fpga_region_notify - reconfig notifier for dynamic DT changes
- * @nb:		notifier block
- * @action:	notifier action
- * @arg:	reconfig data
- *
- * This notifier handles programming a FPGA when a "firmware-name" property is
- * added to a fpga-region.
- *
- * Returns NOTIFY_OK or error if FPGA programming fails.
- */
-static int of_fpga_region_notify(struct notifier_block *nb,
-				 unsigned long action, void *arg)
-{
-	struct of_overlay_notify_data *nd = arg;
-	struct fpga_region *region;
-	int ret;
-
-	switch (action) {
-	case OF_OVERLAY_PRE_APPLY:
-		pr_debug("%s OF_OVERLAY_PRE_APPLY\n", __func__);
-		break;
-	case OF_OVERLAY_POST_APPLY:
-		pr_debug("%s OF_OVERLAY_POST_APPLY\n", __func__);
-		return NOTIFY_OK;       /* not for us */
-	case OF_OVERLAY_PRE_REMOVE:
-		pr_debug("%s OF_OVERLAY_PRE_REMOVE\n", __func__);
-		return NOTIFY_OK;       /* not for us */
-	case OF_OVERLAY_POST_REMOVE:
-		pr_debug("%s OF_OVERLAY_POST_REMOVE\n", __func__);
-		break;
-	default:			/* should not happen */
-		return NOTIFY_OK;
-	}
-
-	region = fpga_region_find(nd->target);
-	if (!region)
-		return NOTIFY_OK;
-
-	ret = 0;
-	switch (action) {
-	case OF_OVERLAY_PRE_APPLY:
-		ret = fpga_region_notify_pre_apply(region, nd);
-		break;
-
-	case OF_OVERLAY_POST_REMOVE:
-		fpga_region_notify_post_remove(region, nd);
-		break;
-	}
-
-	put_device(&region->dev);
-
-	if (ret)
-		return notifier_from_errno(ret);
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block fpga_region_of_nb = {
-	.notifier_call = of_fpga_region_notify,
-};
-
-static int fpga_region_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct device_node *np = dev->of_node;
-	struct fpga_region *region;
 	int id, ret = 0;
 
-	region = kzalloc(sizeof(*region), GFP_KERNEL);
-	if (!region)
-		return -ENOMEM;
-
 	id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL);
-	if (id < 0) {
-		ret = id;
-		goto err_kfree;
-	}
+	if (id < 0)
+		return id;
 
 	mutex_init(&region->mutex);
 	INIT_LIST_HEAD(&region->bridge_list);
-
 	device_initialize(&region->dev);
+	region->dev.groups = region->groups;
 	region->dev.class = fpga_region_class;
 	region->dev.parent = dev;
-	region->dev.of_node = np;
+	region->dev.of_node = dev->of_node;
 	region->dev.id = id;
 	dev_set_drvdata(dev, region);
 
@@ -524,44 +188,27 @@ static int fpga_region_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_remove;
 
-	of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
-
-	dev_info(dev, "FPGA Region probed\n");
-
 	return 0;
 
 err_remove:
 	ida_simple_remove(&fpga_region_ida, id);
-err_kfree:
-	kfree(region);
-
 	return ret;
 }
+EXPORT_SYMBOL_GPL(fpga_region_register);
 
-static int fpga_region_remove(struct platform_device *pdev)
+int fpga_region_unregister(struct fpga_region *region)
 {
-	struct fpga_region *region = platform_get_drvdata(pdev);
-
 	device_unregister(&region->dev);
 
 	return 0;
 }
-
-static struct platform_driver fpga_region_driver = {
-	.probe = fpga_region_probe,
-	.remove = fpga_region_remove,
-	.driver = {
-		.name	= "fpga-region",
-		.of_match_table = of_match_ptr(fpga_region_of_match),
-	},
-};
+EXPORT_SYMBOL_GPL(fpga_region_unregister);
 
 static void fpga_region_dev_release(struct device *dev)
 {
 	struct fpga_region *region = to_fpga_region(dev);
 
 	ida_simple_remove(&fpga_region_ida, region->dev.id);
-	kfree(region);
 }
 
 /**
@@ -570,36 +217,17 @@ static void fpga_region_dev_release(struct device *dev)
  */
 static int __init fpga_region_init(void)
 {
-	int ret;
-
 	fpga_region_class = class_create(THIS_MODULE, "fpga_region");
 	if (IS_ERR(fpga_region_class))
 		return PTR_ERR(fpga_region_class);
 
 	fpga_region_class->dev_release = fpga_region_dev_release;
 
-	ret = of_overlay_notifier_register(&fpga_region_of_nb);
-	if (ret)
-		goto err_class;
-
-	ret = platform_driver_register(&fpga_region_driver);
-	if (ret)
-		goto err_plat;
-
 	return 0;
-
-err_plat:
-	of_overlay_notifier_unregister(&fpga_region_of_nb);
-err_class:
-	class_destroy(fpga_region_class);
-	ida_destroy(&fpga_region_ida);
-	return ret;
 }
 
 static void __exit fpga_region_exit(void)
 {
-	platform_driver_unregister(&fpga_region_driver);
-	of_overlay_notifier_unregister(&fpga_region_of_nb);
 	class_destroy(fpga_region_class);
 	ida_destroy(&fpga_region_ida);
 }
@@ -608,5 +236,5 @@ subsys_initcall(fpga_region_init);
 module_exit(fpga_region_exit);
 
 MODULE_DESCRIPTION("FPGA Region");
-MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
 MODULE_LICENSE("GPL v2");
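With the overlay handling split out to of-fpga-region.c, any bus-specific driver can now register a region directly; of_fpga_region_probe() below is the first user. A hedged sketch of a non-DT registrant (the probe function and manager lookup are assumptions):

static int demo_region_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fpga_region *region;
	int ret;

	region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->mgr = fpga_mgr_get(dev->parent);	/* however the mgr is found */
	if (IS_ERR(region->mgr))
		return PTR_ERR(region->mgr);

	/* region->get_bridges is optional; leave it NULL if there are no bridges */
	ret = fpga_region_register(dev, region);
	if (ret)
		fpga_mgr_put(region->mgr);

	return ret;
}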
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
new file mode 100644
index 0000000..119ff75
--- /dev/null
+++ b/drivers/fpga/of-fpga-region.c
@@ -0,0 +1,504 @@
+/*
+ * FPGA Region - Device Tree support for FPGA programming under Linux
+ *
+ *  Copyright (C) 2013-2016 Altera Corporation
+ *  Copyright (C) 2017 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-region.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+static const struct of_device_id fpga_region_of_match[] = {
+	{ .compatible = "fpga-region", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, fpga_region_of_match);
+
+static int fpga_region_of_node_match(struct device *dev, const void *data)
+{
+	return dev->of_node == data;
+}
+
+/**
+ * of_fpga_region_find - find FPGA region
+ * @np: device node of FPGA Region
+ *
+ * Caller will need to put_device(&region->dev) when done.
+ *
+ * Returns FPGA Region struct or NULL
+ */
+static struct fpga_region *of_fpga_region_find(struct device_node *np)
+{
+	return fpga_region_class_find(NULL, np, fpga_region_of_node_match);
+}
+
+/**
+ * of_fpga_region_get_mgr - get reference for FPGA manager
+ * @np: device node of FPGA region
+ *
+ * Get FPGA Manager from "fpga-mgr" property or from ancestor region.
+ *
+ * Caller should call fpga_mgr_put() when done with manager.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+static struct fpga_manager *of_fpga_region_get_mgr(struct device_node *np)
+{
+	struct device_node  *mgr_node;
+	struct fpga_manager *mgr;
+
+	of_node_get(np);
+	while (np) {
+		if (of_device_is_compatible(np, "fpga-region")) {
+			mgr_node = of_parse_phandle(np, "fpga-mgr", 0);
+			if (mgr_node) {
+				mgr = of_fpga_mgr_get(mgr_node);
+				of_node_put(mgr_node);
+				of_node_put(np);
+				return mgr;
+			}
+		}
+		np = of_get_next_parent(np);
+	}
+	of_node_put(np);
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * of_fpga_region_get_bridges - create a list of bridges
+ * @region: FPGA region
+ *
+ * Create a list of bridges including the parent bridge and the bridges
+ * specified by "fpga-bridges" property.  Note that the
+ * fpga_bridges_enable/disable/put functions are all fine with an empty list
+ * if that happens.
+ *
+ * Caller should call fpga_bridges_put(&region->bridge_list) when
+ * done with the bridges.
+ *
+ * Return 0 for success (even if there are no bridges specified)
+ * or -EBUSY if any of the bridges are in use.
+ */
+static int of_fpga_region_get_bridges(struct fpga_region *region)
+{
+	struct device *dev = &region->dev;
+	struct device_node *region_np = dev->of_node;
+	struct fpga_image_info *info = region->info;
+	struct device_node *br, *np, *parent_br = NULL;
+	int i, ret;
+
+	/* If parent is a bridge, add to list */
+	ret = of_fpga_bridge_get_to_list(region_np->parent, info,
+					 &region->bridge_list);
+
+	/* -EBUSY means parent is a bridge that is in use. Give up. */
+	if (ret == -EBUSY)
+		return ret;
+
+	/* Zero return code means parent was a bridge and was added to list. */
+	if (!ret)
+		parent_br = region_np->parent;
+
+	/* If overlay has a list of bridges, use it. */
+	br = of_parse_phandle(info->overlay, "fpga-bridges", 0);
+	if (br) {
+		of_node_put(br);
+		np = info->overlay;
+	} else {
+		np = region_np;
+	}
+
+	for (i = 0; ; i++) {
+		br = of_parse_phandle(np, "fpga-bridges", i);
+		if (!br)
+			break;
+
+		/* If parent bridge is in list, skip it. */
+		if (br == parent_br) {
+			of_node_put(br);
+			continue;
+		}
+
+		/* If node is a bridge, get it and add to list */
+		ret = of_fpga_bridge_get_to_list(br, info,
+						 &region->bridge_list);
+		of_node_put(br);
+
+		/* If any of the bridges are in use, give up */
+		if (ret == -EBUSY) {
+			fpga_bridges_put(&region->bridge_list);
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * child_regions_with_firmware
+ * @overlay: device node of the overlay
+ *
+ * If the overlay adds child FPGA regions, they are not allowed to have
+ * firmware-name property.
+ *
+ * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name.
+ */
+static int child_regions_with_firmware(struct device_node *overlay)
+{
+	struct device_node *child_region;
+	const char *child_firmware_name;
+	int ret = 0;
+
+	of_node_get(overlay);
+
+	child_region = of_find_matching_node(overlay, fpga_region_of_match);
+	while (child_region) {
+		if (!of_property_read_string(child_region, "firmware-name",
+					     &child_firmware_name)) {
+			ret = -EINVAL;
+			break;
+		}
+		child_region = of_find_matching_node(child_region,
+						     fpga_region_of_match);
+	}
+
+	of_node_put(child_region);
+
+	if (ret)
+		pr_err("firmware-name not allowed in child FPGA region: %pOF",
+		       child_region);
+
+	return ret;
+}
+
+/**
+ * of_fpga_region_parse_ov - parse and check overlay applied to region
+ *
+ * @region: FPGA region
+ * @overlay: overlay applied to the FPGA region
+ *
+ * Given an overlay applied to a FPGA region, parse the FPGA image specific
+ * info in the overlay and do some checking.
+ *
+ * Returns:
+ *   NULL if overlay doesn't direct us to program the FPGA.
+ *   fpga_image_info struct if there is an image to program.
+ *   error code for invalid overlay.
+ */
+static struct fpga_image_info *of_fpga_region_parse_ov(
+						struct fpga_region *region,
+						struct device_node *overlay)
+{
+	struct device *dev = &region->dev;
+	struct fpga_image_info *info;
+	const char *firmware_name;
+	int ret;
+
+	if (region->info) {
+		dev_err(dev, "Region already has overlay applied.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * Reject overlay if child FPGA Regions added in the overlay have
+	 * firmware-name property (would mean that an FPGA region that has
+	 * not been added to the live tree yet is doing FPGA programming).
+	 */
+	ret = child_regions_with_firmware(overlay);
+	if (ret)
+		return ERR_PTR(ret);
+
+	info = fpga_image_info_alloc(dev);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	info->overlay = overlay;
+
+	/* Read FPGA region properties from the overlay */
+	if (of_property_read_bool(overlay, "partial-fpga-config"))
+		info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+	if (of_property_read_bool(overlay, "external-fpga-config"))
+		info->flags |= FPGA_MGR_EXTERNAL_CONFIG;
+
+	if (of_property_read_bool(overlay, "encrypted-fpga-config"))
+		info->flags |= FPGA_MGR_ENCRYPTED_BITSTREAM;
+
+	if (!of_property_read_string(overlay, "firmware-name",
+				     &firmware_name)) {
+		info->firmware_name = devm_kstrdup(dev, firmware_name,
+						   GFP_KERNEL);
+		if (!info->firmware_name)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	of_property_read_u32(overlay, "region-unfreeze-timeout-us",
+			     &info->enable_timeout_us);
+
+	of_property_read_u32(overlay, "region-freeze-timeout-us",
+			     &info->disable_timeout_us);
+
+	of_property_read_u32(overlay, "config-complete-timeout-us",
+			     &info->config_complete_timeout_us);
+
+	/* If overlay is not programming the FPGA, don't need FPGA image info */
+	if (!info->firmware_name) {
+		ret = 0;
+		goto ret_no_info;
+	}
+
+	/*
+	 * If overlay informs us FPGA was externally programmed, specifying
+	 * firmware here would be ambiguous.
+	 */
+	if (info->flags & FPGA_MGR_EXTERNAL_CONFIG) {
+		dev_err(dev, "error: specified firmware and external-fpga-config");
+		ret = -EINVAL;
+		goto ret_no_info;
+	}
+
+	return info;
+ret_no_info:
+	fpga_image_info_free(info);
+	return ERR_PTR(ret);
+}
+
+/**
+ * of_fpga_region_notify_pre_apply - pre-apply overlay notification
+ *
+ * @region: FPGA region that the overlay was applied to
+ * @nd: overlay notification data
+ *
+ * Called when an overlay targeted to a FPGA Region is about to be applied.
+ * Parses the overlay for properties that influence how the FPGA will be
+ * programmed and does some checking. If the checks pass, programs the FPGA.
+ * If the checks fail, overlay is rejected and does not get added to the
+ * live tree.
+ *
+ * Returns 0 for success or negative error code for failure.
+ */
+static int of_fpga_region_notify_pre_apply(struct fpga_region *region,
+					   struct of_overlay_notify_data *nd)
+{
+	struct device *dev = &region->dev;
+	struct fpga_image_info *info;
+	int ret;
+
+	info = of_fpga_region_parse_ov(region, nd->overlay);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
+
+	/* If overlay doesn't program the FPGA, accept it anyway. */
+	if (!info)
+		return 0;
+
+	if (region->info) {
+		dev_err(dev, "Region already has overlay applied.\n");
+		return -EINVAL;
+	}
+
+	region->info = info;
+	ret = fpga_region_program_fpga(region);
+	if (ret) {
+		/* error; reject overlay */
+		fpga_image_info_free(info);
+		region->info = NULL;
+	}
+
+	return ret;
+}
+
+/**
+ * of_fpga_region_notify_post_remove - post-remove overlay notification
+ *
+ * @region: FPGA region that was targeted by the overlay that was removed
+ * @nd: overlay notification data
+ *
+ * Called after an overlay has been removed if the overlay's target was a
+ * FPGA region.
+ */
+static void of_fpga_region_notify_post_remove(struct fpga_region *region,
+					      struct of_overlay_notify_data *nd)
+{
+	fpga_bridges_disable(&region->bridge_list);
+	fpga_bridges_put(&region->bridge_list);
+	fpga_image_info_free(region->info);
+	region->info = NULL;
+}
+
+/**
+ * of_fpga_region_notify - reconfig notifier for dynamic DT changes
+ * @nb:		notifier block
+ * @action:	notifier action
+ * @arg:	reconfig data
+ *
+ * This notifier handles programming a FPGA when a "firmware-name" property is
+ * added to a fpga-region.
+ *
+ * Returns NOTIFY_OK or error if FPGA programming fails.
+ */
+static int of_fpga_region_notify(struct notifier_block *nb,
+				 unsigned long action, void *arg)
+{
+	struct of_overlay_notify_data *nd = arg;
+	struct fpga_region *region;
+	int ret;
+
+	switch (action) {
+	case OF_OVERLAY_PRE_APPLY:
+		pr_debug("%s OF_OVERLAY_PRE_APPLY\n", __func__);
+		break;
+	case OF_OVERLAY_POST_APPLY:
+		pr_debug("%s OF_OVERLAY_POST_APPLY\n", __func__);
+		return NOTIFY_OK;       /* not for us */
+	case OF_OVERLAY_PRE_REMOVE:
+		pr_debug("%s OF_OVERLAY_PRE_REMOVE\n", __func__);
+		return NOTIFY_OK;       /* not for us */
+	case OF_OVERLAY_POST_REMOVE:
+		pr_debug("%s OF_OVERLAY_POST_REMOVE\n", __func__);
+		break;
+	default:			/* should not happen */
+		return NOTIFY_OK;
+	}
+
+	region = of_fpga_region_find(nd->target);
+	if (!region)
+		return NOTIFY_OK;
+
+	ret = 0;
+	switch (action) {
+	case OF_OVERLAY_PRE_APPLY:
+		ret = of_fpga_region_notify_pre_apply(region, nd);
+		break;
+
+	case OF_OVERLAY_POST_REMOVE:
+		of_fpga_region_notify_post_remove(region, nd);
+		break;
+	}
+
+	put_device(&region->dev);
+
+	if (ret)
+		return notifier_from_errno(ret);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block fpga_region_of_nb = {
+	.notifier_call = of_fpga_region_notify,
+};
+
+static int of_fpga_region_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct fpga_region *region;
+	struct fpga_manager *mgr;
+	int ret;
+
+	/* Find the FPGA mgr specified by region or parent region. */
+	mgr = of_fpga_region_get_mgr(np);
+	if (IS_ERR(mgr))
+		return -EPROBE_DEFER;
+
+	region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
+	if (!region) {
+		ret = -ENOMEM;
+		goto eprobe_mgr_put;
+	}
+
+	region->mgr = mgr;
+
+	/* Specify how to get bridges for this type of region. */
+	region->get_bridges = of_fpga_region_get_bridges;
+
+	ret = fpga_region_register(dev, region);
+	if (ret)
+		goto eprobe_mgr_put;
+
+	of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
+
+	dev_info(dev, "FPGA Region probed\n");
+
+	return 0;
+
+eprobe_mgr_put:
+	fpga_mgr_put(mgr);
+	return ret;
+}
+
+static int of_fpga_region_remove(struct platform_device *pdev)
+{
+	struct fpga_region *region = platform_get_drvdata(pdev);
+
+	fpga_region_unregister(region);
+	fpga_mgr_put(region->mgr);
+
+	return 0;
+}
+
+static struct platform_driver of_fpga_region_driver = {
+	.probe = of_fpga_region_probe,
+	.remove = of_fpga_region_remove,
+	.driver = {
+		.name	= "of-fpga-region",
+		.of_match_table = of_match_ptr(fpga_region_of_match),
+	},
+};
+
+/**
+ * fpga_region_init - init function for fpga_region class
+ * Creates the fpga_region class and registers a reconfig notifier.
+ */
+static int __init of_fpga_region_init(void)
+{
+	int ret;
+
+	ret = of_overlay_notifier_register(&fpga_region_of_nb);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&of_fpga_region_driver);
+	if (ret)
+		goto err_plat;
+
+	return 0;
+
+err_plat:
+	of_overlay_notifier_unregister(&fpga_region_of_nb);
+	return ret;
+}
+
+static void __exit of_fpga_region_exit(void)
+{
+	platform_driver_unregister(&of_fpga_region_driver);
+	of_overlay_notifier_unregister(&fpga_region_of_nb);
+}
+
+subsys_initcall(of_fpga_region_init);
+module_exit(of_fpga_region_exit);
+
+MODULE_DESCRIPTION("FPGA Region");
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index f8770af..a46e343 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -519,8 +519,14 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
 		return -EBUSY;
 	}
 
-	return fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager",
+	ret = fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager",
 				 &socfpga_a10_fpga_mgr_ops, priv);
+	if (ret) {
+		clk_disable_unprepare(priv->clk);
+		return ret;
+	}
+
+	return 0;
 }
 
 static int socfpga_a10_fpga_remove(struct platform_device *pdev)
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index 6821ed0..513e351 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -2,9 +2,7 @@
 # FSI subsystem
 #
 
-menu "FSI support"
-
-config FSI
+menuconfig FSI
 	tristate "FSI support"
 	select CRC4
 	---help---
@@ -34,5 +32,3 @@
 	This option enables an FSI based SCOM device driver.
 
 endif
-
-endmenu
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
index 8f4357e..043da86 100644
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
@@ -163,10 +163,8 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
 	desc.dev = &adev->dev;
 	desc.groups = replicator_groups;
 	drvdata->csdev = coresight_register(&desc);
-	if (IS_ERR(drvdata->csdev))
-		return PTR_ERR(drvdata->csdev);
 
-	return 0;
+	return PTR_ERR_OR_ZERO(drvdata->csdev);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index e03e589..580cd38 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -33,7 +33,6 @@
 #include <linux/mm.h>
 #include <linux/perf_event.h>
 
-#include <asm/local.h>
 
 #include "coresight-priv.h"
 
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index fd3c396..9f8ac0be 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -214,10 +214,8 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
 	desc.dev = dev;
 	desc.groups = coresight_funnel_groups;
 	drvdata->csdev = coresight_register(&desc);
-	if (IS_ERR(drvdata->csdev))
-		return PTR_ERR(drvdata->csdev);
 
-	return 0;
+	return PTR_ERR_OR_ZERO(drvdata->csdev);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index bef49a3..805f7c2 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -46,8 +46,11 @@
 #define TPIU_ITATBCTR0		0xef8
 
 /** register definition **/
+/* FFSR - 0x300 */
+#define FFSR_FT_STOPPED_BIT	1
 /* FFCR - 0x304 */
 #define FFCR_FON_MAN		BIT(6)
+#define FFCR_FON_MAN_BIT	6
+#define FFCR_STOP_FI		BIT(12)
 
 /**
  * @base:	memory mapped base address for this component.
@@ -85,10 +88,14 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
 {
 	CS_UNLOCK(drvdata->base);
 
-	/* Clear formatter controle reg. */
-	writel_relaxed(0x0, drvdata->base + TPIU_FFCR);
+	/* Clear formatter and stop on flush */
+	writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR);
 	/* Generate manual flush */
-	writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+	writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+	/* Wait for flush to complete */
+	coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
+	/* Wait for formatter to stop */
+	coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
 
 	CS_LOCK(drvdata->base);
 }
@@ -160,10 +167,8 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
 	desc.pdata = pdata;
 	desc.dev = dev;
 	drvdata->csdev = coresight_register(&desc);
-	if (IS_ERR(drvdata->csdev))
-		return PTR_ERR(drvdata->csdev);
 
-	return 0;
+	return PTR_ERR_OR_ZERO(drvdata->csdev);
 }
 
 #ifdef CONFIG_PM
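tpiu_disable_hw() now stops the formatter cleanly: it enables stop-on-flush,
triggers a manual flush, waits for FFCR.FnMan to self-clear (flush done) and
for FFSR.FtStopped to assert (formatter stopped). Note that
coresight_timeout() takes a bit *position* (hence the *_BIT constants above),
not a mask: it polls until that bit of the given register reaches the wanted
value. A rough sketch of the idea (the real helper in coresight.c also bounds
the loop and delays between reads):

	/* sketch: wait for bit <position> of the register at <offset> */
	for (i = TIMEOUT_US; i > 0; i--) {
		u32 val = readl_relaxed(base + offset);

		if (!!(val & BIT(position)) == value)
			return 0;
		udelay(1);
	}
	return -EAGAIN;
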
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index b8091be..389c4ba 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -843,32 +843,17 @@ static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
 }
 
 
-static int coresight_name_match(struct device *dev, void *data)
-{
-	char *to_match;
-	struct coresight_device *i_csdev;
-
-	to_match = data;
-	i_csdev = to_coresight_device(dev);
-
-	if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
-		return 1;
-
-	return 0;
-}
-
 static void coresight_fixup_device_conns(struct coresight_device *csdev)
 {
 	int i;
-	struct device *dev = NULL;
-	struct coresight_connection *conn;
 
 	for (i = 0; i < csdev->nr_outport; i++) {
-		conn = &csdev->conns[i];
-		dev = bus_find_device(&coresight_bustype, NULL,
-				      (void *)conn->child_name,
-				      coresight_name_match);
+		struct coresight_connection *conn = &csdev->conns[i];
+		struct device *dev = NULL;
 
+		if (conn->child_name)
+			dev = bus_find_device_by_name(&coresight_bustype, NULL,
+						      conn->child_name);
 		if (dev) {
 			conn->child_dev = to_coresight_device(dev);
 			/* and put reference from 'bus_find_device()' */
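bus_find_device_by_name() is the generic replacement for the removed
coresight_name_match() helper: it walks the bus comparing dev_name() against
the given string and returns the matching device with a reference held, which
is why the put-reference comment above still applies. Roughly, it is a thin
wrapper (sketch):

	struct device *bus_find_device_by_name(struct bus_type *bus,
					       struct device *start,
					       const char *name)
	{
		/* match_name() compares dev_name(dev) with the string */
		return bus_find_device(bus, start, (void *)name, match_name);
	}
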
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index fe16727..bc591b7 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -3,7 +3,7 @@
  * Copyright (c) 2009-2010 Analog Devices, Inc.
  * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
  *
- * DEVID		#Wipers		#Positions 	Resistor Options (kOhm)
+ * DEVID		#Wipers		#Positions	Resistor Options (kOhm)
  * AD5258		1		64		1, 10, 50, 100
  * AD5259		1		256		5, 10, 50, 100
  * AD5251		2		64		1, 10, 50, 100
@@ -84,12 +84,12 @@
 struct dpot_data {
 	struct ad_dpot_bus_data	bdata;
 	struct mutex update_lock;
-	unsigned rdac_mask;
-	unsigned max_pos;
+	unsigned int rdac_mask;
+	unsigned int max_pos;
 	unsigned long devid;
-	unsigned uid;
-	unsigned feat;
-	unsigned wipers;
+	unsigned int uid;
+	unsigned int feat;
+	unsigned int wipers;
 	u16 rdac_cache[MAX_RDACS];
 	DECLARE_BITMAP(otp_en_mask, MAX_RDACS);
 };
@@ -126,7 +126,7 @@ static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
 
 static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
 {
-	unsigned ctrl = 0;
+	unsigned int ctrl = 0;
 	int value;
 
 	if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
@@ -175,7 +175,7 @@ static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
 static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
 {
 	int value;
-	unsigned ctrl = 0;
+	unsigned int ctrl = 0;
 
 	switch (dpot->uid) {
 	case DPOT_UID(AD5246_ID):
@@ -238,7 +238,7 @@ static s32 dpot_read(struct dpot_data *dpot, u8 reg)
 
 static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
 {
-	unsigned val = 0;
+	unsigned int val = 0;
 
 	if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) {
 		if (dpot->feat & F_RDACS_WONLY)
@@ -328,7 +328,7 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
 static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
 {
 	/* Only write the instruction byte for certain commands */
-	unsigned tmp = 0, ctrl = 0;
+	unsigned int tmp = 0, ctrl = 0;
 
 	switch (dpot->uid) {
 	case DPOT_UID(AD5246_ID):
@@ -515,11 +515,11 @@ set_##_name(struct device *dev, \
 #define DPOT_DEVICE_SHOW_SET(name, reg) \
 DPOT_DEVICE_SHOW(name, reg) \
 DPOT_DEVICE_SET(name, reg) \
-static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name);
+static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name)
 
 #define DPOT_DEVICE_SHOW_ONLY(name, reg) \
 DPOT_DEVICE_SHOW(name, reg) \
-static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, NULL);
+static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, NULL)
 
 DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0);
 DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0);
@@ -616,7 +616,7 @@ set_##_name(struct device *dev, \
 { \
 	return sysfs_do_cmd(dev, attr, buf, count, _cmd); \
 } \
-static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name);
+static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name)
 
 DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL);
 DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL);
@@ -636,7 +636,7 @@ static const struct attribute_group ad525x_group_commands = {
 };
 
 static int ad_dpot_add_files(struct device *dev,
-		unsigned features, unsigned rdac)
+		unsigned int features, unsigned int rdac)
 {
 	int err = sysfs_create_file(&dev->kobj,
 		dpot_attrib_wipers[rdac]);
@@ -661,7 +661,7 @@ static int ad_dpot_add_files(struct device *dev,
 }
 
 static inline void ad_dpot_remove_files(struct device *dev,
-		unsigned features, unsigned rdac)
+		unsigned int features, unsigned int rdac)
 {
 	sysfs_remove_file(&dev->kobj,
 		dpot_attrib_wipers[rdac]);
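Dropping the trailing semicolon from these macro bodies lets the invocation
sites supply it, so the expansion no longer ends in a stray empty statement
(checkpatch warns about macros with a trailing semicolon). The same pattern
in miniature, with a made-up macro:

	#define DEFINE_COUNTER(name) static int name##_count	/* no ';' in the body */

	DEFINE_COUNTER(foo);	/* the caller's ';' terminates the definition */
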
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index 6bd1eba..443a51f 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -195,12 +195,12 @@ enum dpot_devid {
 struct dpot_data;
 
 struct ad_dpot_bus_ops {
-	int (*read_d8) (void *client);
-	int (*read_r8d8) (void *client, u8 reg);
-	int (*read_r8d16) (void *client, u8 reg);
-	int (*write_d8) (void *client, u8 val);
-	int (*write_r8d8) (void *client, u8 reg, u8 val);
-	int (*write_r8d16) (void *client, u8 reg, u16 val);
+	int (*read_d8)(void *client);
+	int (*read_r8d8)(void *client, u8 reg);
+	int (*read_r8d16)(void *client, u8 reg);
+	int (*write_d8)(void *client, u8 val);
+	int (*write_r8d8)(void *client, u8 reg, u8 val);
+	int (*write_r8d16)(void *client, u8 reg, u16 val);
 };
 
 struct ad_dpot_bus_data {
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index c9f0703..ed9412d 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -715,6 +715,7 @@ static ssize_t apds990x_rate_avail(struct device *dev,
 {
 	int i;
 	int pos = 0;
+
 	for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
 		pos += sprintf(buf + pos, "%d ", arates_hz[i]);
 	sprintf(buf + pos - 1, "\n");
@@ -725,6 +726,7 @@ static ssize_t apds990x_rate_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct apds990x_chip *chip =  dev_get_drvdata(dev);
+
 	return sprintf(buf, "%d\n", chip->arate);
 }
 
@@ -784,6 +786,7 @@ static ssize_t apds990x_prox_show(struct device *dev,
 {
 	ssize_t ret;
 	struct apds990x_chip *chip =  dev_get_drvdata(dev);
+
 	if (pm_runtime_suspended(dev) || !chip->prox_en)
 		return -EIO;
 
@@ -807,6 +810,7 @@ static ssize_t apds990x_prox_enable_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct apds990x_chip *chip =  dev_get_drvdata(dev);
+
 	return sprintf(buf, "%d\n", chip->prox_en);
 }
 
@@ -847,6 +851,7 @@ static ssize_t apds990x_prox_reporting_mode_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct apds990x_chip *chip =  dev_get_drvdata(dev);
+
 	return sprintf(buf, "%s\n",
 		reporting_modes[!!chip->prox_continuous_mode]);
 }
@@ -884,6 +889,7 @@ static ssize_t apds990x_lux_thresh_above_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct apds990x_chip *chip =  dev_get_drvdata(dev);
+
 	return sprintf(buf, "%d\n", chip->lux_thres_hi);
 }
 
@@ -891,6 +897,7 @@ static ssize_t apds990x_lux_thresh_below_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct apds990x_chip *chip =  dev_get_drvdata(dev);
+
 	return sprintf(buf, "%d\n", chip->lux_thres_lo);
 }
 
@@ -926,6 +933,7 @@ static ssize_t apds990x_lux_thresh_above_store(struct device *dev,
 {
 	struct apds990x_chip *chip =  dev_get_drvdata(dev);
 	int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_hi, buf);
+
 	if (ret < 0)
 		return ret;
 	return len;
@@ -937,6 +945,7 @@ static ssize_t apds990x_lux_thresh_below_store(struct device *dev,
 {
 	struct apds990x_chip *chip =  dev_get_drvdata(dev);
 	int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_lo, buf);
+
 	if (ret < 0)
 		return ret;
 	return len;
@@ -954,6 +963,7 @@ static ssize_t apds990x_prox_threshold_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct apds990x_chip *chip =  dev_get_drvdata(dev);
+
 	return sprintf(buf, "%d\n", chip->prox_thres);
 }
 
@@ -1026,6 +1036,7 @@ static ssize_t apds990x_chip_id_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct apds990x_chip *chip =  dev_get_drvdata(dev);
+
 	return sprintf(buf, "%s %d\n", chip->chipname, chip->revision);
 }
 
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 5afe4cd..9282ffd 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -276,6 +276,9 @@ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
 			return -ENODEV;
 		}
 		switch (val) {
+		case 9:
+			chip->flags |= EE_INSTR_BIT3_IS_ADDR;
+			/* fall through */
 		case 8:
 			chip->flags |= EE_ADDR1;
 			break;
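The new case 9 covers parts whose nine address bits do not fit a single
address byte (hence the fall-through to EE_ADDR1): the ninth bit travels in
bit 3 of the instruction opcode instead. Under that assumption, composing a
read command looks roughly like this illustrative fragment (not taken
verbatim from the driver):

	u8 instr = 0x03;	/* READ opcode */

	if ((chip->flags & EE_INSTR_BIT3_IS_ADDR) && (offset & 0x100))
		instr |= 0x08;	/* address bit 8 rides in instruction bit 3 */
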
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index eb29113..5a17bfe 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -468,7 +468,7 @@ static struct class enclosure_class = {
 	.dev_groups		= enclosure_class_groups,
 };
 
-static const char *const enclosure_status [] = {
+static const char *const enclosure_status[] = {
 	[ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported",
 	[ENCLOSURE_STATUS_OK] = "OK",
 	[ENCLOSURE_STATUS_CRITICAL] = "critical",
@@ -480,7 +480,7 @@ static const char *const enclosure_status [] = {
 	[ENCLOSURE_STATUS_MAX] = NULL,
 };
 
-static const char *const enclosure_type [] = {
+static const char *const enclosure_type[] = {
 	[ENCLOSURE_COMPONENT_DEVICE] = "device",
 	[ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device",
 };
@@ -680,13 +680,7 @@ ATTRIBUTE_GROUPS(enclosure_component);
 
 static int __init enclosure_init(void)
 {
-	int err;
-
-	err = class_register(&enclosure_class);
-	if (err)
-		return err;
-
-	return 0;
+	return class_register(&enclosure_class);
 }
 
 static void __exit enclosure_exit(void)
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 4fd21e8..c7cd367 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -153,11 +153,11 @@ static struct genwqe_dev *genwqe_dev_alloc(void)
 	cd->card_state = GENWQE_CARD_UNUSED;
 	spin_lock_init(&cd->print_lock);
 
-	cd->ddcb_software_timeout = genwqe_ddcb_software_timeout;
-	cd->kill_timeout = genwqe_kill_timeout;
+	cd->ddcb_software_timeout = GENWQE_DDCB_SOFTWARE_TIMEOUT;
+	cd->kill_timeout = GENWQE_KILL_TIMEOUT;
 
 	for (j = 0; j < GENWQE_MAX_VFS; j++)
-		cd->vf_jobtimeout_msec[j] = genwqe_vf_jobtimeout_msec;
+		cd->vf_jobtimeout_msec[j] = GENWQE_VF_JOBTIMEOUT_MSEC;
 
 	genwqe_devices[i] = cd;
 	return cd;
@@ -324,11 +324,11 @@ static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
 	u32 T = genwqe_T_psec(cd);
 	u64 x;
 
-	if (genwqe_pf_jobtimeout_msec == 0)
+	if (GENWQE_PF_JOBTIMEOUT_MSEC == 0)
 		return false;
 
 	/* PF: large value needed, flash update 2sec per block */
-	x = ilog2(genwqe_pf_jobtimeout_msec *
+	x = ilog2(GENWQE_PF_JOBTIMEOUT_MSEC *
 		  16000000000uL/(T * 15)) - 10;
 
 	genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
@@ -904,7 +904,7 @@ static int genwqe_reload_bistream(struct genwqe_dev *cd)
 *   b) a critical GFIR occurred
  *
  * Informational GFIRs are checked and potentially printed in
- * health_check_interval seconds.
+ * GENWQE_HEALTH_CHECK_INTERVAL seconds.
  */
 static int genwqe_health_thread(void *data)
 {
@@ -918,7 +918,7 @@ static int genwqe_health_thread(void *data)
 		rc = wait_event_interruptible_timeout(cd->health_waitq,
 			 (genwqe_health_check_cond(cd, &gfir) ||
 			  (should_stop = kthread_should_stop())),
-				genwqe_health_check_interval * HZ);
+				GENWQE_HEALTH_CHECK_INTERVAL * HZ);
 
 		if (should_stop)
 			break;
@@ -1028,7 +1028,7 @@ static int genwqe_health_check_start(struct genwqe_dev *cd)
 {
 	int rc;
 
-	if (genwqe_health_check_interval <= 0)
+	if (GENWQE_HEALTH_CHECK_INTERVAL <= 0)
 		return 0;	/* valid for disabling the service */
 
 	/* moved before request_irq() */
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 3743c87f..1c3967f 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -47,13 +47,13 @@
 #define GENWQE_CARD_NO_MAX		(16 * GENWQE_MAX_FUNCS)
 
 /* Compile parameters, some of them appear in debugfs for later adjustment */
-#define genwqe_ddcb_max			32 /* DDCBs on the work-queue */
-#define genwqe_polling_enabled		0  /* in case of irqs not working */
-#define genwqe_ddcb_software_timeout	10 /* timeout per DDCB in seconds */
-#define genwqe_kill_timeout		8  /* time until process gets killed */
-#define genwqe_vf_jobtimeout_msec	250  /* 250 msec */
-#define genwqe_pf_jobtimeout_msec	8000 /* 8 sec should be ok */
-#define genwqe_health_check_interval	4 /* <= 0: disabled */
+#define GENWQE_DDCB_MAX			32 /* DDCBs on the work-queue */
+#define GENWQE_POLLING_ENABLED		0  /* in case of irqs not working */
+#define GENWQE_DDCB_SOFTWARE_TIMEOUT	10 /* timeout per DDCB in seconds */
+#define GENWQE_KILL_TIMEOUT		8  /* time until process gets killed */
+#define GENWQE_VF_JOBTIMEOUT_MSEC	250  /* 250 msec */
+#define GENWQE_PF_JOBTIMEOUT_MSEC	8000 /* 8 sec should be ok */
+#define GENWQE_HEALTH_CHECK_INTERVAL	4 /* <= 0: disabled */
 
 /* Sysfs attribute groups used when we create the genwqe device */
 extern const struct attribute_group *genwqe_attribute_groups[];
@@ -490,11 +490,9 @@ int  genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);
 
 /* Memory allocation/deallocation; dma address handling */
 int  genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
-		      void *uaddr, unsigned long size,
-		      struct ddcb_requ *req);
+		      void *uaddr, unsigned long size);
 
-int  genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
-			struct ddcb_requ *req);
+int  genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m);
 
 static inline bool dma_mapping_used(struct dma_mapping *m)
 {
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index ddfeefe..b7f8d35 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -500,7 +500,7 @@ int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
 
 	rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
 				ddcb_requ_finished(cd, req),
-				genwqe_ddcb_software_timeout * HZ);
+				GENWQE_DDCB_SOFTWARE_TIMEOUT * HZ);
 
 	/*
 	 * We need to distinguish 3 cases here:
@@ -633,7 +633,7 @@ int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
 	__be32 old, new;
 
 	/* unsigned long flags; */
-	if (genwqe_ddcb_software_timeout <= 0) {
+	if (GENWQE_DDCB_SOFTWARE_TIMEOUT <= 0) {
 		dev_err(&pci_dev->dev,
 			"[%s] err: software timeout is not set!\n", __func__);
 		return -EFAULT;
@@ -641,7 +641,7 @@ int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
 
 	pddcb = &queue->ddcb_vaddr[req->num];
 
-	for (t = 0; t < genwqe_ddcb_software_timeout * 10; t++) {
+	for (t = 0; t < GENWQE_DDCB_SOFTWARE_TIMEOUT * 10; t++) {
 
 		spin_lock_irqsave(&queue->ddcb_lock, flags);
 
@@ -718,7 +718,7 @@ int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
 
 	dev_err(&pci_dev->dev,
 		"[%s] err: DDCB#%d not purged and not completed after %d seconds QSTAT=%016llx!!\n",
-		__func__, req->num, genwqe_ddcb_software_timeout,
+		__func__, req->num, GENWQE_DDCB_SOFTWARE_TIMEOUT,
 		queue_status);
 
 	print_ddcb_info(cd, req->queue);
@@ -778,7 +778,7 @@ int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
 	/* FIXME circumvention to improve performance when no irq is
 	 * there.
 	 */
-	if (genwqe_polling_enabled)
+	if (GENWQE_POLLING_ENABLED)
 		genwqe_check_ddcb_queue(cd, queue);
 
 	/*
@@ -878,7 +878,7 @@ int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
 	pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16);
 
 	/* enable DDCB completion irq */
-	if (!genwqe_polling_enabled)
+	if (!GENWQE_POLLING_ENABLED)
 		pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32;
 
 	dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num);
@@ -1028,10 +1028,10 @@ static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
 	unsigned int queue_size;
 	struct pci_dev *pci_dev = cd->pci_dev;
 
-	if (genwqe_ddcb_max < 2)
+	if (GENWQE_DDCB_MAX < 2)
 		return -EINVAL;
 
-	queue_size = roundup(genwqe_ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
+	queue_size = roundup(GENWQE_DDCB_MAX * sizeof(struct ddcb), PAGE_SIZE);
 
 	queue->ddcbs_in_flight = 0;  /* statistics */
 	queue->ddcbs_max_in_flight = 0;
@@ -1040,7 +1040,7 @@ static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
 	queue->wait_on_busy = 0;
 
 	queue->ddcb_seq	  = 0x100; /* start sequence number */
-	queue->ddcb_max	  = genwqe_ddcb_max; /* module parameter */
+	queue->ddcb_max	  = GENWQE_DDCB_MAX;
 	queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
 						&queue->ddcb_daddr);
 	if (queue->ddcb_vaddr == NULL) {
@@ -1194,7 +1194,7 @@ static int genwqe_card_thread(void *data)
 
 		genwqe_check_ddcb_queue(cd, &cd->queue);
 
-		if (genwqe_polling_enabled) {
+		if (GENWQE_POLLING_ENABLED) {
 			rc = wait_event_interruptible_timeout(
 				cd->queue_waitq,
 				genwqe_ddcbs_in_flight(cd) ||
@@ -1340,7 +1340,7 @@ static int queue_wake_up_all(struct genwqe_dev *cd)
 int genwqe_finish_queue(struct genwqe_dev *cd)
 {
 	int i, rc = 0, in_flight;
-	int waitmax = genwqe_ddcb_software_timeout;
+	int waitmax = GENWQE_DDCB_SOFTWARE_TIMEOUT;
 	struct pci_dev *pci_dev = cd->pci_dev;
 	struct ddcb_queue *queue = &cd->queue;
 
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index c715534..f921dd5 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -198,7 +198,7 @@ static int genwqe_jtimer_show(struct seq_file *s, void *unused)
 
 	jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, 0);
 	seq_printf(s, "  PF   0x%016llx %d msec\n", jtimer,
-		   genwqe_pf_jobtimeout_msec);
+		   GENWQE_PF_JOBTIMEOUT_MSEC);
 
 	for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
 		jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 3ecfa35..0dd6b5e 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -226,7 +226,7 @@ static void genwqe_remove_mappings(struct genwqe_file *cfile)
 			kfree(dma_map);
 		} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
 			/* we use dma_map statically from the request */
-			genwqe_user_vunmap(cd, dma_map, NULL);
+			genwqe_user_vunmap(cd, dma_map);
 		}
 	}
 }
@@ -249,7 +249,7 @@ static void genwqe_remove_pinnings(struct genwqe_file *cfile)
 		 * deleted.
 		 */
 		list_del_init(&dma_map->pin_list);
-		genwqe_user_vunmap(cd, dma_map, NULL);
+		genwqe_user_vunmap(cd, dma_map);
 		kfree(dma_map);
 	}
 }
@@ -790,7 +790,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
 		return -ENOMEM;
 
 	genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
-	rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL);
+	rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size);
 	if (rc != 0) {
 		dev_err(&pci_dev->dev,
 			"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
@@ -820,7 +820,7 @@ static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
 		return -ENOENT;
 
 	genwqe_del_pin(cfile, dma_map);
-	genwqe_user_vunmap(cd, dma_map, NULL);
+	genwqe_user_vunmap(cd, dma_map);
 	kfree(dma_map);
 	return 0;
 }
@@ -841,7 +841,7 @@ static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
 
 		if (dma_mapping_used(dma_map)) {
 			__genwqe_del_mapping(cfile, dma_map);
-			genwqe_user_vunmap(cd, dma_map, req);
+			genwqe_user_vunmap(cd, dma_map);
 		}
 		if (req->sgls[i].sgl != NULL)
 			genwqe_free_sync_sgl(cd, &req->sgls[i]);
@@ -947,7 +947,7 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
 					m->write = 0;
 
 				rc = genwqe_user_vmap(cd, m, (void *)u_addr,
-						      u_size, req);
+						      u_size);
 				if (rc != 0)
 					goto err_out;
 
@@ -1011,7 +1011,6 @@ static int do_execute_ddcb(struct genwqe_file *cfile,
 {
 	int rc;
 	struct genwqe_ddcb_cmd *cmd;
-	struct ddcb_requ *req;
 	struct genwqe_dev *cd = cfile->cd;
 	struct file *filp = cfile->filp;
 
@@ -1019,8 +1018,6 @@ static int do_execute_ddcb(struct genwqe_file *cfile,
 	if (cmd == NULL)
 		return -ENOMEM;
 
-	req = container_of(cmd, struct ddcb_requ, cmd);
-
 	if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
 		ddcb_requ_free(cmd);
 		return -EFAULT;
@@ -1345,7 +1342,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
 	rc = genwqe_kill_fasync(cd, SIGIO);
 	if (rc > 0) {
 		/* give kill_timeout seconds to close file descriptors ... */
-		for (i = 0; (i < genwqe_kill_timeout) &&
+		for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
 			     genwqe_open_files(cd); i++) {
 			dev_info(&pci_dev->dev, "  %d sec ...", i);
 
@@ -1363,7 +1360,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
 		rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
 		if (rc) {
 		/* Give kill_timeout more seconds to end processes */
-			for (i = 0; (i < genwqe_kill_timeout) &&
+			for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
 				     genwqe_open_files(cd); i++) {
 				dev_warn(&pci_dev->dev, "  %d sec ...", i);
 
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 5c0d917..8f2e644 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -524,22 +524,16 @@ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
 }
 
 /**
- * free_user_pages() - Give pinned pages back
+ * genwqe_free_user_pages() - Give pinned pages back
  *
- * Documentation of get_user_pages is in mm/memory.c:
+ * Documentation of get_user_pages is in mm/gup.c:
  *
  * If the page is written to, set_page_dirty (or set_page_dirty_lock,
  * as appropriate) must be called after the page is finished with, and
  * before put_page is called.
- *
- * FIXME Could be of use to others and might belong in the generic
- * code, if others agree. E.g.
- *    ll_free_user_pages in drivers/staging/lustre/lustre/llite/rw26.c
- *    ceph_put_page_vector in net/ceph/pagevec.c
- *    maybe more?
  */
-static int free_user_pages(struct page **page_list, unsigned int nr_pages,
-			   int dirty)
+static int genwqe_free_user_pages(struct page **page_list,
+			unsigned int nr_pages, int dirty)
 {
 	unsigned int i;
 
@@ -577,7 +571,7 @@ static int free_user_pages(struct page **page_list, unsigned int nr_pages,
  * Return: 0 if success
  */
 int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
-		     unsigned long size, struct ddcb_requ *req)
+		     unsigned long size)
 {
 	int rc = -EINVAL;
 	unsigned long data, offs;
@@ -617,7 +611,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
 
 	/* assumption: get_user_pages can be killed by signals. */
 	if (rc < m->nr_pages) {
-		free_user_pages(m->page_list, rc, m->write);
+		genwqe_free_user_pages(m->page_list, rc, m->write);
 		rc = -EFAULT;
 		goto fail_get_user_pages;
 	}
@@ -629,7 +623,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
 	return 0;
 
  fail_free_user_pages:
-	free_user_pages(m->page_list, m->nr_pages, m->write);
+	genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
 
  fail_get_user_pages:
 	kfree(m->page_list);
@@ -647,8 +641,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
  * @cd:         pointer to genwqe device
  * @m:          mapping params
  */
-int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
-		       struct ddcb_requ *req)
+int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
 {
 	struct pci_dev *pci_dev = cd->pci_dev;
 
@@ -662,7 +655,7 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
 		genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
 
 	if (m->page_list) {
-		free_user_pages(m->page_list, m->nr_pages, m->write);
+		genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
 
 		kfree(m->page_list);
 		m->page_list = NULL;
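The renamed genwqe_free_user_pages() (the stale FIXME about generalizing it
is gone, too) implements the standard release pattern for pages pinned with
get_user_pages(): mark a page dirty if it was written to, then drop the
reference. As a sketch:

	for (i = 0; i < nr_pages; i++) {
		if (page_list[i]) {
			if (dirty)
				set_page_dirty_lock(page_list[i]);
			put_page(page_list[i]);
		}
	}
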
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 097e309..cfdf052 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for the HP iLO management processor.
  *
  * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
  *	David Altobelli <david.altobelli@hpe.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/kernel.h>
 #include <linux/types.h>
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index b97672e..94dfb9e 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * linux/drivers/char/hpilo.h
  *
  * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
  *	David Altobelli <david.altobelli@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #ifndef __HPILO_H
 #define __HPILO_H
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 28f51e0..81a0541 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -33,7 +33,7 @@ static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END };
 
 /* ICS932S401 registers */
 #define ICS932S401_REG_CFG2			0x01
-#define 	ICS932S401_CFG1_SPREAD		0x01
+#define		ICS932S401_CFG1_SPREAD		0x01
 #define ICS932S401_REG_CFG7			0x06
 #define		ICS932S401_FS_MASK		0x07
 #define	ICS932S401_REG_VENDOR_REV		0x07
@@ -58,7 +58,7 @@ static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END };
 #define	ICS932S401_REG_SRC_SPREAD1		0x11
 #define ICS932S401_REG_SRC_SPREAD2		0x12
 #define ICS932S401_REG_CPU_DIVISOR		0x13
-#define 	ICS932S401_CPU_DIVISOR_SHIFT	4
+#define		ICS932S401_CPU_DIVISOR_SHIFT	4
 #define ICS932S401_REG_PCISRC_DIVISOR		0x14
 #define		ICS932S401_SRC_DIVISOR_MASK	0x0F
 #define		ICS932S401_PCI_DIVISOR_SHIFT	4
@@ -225,6 +225,7 @@ static ssize_t show_cpu_clock_sel(struct device *dev,
 	else {
 		/* Freq is neatly wrapped up for us */
 		int fid = data->regs[ICS932S401_REG_CFG7] & ICS932S401_FS_MASK;
+
 		freq = fs_speeds[fid];
 		if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT) {
 			switch (freq) {
@@ -352,8 +353,7 @@ static DEVICE_ATTR(ref_clock, S_IRUGO, show_value, NULL);
 static DEVICE_ATTR(cpu_spread, S_IRUGO, show_spread, NULL);
 static DEVICE_ATTR(src_spread, S_IRUGO, show_spread, NULL);
 
-static struct attribute *ics932s401_attr[] =
-{
+static struct attribute *ics932s401_attr[] = {
 	&dev_attr_spread_enabled.attr,
 	&dev_attr_cpu_clock_selection.attr,
 	&dev_attr_cpu_clock.attr,
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index 976df00..b803288 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -78,6 +78,7 @@ static int __isl29003_read_reg(struct i2c_client *client,
 			       u32 reg, u8 mask, u8 shift)
 {
 	struct isl29003_data *data = i2c_get_clientdata(client);
+
 	return (data->reg_cache[reg] & mask) >> shift;
 }
 
@@ -160,6 +161,7 @@ static int isl29003_get_power_state(struct i2c_client *client)
 {
 	struct isl29003_data *data = i2c_get_clientdata(client);
 	u8 cmdreg = data->reg_cache[ISL29003_REG_COMMAND];
+
 	return ~cmdreg & ISL29003_ADC_PD;
 }
 
@@ -196,6 +198,7 @@ static ssize_t isl29003_show_range(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct i2c_client *client = to_i2c_client(dev);
+
 	return sprintf(buf, "%i\n", isl29003_get_range(client));
 }
 
@@ -231,6 +234,7 @@ static ssize_t isl29003_show_resolution(struct device *dev,
 					char *buf)
 {
 	struct i2c_client *client = to_i2c_client(dev);
+
 	return sprintf(buf, "%d\n", isl29003_get_resolution(client));
 }
 
@@ -264,6 +268,7 @@ static ssize_t isl29003_show_mode(struct device *dev,
 				  struct device_attribute *attr, char *buf)
 {
 	struct i2c_client *client = to_i2c_client(dev);
+
 	return sprintf(buf, "%d\n", isl29003_get_mode(client));
 }
 
@@ -298,6 +303,7 @@ static ssize_t isl29003_show_power_state(struct device *dev,
 					 char *buf)
 {
 	struct i2c_client *client = to_i2c_client(dev);
+
 	return sprintf(buf, "%d\n", isl29003_get_power_state(client));
 }
 
@@ -361,6 +367,7 @@ static int isl29003_init_client(struct i2c_client *client)
 	 * if one of the reads fails, we consider the init failed */
 	for (i = 0; i < ARRAY_SIZE(data->reg_cache); i++) {
 		int v = i2c_smbus_read_byte_data(client, i);
+
 		if (v < 0)
 			return -ENODEV;
 
diff --git a/drivers/misc/lkdtm_heap.c b/drivers/misc/lkdtm_heap.c
index f5494a6..65026d7 100644
--- a/drivers/misc/lkdtm_heap.c
+++ b/drivers/misc/lkdtm_heap.c
@@ -16,6 +16,8 @@ void lkdtm_OVERWRITE_ALLOCATION(void)
 {
 	size_t len = 1020;
 	u32 *data = kmalloc(len, GFP_KERNEL);
+	if (!data)
+		return;
 
 	data[1024 / sizeof(u32)] = 0x12345678;
 	kfree(data);
@@ -33,6 +35,8 @@ void lkdtm_WRITE_AFTER_FREE(void)
 	size_t offset = (len / sizeof(*base)) / 2;
 
 	base = kmalloc(len, GFP_KERNEL);
+	if (!base)
+		return;
 	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
 	pr_info("Attempting bad write to freed memory at %p\n",
 		&base[offset]);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 1ac10cb..3e5eabd 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -543,14 +543,20 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
 	mutex_lock(&bus->device_lock);
 
 	if (!mei_cl_is_connected(cl)) {
-		dev_dbg(bus->dev, "Already disconnected");
+		dev_dbg(bus->dev, "Already disconnected\n");
+		err = 0;
+		goto out;
+	}
+
+	if (bus->dev_state == MEI_DEV_POWER_DOWN) {
+		dev_dbg(bus->dev, "Device is powering down, don't bother with disconnection\n");
 		err = 0;
 		goto out;
 	}
 
 	err = mei_cl_disconnect(cl);
 	if (err < 0)
-		dev_err(bus->dev, "Could not disconnect from the ME client");
+		dev_err(bus->dev, "Could not disconnect from the ME client\n");
 
 out:
 	/* Flush queues and remove any pending read */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 10dcf4f..0b21f9e 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1260,7 +1260,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
 		if (rets == -ENODATA)
 			break;
 
-		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
+		if (rets &&
+		    (dev->dev_state != MEI_DEV_RESETTING &&
+		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
 			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
 						rets);
 			schedule_work(&dev->reset_work);
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 24e4a4c..8419727 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1127,7 +1127,9 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
 	if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
 		/* Read from TXE */
 		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
-		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
+		if (rets &&
+		    (dev->dev_state != MEI_DEV_RESETTING &&
+		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
 			dev_err(dev->dev,
 				"mei_irq_read_handler ret = %d.\n", rets);
 
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index d2f6914..c46f6e9 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -310,6 +310,9 @@ void mei_stop(struct mei_device *dev)
 {
 	dev_dbg(dev->dev, "stopping the device.\n");
 
+	mutex_lock(&dev->device_lock);
+	dev->dev_state = MEI_DEV_POWER_DOWN;
+	mutex_unlock(&dev->device_lock);
 	mei_cl_bus_remove_devices(dev);
 
 	mei_cancel_work(dev);
@@ -319,7 +322,6 @@ void mei_stop(struct mei_device *dev)
 
 	mutex_lock(&dev->device_lock);
 
-	dev->dev_state = MEI_DEV_POWER_DOWN;
 	mei_reset(dev);
 	/* move device to disabled state unconditionally */
 	dev->dev_state = MEI_DEV_DISABLED;
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index fed992e..27db64e 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -937,13 +937,10 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 		    dd.num_vq > MIC_MAX_VRINGS)
 			return -EINVAL;
 
-		dd_config = kzalloc(mic_desc_size(&dd), GFP_KERNEL);
-		if (!dd_config)
-			return -ENOMEM;
-		if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
-			ret = -EFAULT;
-			goto free_ret;
-		}
+		dd_config = memdup_user(argp, mic_desc_size(&dd));
+		if (IS_ERR(dd_config))
+			return PTR_ERR(dd_config);
+
 		/* Ensure desc has not changed between the two reads */
 		if (memcmp(&dd, dd_config, sizeof(dd))) {
 			ret = -EINVAL;
@@ -995,17 +992,12 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 		ret = vop_vdev_inited(vdev);
 		if (ret)
 			goto __unlock_ret;
-		buf = kzalloc(vdev->dd->config_len, GFP_KERNEL);
-		if (!buf) {
-			ret = -ENOMEM;
+		buf = memdup_user(argp, vdev->dd->config_len);
+		if (IS_ERR(buf)) {
+			ret = PTR_ERR(buf);
 			goto __unlock_ret;
 		}
-		if (copy_from_user(buf, argp, vdev->dd->config_len)) {
-			ret = -EFAULT;
-			goto done;
-		}
 		ret = vop_virtio_config_change(vdev, buf);
-done:
 		kfree(buf);
 __unlock_ret:
 		mutex_unlock(&vdev->vdev_mutex);
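Both ioctl paths now use memdup_user(), which bundles the
allocate-then-copy_from_user() dance and reports failure via ERR_PTR(),
removing one error label each. Its behaviour is roughly:

	void *p = kmalloc(len, GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	return p;
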
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 2cde80c..9eea30f 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -270,10 +270,8 @@ static int vexpress_syscfg_probe(struct platform_device *pdev)
 	/* Must use dev.parent (MFD), as that's where DT phandle points at... */
 	bridge = vexpress_config_bridge_register(pdev->dev.parent,
 			&vexpress_syscfg_bridge_ops, syscfg);
-	if (IS_ERR(bridge))
-		return PTR_ERR(bridge);
 
-	return 0;
+	return PTR_ERR_OR_ZERO(bridge);
 }
 
 static const struct platform_device_id vexpress_syscfg_id_table[] = {
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 5a5cefd..35a3dbe 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -444,7 +444,6 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem,
 struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 {
 	struct nvmem_device *nvmem;
-	struct device_node *np;
 	int rval;
 
 	if (!config->dev)
@@ -464,8 +463,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 	nvmem->owner = config->owner;
 	if (!nvmem->owner && config->dev->driver)
 		nvmem->owner = config->dev->driver->owner;
-	nvmem->stride = config->stride;
-	nvmem->word_size = config->word_size;
+	nvmem->stride = config->stride ?: 1;
+	nvmem->word_size = config->word_size ?: 1;
 	nvmem->size = config->size;
 	nvmem->dev.type = &nvmem_provider_type;
 	nvmem->dev.bus = &nvmem_bus_type;
@@ -473,13 +472,12 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 	nvmem->priv = config->priv;
 	nvmem->reg_read = config->reg_read;
 	nvmem->reg_write = config->reg_write;
-	np = config->dev->of_node;
-	nvmem->dev.of_node = np;
+	nvmem->dev.of_node = config->dev->of_node;
 	dev_set_name(&nvmem->dev, "%s%d",
 		     config->name ? : "nvmem",
 		     config->name ? config->id : nvmem->id);
 
-	nvmem->read_only = of_property_read_bool(np, "read-only") |
+	nvmem->read_only = device_property_present(config->dev, "read-only") |
 			   config->read_only;
 
 	if (config->root_only)
@@ -600,16 +598,11 @@ static void __nvmem_device_put(struct nvmem_device *nvmem)
 	mutex_unlock(&nvmem_mutex);
 }
 
-static int nvmem_match(struct device *dev, void *data)
-{
-	return !strcmp(dev_name(dev), data);
-}
-
 static struct nvmem_device *nvmem_find(const char *name)
 {
 	struct device *d;
 
-	d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match);
+	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
 
 	if (!d)
 		return NULL;
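The stride/word_size defaults rely on the GNU "a ?: b" extension (already
used for the device name in this function): it means "a ? a : b" with a
evaluated only once, so an unset (zero) config value falls back to 1:

	nvmem->stride = config->stride ?: 1;	/* same as: stride ? stride : 1 */
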
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index 123de77..f13a833 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -32,6 +32,14 @@
 #define RK3288_STROBE		BIT(1)
 #define RK3288_CSB		BIT(0)
 
+#define RK3328_SECURE_SIZES	96
+#define RK3328_INT_STATUS	0x0018
+#define RK3328_DOUT		0x0020
+#define RK3328_AUTO_CTRL	0x0024
+#define RK3328_INT_FINISH	BIT(0)
+#define RK3328_AUTO_ENB		BIT(0)
+#define RK3328_AUTO_RD		BIT(1)
+
 #define RK3399_A_SHIFT		16
 #define RK3399_A_MASK		0x3ff
 #define RK3399_NBYTES		4
@@ -92,6 +100,60 @@ static int rockchip_rk3288_efuse_read(void *context, unsigned int offset,
 	return 0;
 }
 
+static int rockchip_rk3328_efuse_read(void *context, unsigned int offset,
+				      void *val, size_t bytes)
+{
+	struct rockchip_efuse_chip *efuse = context;
+	unsigned int addr_start, addr_end, addr_offset, addr_len;
+	u32 out_value, status;
+	u8 *buf;
+	int ret, i = 0;
+
+	ret = clk_prepare_enable(efuse->clk);
+	if (ret < 0) {
+		dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
+		return ret;
+	}
+
+	/* 128 byte efuse: 96 bytes secure, 32 bytes non-secure */
+	offset += RK3328_SECURE_SIZES;
+	addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES;
+	addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES;
+	addr_offset = offset % RK3399_NBYTES;
+	addr_len = addr_end - addr_start;
+
+	buf = kzalloc(sizeof(*buf) * addr_len * RK3399_NBYTES, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto nomem;
+	}
+
+	while (addr_len--) {
+		writel(RK3328_AUTO_RD | RK3328_AUTO_ENB |
+		       ((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT),
+		       efuse->base + RK3328_AUTO_CTRL);
+		udelay(4);
+		status = readl(efuse->base + RK3328_INT_STATUS);
+		if (!(status & RK3328_INT_FINISH)) {
+			ret = -EIO;
+			goto err;
+		}
+		out_value = readl(efuse->base + RK3328_DOUT);
+		writel(RK3328_INT_FINISH, efuse->base + RK3328_INT_STATUS);
+
+		memcpy(&buf[i], &out_value, RK3399_NBYTES);
+		i += RK3399_NBYTES;
+	}
+
+	memcpy(val, buf + addr_offset, bytes);
+err:
+	kfree(buf);
+nomem:
+	clk_disable_unprepare(efuse->clk);
+
+	return ret;
+}
+
 static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
 				      void *val, size_t bytes)
 {
@@ -181,6 +243,10 @@ static const struct of_device_id rockchip_efuse_match[] = {
 		.data = (void *)&rockchip_rk3288_efuse_read,
 	},
 	{
+		.compatible = "rockchip,rk3328-efuse",
+		.data = (void *)&rockchip_rk3328_efuse_read,
+	},
+	{
 		.compatible = "rockchip,rk3399-efuse",
 		.data = (void *)&rockchip_rk3399_efuse_read,
 	},
@@ -217,7 +283,9 @@ static int rockchip_efuse_probe(struct platform_device *pdev)
 		return PTR_ERR(efuse->clk);
 
 	efuse->dev = &pdev->dev;
-	econfig.size = resource_size(res);
+	if (of_property_read_u32(dev->of_node, "rockchip,efuse-size",
+				 &econfig.size))
+		econfig.size = resource_size(res);
 	econfig.reg_read = match->data;
 	econfig.priv = efuse;
 	econfig.dev = efuse->dev;
diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
index 9d278b4..be11880 100644
--- a/drivers/nvmem/uniphier-efuse.c
+++ b/drivers/nvmem/uniphier-efuse.c
@@ -27,11 +27,11 @@ static int uniphier_reg_read(void *context,
 			     unsigned int reg, void *_val, size_t bytes)
 {
 	struct uniphier_efuse_priv *priv = context;
-	u32 *val = _val;
+	u8 *val = _val;
 	int offs;
 
-	for (offs = 0; offs < bytes; offs += sizeof(u32))
-		*val++ = readl(priv->base + reg + offs);
+	for (offs = 0; offs < bytes; offs += sizeof(u8))
+		*val++ = readb(priv->base + reg + offs);
 
 	return 0;
 }
@@ -53,8 +53,8 @@ static int uniphier_efuse_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
-	econfig.stride = 4;
-	econfig.word_size = 4;
+	econfig.stride = 1;
+	econfig.word_size = 1;
 	econfig.read_only = true;
 	econfig.reg_read = uniphier_reg_read;
 	econfig.size = resource_size(res);
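Switching to readb() with stride and word_size of 1 means nvmem cells in this
efuse no longer have to be 32-bit sized and aligned. A consumer-side sketch
(the cell name is hypothetical, error handling trimmed):

	struct nvmem_cell *cell = nvmem_cell_get(dev, "usb-mac");	/* hypothetical cell */
	size_t len;
	u8 *mac = nvmem_cell_read(cell, &len);	/* any byte offset/length works now */
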
diff --git a/drivers/siox/Kconfig b/drivers/siox/Kconfig
new file mode 100644
index 0000000..083d2e6
--- /dev/null
+++ b/drivers/siox/Kconfig
@@ -0,0 +1,18 @@
+menuconfig SIOX
+	tristate "Eckelmann SIOX Support"
+	help
+	  SIOX stands for Serial Input Output eXtension and is a synchronous
+	  bus system invented by Eckelmann AG. It is used in their control and
+	  remote monitoring systems for commercial and industrial refrigeration
+	  to drive additional I/O units.
+
+	  Unless you know better, it is probably safe to say "no" here.
+
+if SIOX
+
+config SIOX_BUS_GPIO
+	tristate "SIOX GPIO bus driver"
+	help
+	  SIOX bus driver that controls the four bus lines using GPIOs.
+
+endif
diff --git a/drivers/siox/Makefile b/drivers/siox/Makefile
new file mode 100644
index 0000000..a956f65
--- /dev/null
+++ b/drivers/siox/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SIOX) += siox-core.o
+obj-$(CONFIG_SIOX_BUS_GPIO) += siox-bus-gpio.o
diff --git a/drivers/siox/siox-bus-gpio.c b/drivers/siox/siox-bus-gpio.c
new file mode 100644
index 0000000..ea7ef98
--- /dev/null
+++ b/drivers/siox/siox-bus-gpio.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2017 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/delay.h>
+
+#include "siox.h"
+
+#define DRIVER_NAME "siox-gpio"
+
+struct siox_gpio_ddata {
+	struct gpio_desc *din;
+	struct gpio_desc *dout;
+	struct gpio_desc *dclk;
+	struct gpio_desc *dld;
+};
+
+static unsigned int siox_clkhigh_ns = 1000;
+static unsigned int siox_loadhigh_ns;
+static unsigned int siox_bytegap_ns;
+
+static int siox_gpio_pushpull(struct siox_master *smaster,
+			      size_t setbuf_len, const u8 setbuf[],
+			      size_t getbuf_len, u8 getbuf[])
+{
+	struct siox_gpio_ddata *ddata = siox_master_get_devdata(smaster);
+	size_t i;
+	size_t cycles = max(setbuf_len, getbuf_len);
+
+	/* reset data and clock */
+	gpiod_set_value_cansleep(ddata->dout, 0);
+	gpiod_set_value_cansleep(ddata->dclk, 0);
+
+	gpiod_set_value_cansleep(ddata->dld, 1);
+	ndelay(siox_loadhigh_ns);
+	gpiod_set_value_cansleep(ddata->dld, 0);
+
+	for (i = 0; i < cycles; ++i) {
+		u8 set = 0, get = 0;
+		size_t j;
+
+		if (i >= cycles - setbuf_len)
+			set = setbuf[i - (cycles - setbuf_len)];
+
+		for (j = 0; j < 8; ++j) {
+			get <<= 1;
+			if (gpiod_get_value_cansleep(ddata->din))
+				get |= 1;
+
+			/* DOUT is logically inverted */
+			gpiod_set_value_cansleep(ddata->dout, !(set & 0x80));
+			set <<= 1;
+
+			gpiod_set_value_cansleep(ddata->dclk, 1);
+			ndelay(siox_clkhigh_ns);
+			gpiod_set_value_cansleep(ddata->dclk, 0);
+		}
+
+		if (i < getbuf_len)
+			getbuf[i] = get;
+
+		ndelay(siox_bytegap_ns);
+	}
+
+	gpiod_set_value_cansleep(ddata->dld, 1);
+	ndelay(siox_loadhigh_ns);
+	gpiod_set_value_cansleep(ddata->dld, 0);
+
+	/*
+	 * Resetting dout isn't necessary protocol-wise, but it makes the
+	 * signals prettier because the dout level is deterministic between
+	 * cycles. Note that this only affects dout between the master and the
+	 * first siox device; dout for the later devices depends on the output
+	 * of the previous siox device.
+	 */
+	gpiod_set_value_cansleep(ddata->dout, 0);
+
+	return 0;
+}
+
+static int siox_gpio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct siox_gpio_ddata *ddata;
+	int ret;
+	struct siox_master *smaster;
+
+	smaster = siox_master_alloc(&pdev->dev, sizeof(*ddata));
+	if (!smaster) {
+		dev_err(dev, "failed to allocate siox master\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, smaster);
+	ddata = siox_master_get_devdata(smaster);
+
+	ddata->din = devm_gpiod_get(dev, "din", GPIOD_IN);
+	if (IS_ERR(ddata->din)) {
+		ret = PTR_ERR(ddata->din);
+		dev_err(dev, "Failed to get %s GPIO: %d\n", "din", ret);
+		goto err;
+	}
+
+	ddata->dout = devm_gpiod_get(dev, "dout", GPIOD_OUT_LOW);
+	if (IS_ERR(ddata->dout)) {
+		ret = PTR_ERR(ddata->dout);
+		dev_err(dev, "Failed to get %s GPIO: %d\n", "dout", ret);
+		goto err;
+	}
+
+	ddata->dclk = devm_gpiod_get(dev, "dclk", GPIOD_OUT_LOW);
+	if (IS_ERR(ddata->dclk)) {
+		ret = PTR_ERR(ddata->dclk);
+		dev_err(dev, "Failed to get %s GPIO: %d\n", "dclk", ret);
+		goto err;
+	}
+
+	ddata->dld = devm_gpiod_get(dev, "dld", GPIOD_OUT_LOW);
+	if (IS_ERR(ddata->dld)) {
+		ret = PTR_ERR(ddata->dld);
+		dev_err(dev, "Failed to get %s GPIO: %d\n", "dld", ret);
+		goto err;
+	}
+
+	smaster->pushpull = siox_gpio_pushpull;
+	/* XXX: determine automatically like spi does */
+	smaster->busno = 0;
+
+	ret = siox_master_register(smaster);
+	if (ret) {
+		dev_err(dev, "Failed to register siox master: %d\n", ret);
+err:
+		siox_master_put(smaster);
+	}
+
+	return ret;
+}
+
+static int siox_gpio_remove(struct platform_device *pdev)
+{
+	struct siox_master *master = platform_get_drvdata(pdev);
+
+	siox_master_unregister(master);
+
+	return 0;
+}
+
+static const struct of_device_id siox_gpio_dt_ids[] = {
+	{ .compatible = "eckelmann,siox-gpio", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, siox_gpio_dt_ids);
+
+static struct platform_driver siox_gpio_driver = {
+	.probe = siox_gpio_probe,
+	.remove = siox_gpio_remove,
+
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = siox_gpio_dt_ids,
+	},
+};
+module_platform_driver(siox_gpio_driver);
+
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
new file mode 100644
index 0000000..fdfcdea
--- /dev/null
+++ b/drivers/siox/siox-core.c
@@ -0,0 +1,934 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2017 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "siox.h"
+
+/*
+ * The lowest bit in the SIOX status word signals if the in-device watchdog is
+ * ok. If the bit is set, the device is functional.
+ *
+ * On writing the watchdog timer is reset when this bit toggles.
+ */
+#define SIOX_STATUS_WDG			0x01
+
+/*
+ * Bits 1 to 3 of the status word read as the bitwise negation of what was
+ * clocked in before. The value clocked in is changed in each cycle and so
+ * allows to detect transmit/receive problems.
+ */
+#define SIOX_STATUS_COUNTER		0x0e
+
+/*
+ * Each SIOX device has a 4-bit type number that is neither 0 nor 15. This is
+ * available in the upper nibble of the read status.
+ *
+ * On write these bits are don't care.
+ */
+#define SIOX_STATUS_TYPE		0xf0
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/siox.h>
+
+static bool siox_is_registered;
+
+static void siox_master_lock(struct siox_master *smaster)
+{
+	mutex_lock(&smaster->lock);
+}
+
+static void siox_master_unlock(struct siox_master *smaster)
+{
+	mutex_unlock(&smaster->lock);
+}
+
+static inline u8 siox_status_clean(u8 status_read, u8 status_written)
+{
+	/*
+	 * bits 3:1 of status sample the respective bit in the status
+	 * byte written in the previous cycle but inverted. So if you wrote the
+	 * status word as 0xa before (counter = 0b101), it is expected to get
+	 * back the counter bits as 0b010.
+	 *
+	 * Given the last status written, this function toggles those counter
+	 * bits in the read value that were written as zero, so that the
+	 * counter bits of the return value are all zero iff the bits were
+	 * read back as expected. This simplifies error detection.
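+	 *
+	 * Worked example: for status_written = 0xa the counter bits are
+	 * 0b101, so ~status_written & 0xe = 0x4. A device in sync echoes
+	 * the inverted counter, i.e. 0b010 in bits 3:1, and 0x4 ^ 0x4
+	 * leaves the counter bits of the result all zero.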
+	 */
+
+	return status_read ^ (~status_written & 0xe);
+}
+
+static bool siox_device_counter_error(struct siox_device *sdevice,
+				      u8 status_clean)
+{
+	return (status_clean & SIOX_STATUS_COUNTER) != 0;
+}
+
+static bool siox_device_type_error(struct siox_device *sdevice, u8 status_clean)
+{
+	u8 statustype = (status_clean & SIOX_STATUS_TYPE) >> 4;
+
+	/*
+	 * If the device knows which value the type bits should have, check
+	 * against this value otherwise just rule out the invalid values 0b0000
+	 * and 0b1111.
+	 */
+	if (sdevice->statustype) {
+		if (statustype != sdevice->statustype)
+			return true;
+	} else {
+		switch (statustype) {
+		case 0:
+		case 0xf:
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static bool siox_device_wdg_error(struct siox_device *sdevice, u8 status_clean)
+{
+	return (status_clean & SIOX_STATUS_WDG) == 0;
+}
+
+/*
+ * If there is a type or counter error the device is called "unsynced".
+ */
+bool siox_device_synced(struct siox_device *sdevice)
+{
+	if (siox_device_type_error(sdevice, sdevice->status_read_clean))
+		return false;
+
+	return !siox_device_counter_error(sdevice, sdevice->status_read_clean);
+}
+EXPORT_SYMBOL_GPL(siox_device_synced);
+
+/*
+ * A device is called "connected" if it is synced and the watchdog is not
+ * asserted.
+ */
+bool siox_device_connected(struct siox_device *sdevice)
+{
+	if (!siox_device_synced(sdevice))
+		return false;
+
+	return !siox_device_wdg_error(sdevice, sdevice->status_read_clean);
+}
+EXPORT_SYMBOL_GPL(siox_device_connected);
+
+static void siox_poll(struct siox_master *smaster)
+{
+	struct siox_device *sdevice;
+	size_t i = smaster->setbuf_len;
+	unsigned int devno = 0;
+	int unsync_error = 0;
+
+	smaster->last_poll = jiffies;
+
+	/*
+	 * The watchdog bit toggles on every cycle, so the counter bits change
+	 * only in every second cycle.
+	 * The counter bits hold values from [0, 6]. 7 would be possible
+	 * theoretically, but the protocol designer considered that a bad idea
+	 * for reasons unknown today. (Maybe because then the status read back
+	 * would have only zeros in the counter bits, which might be confused
+	 * with a stuck-at-0 error. But by the same reasoning (with s/0/1/) 0
+	 * could be skipped, too.)
+	 */
+	if (++smaster->status > 0x0d)
+		smaster->status = 0;
+
+	memset(smaster->buf, 0, smaster->setbuf_len);
+
+	/* prepare data pushed out to devices in buf[0..setbuf_len) */
+	list_for_each_entry(sdevice, &smaster->devices, node) {
+		struct siox_driver *sdriver =
+			to_siox_driver(sdevice->dev.driver);
+		sdevice->status_written = smaster->status;
+
+		i -= sdevice->inbytes;
+
+		/*
+		 * If the device or a previous one is unsynced, don't pet the
+		 * watchdog. This is done to ensure that the device is kept in
+		 * reset when something is wrong.
+		 */
+		if (!siox_device_synced(sdevice))
+			unsync_error = 1;
+
+		if (sdriver && !unsync_error)
+			sdriver->set_data(sdevice, sdevice->status_written,
+					  &smaster->buf[i + 1]);
+		else
+			/*
+			 * Don't trigger watchdog if there is no driver or a
+			 * sync problem
+			 */
+			sdevice->status_written &= ~SIOX_STATUS_WDG;
+
+		smaster->buf[i] = sdevice->status_written;
+
+		trace_siox_set_data(smaster, sdevice, devno, i);
+
+		devno++;
+	}
+
+	smaster->pushpull(smaster, smaster->setbuf_len, smaster->buf,
+			  smaster->getbuf_len,
+			  smaster->buf + smaster->setbuf_len);
+
+	unsync_error = 0;
+
+	/* interpret data pulled in from devices in buf[setbuf_len..] */
+	devno = 0;
+	i = smaster->setbuf_len;
+	list_for_each_entry(sdevice, &smaster->devices, node) {
+		struct siox_driver *sdriver =
+			to_siox_driver(sdevice->dev.driver);
+		u8 status = smaster->buf[i + sdevice->outbytes - 1];
+		u8 status_clean;
+		u8 prev_status_clean = sdevice->status_read_clean;
+		bool synced = true;
+		bool connected = true;
+
+		if (!siox_device_synced(sdevice))
+			unsync_error = 1;
+
+		/*
+		 * If the watchdog bit wasn't toggled in this cycle, report the
+		 * watchdog as active to give a consistent view for drivers and
+		 * sysfs consumers.
+		 */
+		if (!sdriver || unsync_error)
+			status &= ~SIOX_STATUS_WDG;
+
+		status_clean =
+			siox_status_clean(status,
+					  sdevice->status_written_lastcycle);
+
+		/* Check counter bits */
+		if (siox_device_counter_error(sdevice, status_clean)) {
+			bool prev_counter_error;
+
+			synced = false;
+
+			/* only report a new error if the last cycle was ok */
+			prev_counter_error =
+				siox_device_counter_error(sdevice,
+							  prev_status_clean);
+			if (!prev_counter_error) {
+				sdevice->status_errors++;
+				sysfs_notify_dirent(sdevice->status_errors_kn);
+			}
+		}
+
+		/* Check type bits */
+		if (siox_device_type_error(sdevice, status_clean))
+			synced = false;
+
+		/* If the device is unsynced report the watchdog as active */
+		if (!synced) {
+			status &= ~SIOX_STATUS_WDG;
+			status_clean &= ~SIOX_STATUS_WDG;
+		}
+
+		if (siox_device_wdg_error(sdevice, status_clean))
+			connected = false;
+
+		/* The watchdog state changed just now */
+		if ((status_clean ^ prev_status_clean) & SIOX_STATUS_WDG) {
+			sysfs_notify_dirent(sdevice->watchdog_kn);
+
+			if (siox_device_wdg_error(sdevice, status_clean)) {
+				struct kernfs_node *wd_errs =
+					sdevice->watchdog_errors_kn;
+
+				sdevice->watchdog_errors++;
+				sysfs_notify_dirent(wd_errs);
+			}
+		}
+
+		if (connected != sdevice->connected)
+			sysfs_notify_dirent(sdevice->connected_kn);
+
+		sdevice->status_read_clean = status_clean;
+		sdevice->status_written_lastcycle = sdevice->status_written;
+		sdevice->connected = connected;
+
+		trace_siox_get_data(smaster, sdevice, devno, status_clean, i);
+
+		/* only give data read to driver if the device is connected */
+		if (sdriver && connected)
+			sdriver->get_data(sdevice, &smaster->buf[i]);
+
+		devno++;
+		i += sdevice->outbytes;
+	}
+}
+
+static int siox_poll_thread(void *data)
+{
+	struct siox_master *smaster = data;
+	signed long timeout = 0;
+
+	get_device(&smaster->dev);
+
+	for (;;) {
+		if (kthread_should_stop()) {
+			put_device(&smaster->dev);
+			return 0;
+		}
+
+		siox_master_lock(smaster);
+
+		if (smaster->active) {
+			unsigned long next_poll =
+				smaster->last_poll + smaster->poll_interval;
+			if (time_is_before_eq_jiffies(next_poll))
+				siox_poll(smaster);
+
+			timeout = smaster->poll_interval -
+				(jiffies - smaster->last_poll);
+		} else {
+			timeout = MAX_SCHEDULE_TIMEOUT;
+		}
+
+		/*
+		 * Set the task to idle while holding the lock. This makes sure
+		 * that we don't sleep too long when the bus is reenabled before
+		 * schedule_timeout is reached.
+		 */
+		if (timeout > 0)
+			set_current_state(TASK_IDLE);
+
+		siox_master_unlock(smaster);
+
+		if (timeout > 0)
+			schedule_timeout(timeout);
+
+		/*
+		 * It is not entirely clear if/why it is important to set the
+		 * state to RUNNING again, but it fixes a "do not call blocking
+		 * ops when !TASK_RUNNING;" warning.
+		 */
+		set_current_state(TASK_RUNNING);
+	}
+}
+
+static int __siox_start(struct siox_master *smaster)
+{
+	if (!(smaster->setbuf_len + smaster->getbuf_len))
+		return -ENODEV;
+
+	if (!smaster->buf)
+		return -ENOMEM;
+
+	if (smaster->active)
+		return 0;
+
+	smaster->active = 1;
+	wake_up_process(smaster->poll_thread);
+
+	return 1;
+}
+
+static int siox_start(struct siox_master *smaster)
+{
+	int ret;
+
+	siox_master_lock(smaster);
+	ret = __siox_start(smaster);
+	siox_master_unlock(smaster);
+
+	return ret;
+}
+
+static int __siox_stop(struct siox_master *smaster)
+{
+	if (smaster->active) {
+		struct siox_device *sdevice;
+
+		smaster->active = 0;
+
+		list_for_each_entry(sdevice, &smaster->devices, node) {
+			if (sdevice->connected)
+				sysfs_notify_dirent(sdevice->connected_kn);
+			sdevice->connected = false;
+		}
+
+		return 1;
+	}
+	return 0;
+}
+
+static int siox_stop(struct siox_master *smaster)
+{
+	int ret;
+
+	siox_master_lock(smaster);
+	ret = __siox_stop(smaster);
+	siox_master_unlock(smaster);
+
+	return ret;
+}
+
+static ssize_t type_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct siox_device *sdev = to_siox_device(dev);
+
+	return sprintf(buf, "%s\n", sdev->type);
+}
+
+static DEVICE_ATTR_RO(type);
+
+static ssize_t inbytes_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct siox_device *sdev = to_siox_device(dev);
+
+	return sprintf(buf, "%zu\n", sdev->inbytes);
+}
+
+static DEVICE_ATTR_RO(inbytes);
+
+static ssize_t outbytes_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct siox_device *sdev = to_siox_device(dev);
+
+	return sprintf(buf, "%zu\n", sdev->outbytes);
+}
+
+static DEVICE_ATTR_RO(outbytes);
+
+static ssize_t status_errors_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct siox_device *sdev = to_siox_device(dev);
+	unsigned int status_errors;
+
+	siox_master_lock(sdev->smaster);
+
+	status_errors = sdev->status_errors;
+
+	siox_master_unlock(sdev->smaster);
+
+	return sprintf(buf, "%u\n", status_errors);
+}
+
+static DEVICE_ATTR_RO(status_errors);
+
+static ssize_t connected_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct siox_device *sdev = to_siox_device(dev);
+	bool connected;
+
+	siox_master_lock(sdev->smaster);
+
+	connected = sdev->connected;
+
+	siox_master_unlock(sdev->smaster);
+
+	return sprintf(buf, "%u\n", connected);
+}
+
+static DEVICE_ATTR_RO(connected);
+
+static ssize_t watchdog_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct siox_device *sdev = to_siox_device(dev);
+	u8 status;
+
+	siox_master_lock(sdev->smaster);
+
+	status = sdev->status_read_clean;
+
+	siox_master_unlock(sdev->smaster);
+
+	return sprintf(buf, "%d\n", status & SIOX_STATUS_WDG);
+}
+
+static DEVICE_ATTR_RO(watchdog);
+
+static ssize_t watchdog_errors_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct siox_device *sdev = to_siox_device(dev);
+	unsigned int watchdog_errors;
+
+	siox_master_lock(sdev->smaster);
+
+	watchdog_errors = sdev->watchdog_errors;
+
+	siox_master_unlock(sdev->smaster);
+
+	return sprintf(buf, "%u\n", watchdog_errors);
+}
+
+static DEVICE_ATTR_RO(watchdog_errors);
+
+static struct attribute *siox_device_attrs[] = {
+	&dev_attr_type.attr,
+	&dev_attr_inbytes.attr,
+	&dev_attr_outbytes.attr,
+	&dev_attr_status_errors.attr,
+	&dev_attr_connected.attr,
+	&dev_attr_watchdog.attr,
+	&dev_attr_watchdog_errors.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(siox_device);
+
+static void siox_device_release(struct device *dev)
+{
+	struct siox_device *sdevice = to_siox_device(dev);
+
+	kfree(sdevice);
+}
+
+static struct device_type siox_device_type = {
+	.groups = siox_device_groups,
+	.release = siox_device_release,
+};
+
+static int siox_match(struct device *dev, struct device_driver *drv)
+{
+	if (dev->type != &siox_device_type)
+		return 0;
+
+	/* up to now there is only a single driver so keeping this simple */
+	return 1;
+}
+
+static struct bus_type siox_bus_type = {
+	.name = "siox",
+	.match = siox_match,
+};
+
+static int siox_driver_probe(struct device *dev)
+{
+	struct siox_driver *sdriver = to_siox_driver(dev->driver);
+	struct siox_device *sdevice = to_siox_device(dev);
+	int ret;
+
+	ret = sdriver->probe(sdevice);
+	return ret;
+}
+
+static int siox_driver_remove(struct device *dev)
+{
+	struct siox_driver *sdriver =
+		container_of(dev->driver, struct siox_driver, driver);
+	struct siox_device *sdevice = to_siox_device(dev);
+	int ret;
+
+	ret = sdriver->remove(sdevice);
+	return ret;
+}
+
+static void siox_driver_shutdown(struct device *dev)
+{
+	struct siox_driver *sdriver =
+		container_of(dev->driver, struct siox_driver, driver);
+	struct siox_device *sdevice = to_siox_device(dev);
+
+	sdriver->shutdown(sdevice);
+}
+
+static ssize_t active_show(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct siox_master *smaster = to_siox_master(dev);
+
+	return sprintf(buf, "%d\n", smaster->active);
+}
+
+static ssize_t active_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct siox_master *smaster = to_siox_master(dev);
+	int ret;
+	int active;
+
+	ret = kstrtoint(buf, 0, &active);
+	if (ret < 0)
+		return ret;
+
+	if (active)
+		ret = siox_start(smaster);
+	else
+		ret = siox_stop(smaster);
+
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(active);
+
+static struct siox_device *siox_device_add(struct siox_master *smaster,
+					   const char *type, size_t inbytes,
+					   size_t outbytes, u8 statustype);
+
+static ssize_t device_add_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct siox_master *smaster = to_siox_master(dev);
+	int ret;
+	char type[20] = "";
+	size_t inbytes = 0, outbytes = 0;
+	u8 statustype = 0;
+
+	ret = sscanf(buf, "%20s %zu %zu %hhu", type, &inbytes,
+		     &outbytes, &statustype);
+	if (ret != 3 && ret != 4)
+		return -EINVAL;
+
+	if (strcmp(type, "siox-12x8") || inbytes != 2 || outbytes != 4)
+		return -EINVAL;
+
+	siox_device_add(smaster, "siox-12x8", inbytes, outbytes, statustype);
+
+	return count;
+}
+
+static DEVICE_ATTR_WO(device_add);
+
+static void siox_device_remove(struct siox_master *smaster);
+
+static ssize_t device_remove_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct siox_master *smaster = to_siox_master(dev);
+
+	/* XXX? require to write <type> <inbytes> <outbytes> */
+	siox_device_remove(smaster);
+
+	return count;
+}
+
+static DEVICE_ATTR_WO(device_remove);
+
+static ssize_t poll_interval_ns_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct siox_master *smaster = to_siox_master(dev);
+
+	return sprintf(buf, "%lld\n", jiffies_to_nsecs(smaster->poll_interval));
+}
+
+static ssize_t poll_interval_ns_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct siox_master *smaster = to_siox_master(dev);
+	int ret;
+	u64 val;
+
+	ret = kstrtou64(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	siox_master_lock(smaster);
+
+	smaster->poll_interval = nsecs_to_jiffies(val);
+
+	siox_master_unlock(smaster);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(poll_interval_ns);
+
+static struct attribute *siox_master_attrs[] = {
+	&dev_attr_active.attr,
+	&dev_attr_device_add.attr,
+	&dev_attr_device_remove.attr,
+	&dev_attr_poll_interval_ns.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(siox_master);
+
+static void siox_master_release(struct device *dev)
+{
+	struct siox_master *smaster = to_siox_master(dev);
+
+	kfree(smaster);
+}
+
+static struct device_type siox_master_type = {
+	.groups = siox_master_groups,
+	.release = siox_master_release,
+};
+
+struct siox_master *siox_master_alloc(struct device *dev,
+				      size_t size)
+{
+	struct siox_master *smaster;
+
+	if (!dev)
+		return NULL;
+
+	smaster = kzalloc(sizeof(*smaster) + size, GFP_KERNEL);
+	if (!smaster)
+		return NULL;
+
+	device_initialize(&smaster->dev);
+
+	smaster->busno = -1;
+	smaster->dev.bus = &siox_bus_type;
+	smaster->dev.type = &siox_master_type;
+	smaster->dev.parent = dev;
+	smaster->poll_interval = DIV_ROUND_UP(HZ, 40);
+
+	dev_set_drvdata(&smaster->dev, &smaster[1]);
+
+	return smaster;
+}
+EXPORT_SYMBOL_GPL(siox_master_alloc);
+
+int siox_master_register(struct siox_master *smaster)
+{
+	int ret;
+
+	if (!siox_is_registered)
+		return -EPROBE_DEFER;
+
+	if (!smaster->pushpull)
+		return -EINVAL;
+
+	dev_set_name(&smaster->dev, "siox-%d", smaster->busno);
+
+	smaster->last_poll = jiffies;
+	smaster->poll_thread = kthread_create(siox_poll_thread, smaster,
+					      "siox-%d", smaster->busno);
+	if (IS_ERR(smaster->poll_thread)) {
+		smaster->active = 0;
+		return PTR_ERR(smaster->poll_thread);
+	}
+
+	mutex_init(&smaster->lock);
+	INIT_LIST_HEAD(&smaster->devices);
+
+	ret = device_add(&smaster->dev);
+	if (ret)
+		kthread_stop(smaster->poll_thread);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(siox_master_register);
+
+void siox_master_unregister(struct siox_master *smaster)
+{
+	/* remove device */
+	device_del(&smaster->dev);
+
+	siox_master_lock(smaster);
+
+	__siox_stop(smaster);
+
+	while (smaster->num_devices) {
+		struct siox_device *sdevice;
+
+		sdevice = container_of(smaster->devices.prev,
+				       struct siox_device, node);
+		list_del(&sdevice->node);
+		smaster->num_devices--;
+
+		siox_master_unlock(smaster);
+
+		device_unregister(&sdevice->dev);
+
+		siox_master_lock(smaster);
+	}
+
+	siox_master_unlock(smaster);
+
+	put_device(&smaster->dev);
+}
+EXPORT_SYMBOL_GPL(siox_master_unregister);
+
+static struct siox_device *siox_device_add(struct siox_master *smaster,
+					   const char *type, size_t inbytes,
+					   size_t outbytes, u8 statustype)
+{
+	struct siox_device *sdevice;
+	int ret;
+	size_t buf_len;
+
+	sdevice = kzalloc(sizeof(*sdevice), GFP_KERNEL);
+	if (!sdevice)
+		return ERR_PTR(-ENOMEM);
+
+	sdevice->type = type;
+	sdevice->inbytes = inbytes;
+	sdevice->outbytes = outbytes;
+	sdevice->statustype = statustype;
+
+	sdevice->smaster = smaster;
+	sdevice->dev.parent = &smaster->dev;
+	sdevice->dev.bus = &siox_bus_type;
+	sdevice->dev.type = &siox_device_type;
+
+	siox_master_lock(smaster);
+
+	dev_set_name(&sdevice->dev, "siox-%d-%d",
+		     smaster->busno, smaster->num_devices);
+
+	buf_len = smaster->setbuf_len + inbytes +
+		smaster->getbuf_len + outbytes;
+	if (smaster->buf_len < buf_len) {
+		u8 *buf = krealloc(smaster->buf, buf_len, GFP_KERNEL);
+
+		if (!buf) {
+			dev_err(&smaster->dev,
+				"failed to realloc buffer to %zu\n", buf_len);
+			ret = -ENOMEM;
+			goto err_buf_alloc;
+		}
+
+		smaster->buf_len = buf_len;
+		smaster->buf = buf;
+	}
+
+	ret = device_register(&sdevice->dev);
+	if (ret) {
+		dev_err(&smaster->dev, "failed to register device: %d\n", ret);
+
+		goto err_device_register;
+	}
+
+	smaster->num_devices++;
+	list_add_tail(&sdevice->node, &smaster->devices);
+
+	smaster->setbuf_len += sdevice->inbytes;
+	smaster->getbuf_len += sdevice->outbytes;
+
+	sdevice->status_errors_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
+						     "status_errors");
+	sdevice->watchdog_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
+						"watchdog");
+	sdevice->watchdog_errors_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
+						       "watchdog_errors");
+	sdevice->connected_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
+						 "connected");
+
+	siox_master_unlock(smaster);
+
+	return sdevice;
+
+err_device_register:
+	/* don't care to make the buffer smaller again */
+
+err_buf_alloc:
+	siox_master_unlock(smaster);
+
+	kfree(sdevice);
+
+	return ERR_PTR(ret);
+}
+
+static void siox_device_remove(struct siox_master *smaster)
+{
+	struct siox_device *sdevice;
+
+	siox_master_lock(smaster);
+
+	if (!smaster->num_devices) {
+		siox_master_unlock(smaster);
+		return;
+	}
+
+	sdevice = container_of(smaster->devices.prev, struct siox_device, node);
+	list_del(&sdevice->node);
+	smaster->num_devices--;
+
+	smaster->setbuf_len -= sdevice->inbytes;
+	smaster->getbuf_len -= sdevice->outbytes;
+
+	if (!smaster->num_devices)
+		__siox_stop(smaster);
+
+	siox_master_unlock(smaster);
+
+	/*
+	 * This must be done without holding the master lock because we're
+	 * called from device_remove_store which also holds a sysfs mutex.
+	 * device_unregister tries to acquire the same lock.
+	 */
+	device_unregister(&sdevice->dev);
+}
+
+int __siox_driver_register(struct siox_driver *sdriver, struct module *owner)
+{
+	int ret;
+
+	if (unlikely(!siox_is_registered))
+		return -EPROBE_DEFER;
+
+	if (!sdriver->set_data && !sdriver->get_data) {
+		pr_err("Driver %s doesn't provide needed callbacks\n",
+		       sdriver->driver.name);
+		return -EINVAL;
+	}
+
+	sdriver->driver.owner = owner;
+	sdriver->driver.bus = &siox_bus_type;
+
+	if (sdriver->probe)
+		sdriver->driver.probe = siox_driver_probe;
+	if (sdriver->remove)
+		sdriver->driver.remove = siox_driver_remove;
+	if (sdriver->shutdown)
+		sdriver->driver.shutdown = siox_driver_shutdown;
+
+	ret = driver_register(&sdriver->driver);
+	if (ret)
+		pr_err("Failed to register siox driver %s (%d)\n",
+		       sdriver->driver.name, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__siox_driver_register);
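+
+/*
+ * Example (illustrative sketch, not part of this patch): a minimal SIOX
+ * client driver built on the call above. It assumes the usual
+ * siox_driver_register()/siox_driver_unregister() wrappers from
+ * <linux/siox.h> that supply THIS_MODULE; the name "my-siox-dev" and the
+ * handler body are hypothetical.
+ *
+ *	static int my_get_data(struct siox_device *sdevice, const u8 buf[])
+ *	{
+ *		// buf holds the bytes pulled in during the last push-pull
+ *		// cycle; the trailing status byte is evaluated by the core
+ *		return 0;
+ *	}
+ *
+ *	static struct siox_driver my_siox_driver = {
+ *		.get_data = my_get_data,
+ *		.driver = {
+ *			.name = "my-siox-dev",
+ *		},
+ *	};
+ *
+ *	module_driver(my_siox_driver, siox_driver_register,
+ *		      siox_driver_unregister);
+ */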
+
+static int __init siox_init(void)
+{
+	int ret;
+
+	ret = bus_register(&siox_bus_type);
+	if (ret) {
+		pr_err("Registration of SIOX bus type failed: %d\n", ret);
+		return ret;
+	}
+
+	siox_is_registered = true;
+
+	return 0;
+}
+subsys_initcall(siox_init);
+
+static void __exit siox_exit(void)
+{
+	bus_unregister(&siox_bus_type);
+}
+module_exit(siox_exit);
+
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_DESCRIPTION("Eckelmann SIOX driver core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/siox/siox.h b/drivers/siox/siox.h
new file mode 100644
index 0000000..c674bf6
--- /dev/null
+++ b/drivers/siox/siox.h
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2017 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
+ */
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/siox.h>
+
+#define to_siox_master(_dev)	container_of((_dev), struct siox_master, dev)
+struct siox_master {
+	/* these fields should be initialized by the driver */
+	int busno;
+	int (*pushpull)(struct siox_master *smaster,
+			size_t setbuf_len, const u8 setbuf[],
+			size_t getbuf_len, u8 getbuf[]);
+
+	/* might be initialized by the driver, if 0 it is set to HZ / 40 */
+	unsigned long poll_interval; /* in jiffies */
+
+	/* framework private stuff */
+	struct mutex lock;
+	bool active;
+	struct module *owner;
+	struct device dev;
+	unsigned int num_devices;
+	struct list_head devices;
+
+	size_t setbuf_len, getbuf_len;
+	size_t buf_len;
+	u8 *buf;
+	u8 status;
+
+	unsigned long last_poll;
+	struct task_struct *poll_thread;
+};
+
+static inline void *siox_master_get_devdata(struct siox_master *smaster)
+{
+	return dev_get_drvdata(&smaster->dev);
+}
+
+struct siox_master *siox_master_alloc(struct device *dev, size_t size);
+static inline void siox_master_put(struct siox_master *smaster)
+{
+	put_device(&smaster->dev);
+}
+
+int siox_master_register(struct siox_master *smaster);
+void siox_master_unregister(struct siox_master *smaster);
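+
+/*
+ * Example (illustrative sketch, not part of this patch): a bus master
+ * driver is expected to allocate and register its master roughly like
+ * this from its probe function; all my_* names are hypothetical.
+ *
+ *	static int my_pushpull(struct siox_master *smaster,
+ *			       size_t setbuf_len, const u8 setbuf[],
+ *			       size_t getbuf_len, u8 getbuf[])
+ *	{
+ *		// shift out setbuf on the bus and capture getbuf in the
+ *		// same push-pull cycle
+ *		return 0;
+ *	}
+ *
+ *	static int my_probe(struct platform_device *pdev)
+ *	{
+ *		struct siox_master *smaster;
+ *
+ *		smaster = siox_master_alloc(&pdev->dev, 0);
+ *		if (!smaster)
+ *			return -ENOMEM;
+ *
+ *		smaster->busno = 0;
+ *		smaster->pushpull = my_pushpull;
+ *
+ *		return siox_master_register(smaster);
+ *	}
+ */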
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
new file mode 100644
index 0000000..78bdd48
--- /dev/null
+++ b/drivers/slimbus/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# SLIMbus driver configuration
+#
+menuconfig SLIMBUS
+	tristate "SLIMbus support"
+	help
+	  SLIMbus is a standard interface between System-on-Chip and audio
+	  codecs or other peripheral components in typical embedded systems.
+
+	  If unsure, choose N.
+
+if SLIMBUS
+
+# SLIMbus controllers
+config SLIM_QCOM_CTRL
+	tristate "Qualcomm SLIMbus Manager Component"
+	depends on SLIMBUS
+	help
+	  Select this driver if Qualcomm's SLIMbus Manager Component is
+	  to be programmed using the Linux kernel.
+
+endif
diff --git a/drivers/slimbus/Makefile b/drivers/slimbus/Makefile
new file mode 100644
index 0000000..a35a3da4
--- /dev/null
+++ b/drivers/slimbus/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for kernel SLIMbus framework.
+#
+obj-$(CONFIG_SLIMBUS)			+= slimbus.o
+slimbus-y				:= core.o messaging.o sched.o
+
+#Controllers
+obj-$(CONFIG_SLIM_QCOM_CTRL)		+= slim-qcom-ctrl.o
+slim-qcom-ctrl-y			:= qcom-ctrl.o
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
new file mode 100644
index 0000000..4988a8f
--- /dev/null
+++ b/drivers/slimbus/core.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/idr.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/slimbus.h>
+#include "slimbus.h"
+
+static DEFINE_IDA(ctrl_ida);
+
+static const struct slim_device_id *slim_match(const struct slim_device_id *id,
+					       const struct slim_device *sbdev)
+{
+	while (id->manf_id != 0 || id->prod_code != 0) {
+		if (id->manf_id == sbdev->e_addr.manf_id &&
+		    id->prod_code == sbdev->e_addr.prod_code)
+			return id;
+		id++;
+	}
+	return NULL;
+}
+
+static int slim_device_match(struct device *dev, struct device_driver *drv)
+{
+	struct slim_device *sbdev = to_slim_device(dev);
+	struct slim_driver *sbdrv = to_slim_driver(drv);
+
+	return !!slim_match(sbdrv->id_table, sbdev);
+}
+
+static int slim_device_probe(struct device *dev)
+{
+	struct slim_device	*sbdev = to_slim_device(dev);
+	struct slim_driver	*sbdrv = to_slim_driver(dev->driver);
+
+	return sbdrv->probe(sbdev);
+}
+
+static int slim_device_remove(struct device *dev)
+{
+	struct slim_device *sbdev = to_slim_device(dev);
+	struct slim_driver *sbdrv;
+
+	if (dev->driver) {
+		sbdrv = to_slim_driver(dev->driver);
+		if (sbdrv->remove)
+			sbdrv->remove(sbdev);
+	}
+
+	return 0;
+}
+
+struct bus_type slimbus_bus = {
+	.name		= "slimbus",
+	.match		= slim_device_match,
+	.probe		= slim_device_probe,
+	.remove		= slim_device_remove,
+};
+EXPORT_SYMBOL_GPL(slimbus_bus);
+
+/**
+ * __slim_driver_register() - Client driver registration with SLIMbus
+ *
+ * @drv: Client driver to be associated with client-device.
+ * @owner: owning module/driver
+ *
+ * This API will register the client driver with the SLIMbus framework.
+ * It is called from the driver's module-init function.
+ */
+int __slim_driver_register(struct slim_driver *drv, struct module *owner)
+{
+	/* ID table and probe are mandatory */
+	if (!drv->id_table || !drv->probe)
+		return -EINVAL;
+
+	drv->driver.bus = &slimbus_bus;
+	drv->driver.owner = owner;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__slim_driver_register);
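+
+/*
+ * Example (illustrative sketch, not part of this patch): a minimal client
+ * driver. It assumes the slim_driver_register()/module_slim_driver()
+ * convenience wrappers from <linux/slimbus.h>; the ID values and my_*
+ * names are hypothetical.
+ *
+ *	static const struct slim_device_id my_slim_id[] = {
+ *		{ .manf_id = 0x217, .prod_code = 0x60 },
+ *		{ }
+ *	};
+ *
+ *	static int my_probe(struct slim_device *sbdev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static struct slim_driver my_slim_driver = {
+ *		.driver = {
+ *			.name = "my-slim-codec",
+ *		},
+ *		.probe = my_probe,
+ *		.id_table = my_slim_id,
+ *	};
+ *	module_slim_driver(my_slim_driver);
+ */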
+
+/**
+ * slim_driver_unregister() - Undo effect of slim_driver_register
+ *
+ * @drv: Client driver to be unregistered
+ */
+void slim_driver_unregister(struct slim_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(slim_driver_unregister);
+
+static void slim_dev_release(struct device *dev)
+{
+	struct slim_device *sbdev = to_slim_device(dev);
+
+	kfree(sbdev);
+}
+
+static int slim_add_device(struct slim_controller *ctrl,
+			   struct slim_device *sbdev,
+			   struct device_node *node)
+{
+	sbdev->dev.bus = &slimbus_bus;
+	sbdev->dev.parent = ctrl->dev;
+	sbdev->dev.release = slim_dev_release;
+	sbdev->dev.driver = NULL;
+	sbdev->ctrl = ctrl;
+
+	if (node)
+		sbdev->dev.of_node = of_node_get(node);
+
+	dev_set_name(&sbdev->dev, "%x:%x:%x:%x",
+				  sbdev->e_addr.manf_id,
+				  sbdev->e_addr.prod_code,
+				  sbdev->e_addr.dev_index,
+				  sbdev->e_addr.instance);
+
+	return device_register(&sbdev->dev);
+}
+
+static struct slim_device *slim_alloc_device(struct slim_controller *ctrl,
+					     struct slim_eaddr *eaddr,
+					     struct device_node *node)
+{
+	struct slim_device *sbdev;
+	int ret;
+
+	sbdev = kzalloc(sizeof(*sbdev), GFP_KERNEL);
+	if (!sbdev)
+		return NULL;
+
+	sbdev->e_addr = *eaddr;
+	ret = slim_add_device(ctrl, sbdev, node);
+	if (ret) {
+		kfree(sbdev);
+		return NULL;
+	}
+
+	return sbdev;
+}
+
+static void of_register_slim_devices(struct slim_controller *ctrl)
+{
+	struct device *dev = ctrl->dev;
+	struct device_node *node;
+
+	if (!ctrl->dev->of_node)
+		return;
+
+	for_each_child_of_node(ctrl->dev->of_node, node) {
+		struct slim_device *sbdev;
+		struct slim_eaddr e_addr;
+		const char *compat = NULL;
+		u32 reg[2];
+		int ret;
+		int manf_id, prod_code;
+
+		compat = of_get_property(node, "compatible", NULL);
+		if (!compat)
+			continue;
+
+		ret = sscanf(compat, "slim%x,%x", &manf_id, &prod_code);
+		if (ret != 2) {
+			dev_err(dev, "Manf ID & Product code not found %s\n",
+				compat);
+			continue;
+		}
+
+		ret = of_property_read_u32_array(node, "reg", reg, 2);
+		if (ret) {
+			dev_err(dev, "Device and Instance id not found:%d\n",
+				ret);
+			continue;
+		}
+
+		e_addr.dev_index = reg[0];
+		e_addr.instance = reg[1];
+		e_addr.manf_id = manf_id;
+		e_addr.prod_code = prod_code;
+
+		sbdev = slim_alloc_device(ctrl, &e_addr, node);
+		if (!sbdev)
+			continue;
+	}
+}
+
+/**
+ * slim_register_controller() - Controller bring-up and registration.
+ *
+ * @ctrl: Controller to be registered.
+ *
+ * A controller is registered with the framework using this API.
+ * If devices on a controller were registered before the controller,
+ * this will make sure that they get probed when the controller is up.
+ */
+int slim_register_controller(struct slim_controller *ctrl)
+{
+	int id;
+
+	id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+	if (id < 0)
+		return id;
+
+	ctrl->id = id;
+
+	if (!ctrl->min_cg)
+		ctrl->min_cg = SLIM_MIN_CLK_GEAR;
+	if (!ctrl->max_cg)
+		ctrl->max_cg = SLIM_MAX_CLK_GEAR;
+
+	ida_init(&ctrl->laddr_ida);
+	idr_init(&ctrl->tid_idr);
+	mutex_init(&ctrl->lock);
+	mutex_init(&ctrl->sched.m_reconf);
+	init_completion(&ctrl->sched.pause_comp);
+
+	dev_dbg(ctrl->dev, "Bus [%s] registered:dev:%p\n",
+		ctrl->name, ctrl->dev);
+
+	of_register_slim_devices(ctrl);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_register_controller);
+
+/* slim_remove_device: Remove the effect of slim_add_device() */
+static void slim_remove_device(struct slim_device *sbdev)
+{
+	device_unregister(&sbdev->dev);
+}
+
+static int slim_ctrl_remove_device(struct device *dev, void *null)
+{
+	slim_remove_device(to_slim_device(dev));
+	return 0;
+}
+
+/**
+ * slim_unregister_controller() - Controller tear-down.
+ *
+ * @ctrl: Controller to tear-down.
+ */
+int slim_unregister_controller(struct slim_controller *ctrl)
+{
+	/* Remove all clients */
+	device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
+	/* Enter Clock Pause */
+	slim_ctrl_clk_pause(ctrl, false, 0);
+	ida_simple_remove(&ctrl_ida, ctrl->id);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_unregister_controller);
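+
+/*
+ * Example (illustrative sketch, not part of this patch): the minimum a
+ * controller driver fills in before registering; my_xfer_msg() and
+ * my_set_laddr() are hypothetical, qcom-ctrl.c below is a complete
+ * implementation.
+ *
+ *	ctrl->dev = &pdev->dev;
+ *	ctrl->xfer_msg = my_xfer_msg;	// transmit one slim_msg_txn
+ *	ctrl->set_laddr = my_set_laddr;	// program a logical address
+ *
+ *	ret = slim_register_controller(ctrl);
+ *	if (ret)
+ *		return ret;
+ */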
+
+static void slim_device_update_status(struct slim_device *sbdev,
+				      enum slim_device_status status)
+{
+	struct slim_driver *sbdrv;
+
+	if (sbdev->status == status)
+		return;
+
+	sbdev->status = status;
+	if (!sbdev->dev.driver)
+		return;
+
+	sbdrv = to_slim_driver(sbdev->dev.driver);
+	if (sbdrv->device_status)
+		sbdrv->device_status(sbdev, sbdev->status);
+}
+
+/**
+ * slim_report_absent() - Controller calls this function when a device
+ *	reports absent, OR when the device cannot be communicated with
+ *
+ * @sbdev: Device that cannot be reached, or sent report absent
+ */
+void slim_report_absent(struct slim_device *sbdev)
+{
+	struct slim_controller *ctrl = sbdev->ctrl;
+
+	if (!ctrl)
+		return;
+
+	/* invalidate logical addresses */
+	mutex_lock(&ctrl->lock);
+	sbdev->is_laddr_valid = false;
+	mutex_unlock(&ctrl->lock);
+
+	ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
+	slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
+}
+EXPORT_SYMBOL_GPL(slim_report_absent);
+
+static bool slim_eaddr_equal(struct slim_eaddr *a, struct slim_eaddr *b)
+{
+	return (a->manf_id == b->manf_id &&
+		a->prod_code == b->prod_code &&
+		a->dev_index == b->dev_index &&
+		a->instance == b->instance);
+}
+
+static int slim_match_dev(struct device *dev, void *data)
+{
+	struct slim_eaddr *e_addr = data;
+	struct slim_device *sbdev = to_slim_device(dev);
+
+	return slim_eaddr_equal(&sbdev->e_addr, e_addr);
+}
+
+static struct slim_device *find_slim_device(struct slim_controller *ctrl,
+					    struct slim_eaddr *eaddr)
+{
+	struct slim_device *sbdev;
+	struct device *dev;
+
+	dev = device_find_child(ctrl->dev, eaddr, slim_match_dev);
+	if (dev) {
+		sbdev = to_slim_device(dev);
+		return sbdev;
+	}
+
+	return NULL;
+}
+
+/**
+ * slim_get_device() - get handle to a device.
+ *
+ * @ctrl: Controller on which this device will be added/queried
+ * @e_addr: Enumeration address of the device to be queried
+ *
+ * Return: pointer to a device if it has already reported. Creates a new
+ * device and returns pointer to it if the device has not yet enumerated.
+ */
+struct slim_device *slim_get_device(struct slim_controller *ctrl,
+				    struct slim_eaddr *e_addr)
+{
+	struct slim_device *sbdev;
+
+	sbdev = find_slim_device(ctrl, e_addr);
+	if (!sbdev) {
+		sbdev = slim_alloc_device(ctrl, e_addr, NULL);
+		if (!sbdev)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	return sbdev;
+}
+EXPORT_SYMBOL_GPL(slim_get_device);
+
+static int slim_device_alloc_laddr(struct slim_device *sbdev,
+				   bool report_present)
+{
+	struct slim_controller *ctrl = sbdev->ctrl;
+	u8 laddr;
+	int ret;
+
+	mutex_lock(&ctrl->lock);
+	if (ctrl->get_laddr) {
+		ret = ctrl->get_laddr(ctrl, &sbdev->e_addr, &laddr);
+		if (ret < 0)
+			goto err;
+	} else if (report_present) {
+		ret = ida_simple_get(&ctrl->laddr_ida,
+				     0, SLIM_LA_MANAGER - 1, GFP_KERNEL);
+		if (ret < 0)
+			goto err;
+
+		laddr = ret;
+	} else {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (ctrl->set_laddr) {
+		ret = ctrl->set_laddr(ctrl, &sbdev->e_addr, laddr);
+		if (ret) {
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	sbdev->laddr = laddr;
+	sbdev->is_laddr_valid = true;
+
+	slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
+
+	dev_dbg(ctrl->dev, "setting slimbus l-addr:%x, ea:%x,%x,%x,%x\n",
+		laddr, sbdev->e_addr.manf_id, sbdev->e_addr.prod_code,
+		sbdev->e_addr.dev_index, sbdev->e_addr.instance);
+
+err:
+	mutex_unlock(&ctrl->lock);
+	return ret;
+
+}
+
+/**
+ * slim_device_report_present() - Report enumerated device.
+ *
+ * @ctrl: Controller with which device is enumerated.
+ * @e_addr: Enumeration address of the device.
+ * @laddr: Return logical address (if valid flag is false)
+ *
+ * Called by controller in response to REPORT_PRESENT. Framework will assign
+ * a logical address to this enumeration address.
+ * Function returns -EXFULL to indicate that all logical addresses are already
+ * taken.
+ */
+int slim_device_report_present(struct slim_controller *ctrl,
+			       struct slim_eaddr *e_addr, u8 *laddr)
+{
+	struct slim_device *sbdev;
+	int ret;
+
+	ret = pm_runtime_get_sync(ctrl->dev);
+
+	if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
+		dev_err(ctrl->dev, "slim ctrl not active,state:%d, ret:%d\n",
+				    ctrl->sched.clk_state, ret);
+		ret = -EBUSY;
+		goto slimbus_not_active;
+	}
+
+	sbdev = slim_get_device(ctrl, e_addr);
+	if (IS_ERR(sbdev)) {
+		/* drop the runtime-pm vote taken above before returning */
+		ret = -ENODEV;
+		goto slimbus_not_active;
+	}
+
+	if (sbdev->is_laddr_valid) {
+		*laddr = sbdev->laddr;
+		ret = 0;
+		goto slimbus_not_active;
+	}
+
+	ret = slim_device_alloc_laddr(sbdev, true);
+
+slimbus_not_active:
+	pm_runtime_mark_last_busy(ctrl->dev);
+	pm_runtime_put_autosuspend(ctrl->dev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_device_report_present);
+
+/**
+ * slim_get_logical_addr() - get/allocate logical address of a SLIMbus device.
+ *
+ * @sbdev: client handle requesting the address.
+ *
+ * Return: zero if a logical address is valid or a new logical address
+ * has been assigned. error code in case of error.
+ */
+int slim_get_logical_addr(struct slim_device *sbdev)
+{
+	if (!sbdev->is_laddr_valid)
+		return slim_device_alloc_laddr(sbdev, false);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_get_logical_addr);
+
+static void __exit slimbus_exit(void)
+{
+	bus_unregister(&slimbus_bus);
+}
+module_exit(slimbus_exit);
+
+static int __init slimbus_init(void)
+{
+	return bus_register(&slimbus_bus);
+}
+postcore_initcall(slimbus_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SLIMbus core");
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
new file mode 100644
index 0000000..755462a
--- /dev/null
+++ b/drivers/slimbus/messaging.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include "slimbus.h"
+
+/**
+ * slim_msg_response() - Deliver Message response received from a device to the
+ *			framework.
+ *
+ * @ctrl: Controller handle
+ * @reply: Reply received from the device
+ * @tid: Transaction ID received with which framework can associate reply.
+ * @len: Length of the reply
+ *
+ * Called by controller to inform framework about the response received.
+ * This helps in making the API asynchronous: the controller driver doesn't
+ * need to manage another table, besides the one already kept by the
+ * framework, mapping TIDs to buffers.
+ */
+void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
+{
+	struct slim_msg_txn *txn;
+	struct slim_val_inf *msg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	txn = idr_find(&ctrl->tid_idr, tid);
+	if (txn == NULL) {
+		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+		return;
+	}
+
+	msg = txn->msg;
+	if (msg == NULL || msg->rbuf == NULL) {
+		dev_err(ctrl->dev, "Got response to invalid TID:%d, len:%d\n",
+				tid, len);
+		/* don't return with the txn lock still held */
+		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+		return;
+	}
+
+	idr_remove(&ctrl->tid_idr, tid);
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+
+	memcpy(msg->rbuf, reply, len);
+	if (txn->comp)
+		complete(txn->comp);
+
+	/* Remove runtime-pm vote now that response was received for TID txn */
+	pm_runtime_mark_last_busy(ctrl->dev);
+	pm_runtime_put_autosuspend(ctrl->dev);
+}
+EXPORT_SYMBOL_GPL(slim_msg_response);
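+
+/*
+ * Example (illustrative sketch, not part of this patch): a controller's RX
+ * path typically extracts the TID from its controller-specific header and
+ * hands the payload back to the core; payload and payload_len here are
+ * hypothetical names.
+ *
+ *	slim_msg_response(sctrl, payload, tid, payload_len);
+ */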
+
+/**
+ * slim_do_transfer() - Process a SLIMbus-messaging transaction
+ *
+ * @ctrl: Controller handle
+ * @txn: Transaction to be sent over SLIMbus
+ *
+ * Called by controller to transmit messaging transactions not dealing with
+ * Interface/Value elements (e.g. transmitting a message to assign a logical
+ * address to a slave device).
+ *
+ * Return: -ETIMEDOUT: If transmission of this message timed out
+ *	(e.g. due to bus lines not being clocked or driven by controller)
+ */
+int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	bool need_tid = false, clk_pause_msg = false;
+	unsigned long flags;
+	int ret, tid, timeout;
+
+	/*
+	 * do not vote for runtime-PM if the transactions are part of clock
+	 * pause sequence
+	 */
+	if (ctrl->sched.clk_state == SLIM_CLK_ENTERING_PAUSE &&
+		(txn->mt == SLIM_MSG_MT_CORE &&
+		 txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+		 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
+		clk_pause_msg = true;
+
+	if (!clk_pause_msg) {
+		ret = pm_runtime_get_sync(ctrl->dev);
+		if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
+			dev_err(ctrl->dev, "ctrl wrong state:%d, ret:%d\n",
+				ctrl->sched.clk_state, ret);
+			goto slim_xfer_err;
+		}
+	}
+
+	need_tid = slim_tid_txn(txn->mt, txn->mc);
+
+	if (need_tid) {
+		spin_lock_irqsave(&ctrl->txn_lock, flags);
+		tid = idr_alloc(&ctrl->tid_idr, txn, 0,
+				SLIM_MAX_TIDS, GFP_KERNEL);
+		txn->tid = tid;
+
+		if (!txn->msg->comp)
+			txn->comp = &done;
+		else
+			txn->comp = txn->msg->comp;
+
+		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+
+		if (tid < 0)
+			return tid;
+	}
+
+	ret = ctrl->xfer_msg(ctrl, txn);
+
+	/* wait for the response only if the transfer was queued successfully */
+	if (!ret && need_tid && !txn->msg->comp) {
+		unsigned long ms = txn->rl + HZ;
+
+		timeout = wait_for_completion_timeout(txn->comp,
+						      msecs_to_jiffies(ms));
+		if (!timeout) {
+			ret = -ETIMEDOUT;
+			spin_lock_irqsave(&ctrl->txn_lock, flags);
+			idr_remove(&ctrl->tid_idr, tid);
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+		}
+	}
+
+	if (ret)
+		dev_err(ctrl->dev, "Tx:MT:0x%x, MC:0x%x, LA:0x%x failed:%d\n",
+			txn->mt, txn->mc, txn->la, ret);
+
+slim_xfer_err:
+	if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) {
+		/*
+		 * remove runtime-pm vote if this was TX only, or
+		 * if there was an error during this transaction
+		 */
+		pm_runtime_mark_last_busy(ctrl->dev);
+		pm_runtime_put_autosuspend(ctrl->dev);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_do_transfer);
+
+static int slim_val_inf_sanity(struct slim_controller *ctrl,
+			       struct slim_val_inf *msg, u8 mc)
+{
+	if (!msg || msg->num_bytes > 16 ||
+	    (msg->start_offset + msg->num_bytes) > 0xC00)
+		goto reterr;
+	switch (mc) {
+	case SLIM_MSG_MC_REQUEST_VALUE:
+	case SLIM_MSG_MC_REQUEST_INFORMATION:
+		if (msg->rbuf != NULL)
+			return 0;
+		break;
+
+	case SLIM_MSG_MC_CHANGE_VALUE:
+	case SLIM_MSG_MC_CLEAR_INFORMATION:
+		if (msg->wbuf != NULL)
+			return 0;
+		break;
+
+	case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
+	case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
+		if (msg->rbuf != NULL && msg->wbuf != NULL)
+			return 0;
+		break;
+	}
+reterr:
+	if (msg)
+		dev_err(ctrl->dev,
+			"Sanity check failed:msg:offset:0x%x, mc:%d\n",
+			msg->start_offset, mc);
+	return -EINVAL;
+}
+
+static u16 slim_slicesize(int code)
+{
+	static const u8 sizetocode[16] = {
+		0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
+	};
+
+	code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
+
+	return sizetocode[code - 1];
+}
+
+/**
+ * slim_xfer_msg() - Transfer a value info message on slim device
+ *
+ * @sbdev: slim device to which this msg has to be transferred
+ * @msg: value info message pointer
+ * @mc: message code of the message
+ *
+ * Called by drivers which want to transfer value or information elements.
+ *
+ * Return: -ETIMEDOUT: If transmission of this message timed out
+ */
+int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg,
+		  u8 mc)
+{
+	DEFINE_SLIM_LDEST_TXN(txn_stack, mc, 6, sbdev->laddr, msg);
+	struct slim_msg_txn *txn = &txn_stack;
+	struct slim_controller *ctrl = sbdev->ctrl;
+	int ret;
+	u16 sl;
+
+	if (!ctrl)
+		return -EINVAL;
+
+	ret = slim_val_inf_sanity(ctrl, msg, mc);
+	if (ret)
+		return ret;
+
+	sl = slim_slicesize(msg->num_bytes);
+
+	dev_dbg(ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
+		msg->start_offset, msg->num_bytes, mc, sl);
+
+	txn->ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));
+
+	switch (mc) {
+	case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
+	case SLIM_MSG_MC_CHANGE_VALUE:
+	case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
+	case SLIM_MSG_MC_CLEAR_INFORMATION:
+		txn->rl += msg->num_bytes;
+		break;
+	default:
+		break;
+	}
+
+	if (slim_tid_txn(txn->mt, txn->mc))
+		txn->rl++;
+
+	return slim_do_transfer(ctrl, txn);
+}
+EXPORT_SYMBOL_GPL(slim_xfer_msg);
+
+static void slim_fill_msg(struct slim_val_inf *msg, u32 addr,
+			 size_t count, u8 *rbuf, u8 *wbuf)
+{
+	msg->start_offset = addr;
+	msg->num_bytes = count;
+	msg->rbuf = rbuf;
+	msg->wbuf = wbuf;
+}
+
+/**
+ * slim_read() - Read SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr:  address of value element to read.
+ * @count: number of bytes to read. Maximum bytes allowed are 16.
+ * @val: will return what the value element value was
+ *
+ * Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
+ * this message timed out (e.g. due to bus lines not being clocked
+ * or driven by controller)
+ */
+int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val)
+{
+	struct slim_val_inf msg;
+
+	slim_fill_msg(&msg, addr, count, val, NULL);
+
+	return slim_xfer_msg(sdev, &msg, SLIM_MSG_MC_REQUEST_VALUE);
+}
+EXPORT_SYMBOL_GPL(slim_read);
+
+/**
+ * slim_readb() - Read byte from SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr:  address in the value element to read.
+ *
+ * Return: byte value of value element.
+ */
+int slim_readb(struct slim_device *sdev, u32 addr)
+{
+	int ret;
+	u8 buf;
+
+	ret = slim_read(sdev, addr, 1, &buf);
+	if (ret < 0)
+		return ret;
+	else
+		return buf;
+}
+EXPORT_SYMBOL_GPL(slim_readb);
+
+/**
+ * slim_write() - Write SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr:  address in the value element to write.
+ * @count: number of bytes to write. Maximum bytes allowed are 16.
+ * @val: value to write to value element
+ *
+ * Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
+ * this message timed out (e.g. due to bus lines not being clocked
+ * or driven by controller)
+ */
+int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val)
+{
+	struct slim_val_inf msg;
+
+	slim_fill_msg(&msg, addr, count,  val, NULL);
+
+	return slim_xfer_msg(sdev, &msg, SLIM_MSG_MC_CHANGE_VALUE);
+}
+EXPORT_SYMBOL_GPL(slim_write);
+
+/**
+ * slim_writeb() - Write byte to SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr:  address of value element to write.
+ * @value: value to write to value element
+ *
+ * Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
+ * this message timed out (e.g. due to bus lines not being clocked
+ * or driven by controller)
+ *
+ */
+int slim_writeb(struct slim_device *sdev, u32 addr, u8 value)
+{
+	return slim_write(sdev, addr, 1, &value);
+}
+EXPORT_SYMBOL_GPL(slim_writeb);
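+
+/*
+ * Example (illustrative sketch, not part of this patch): read-modify-write
+ * of a single value element from a client driver; the element address
+ * 0x100 is hypothetical.
+ *
+ *	int ret = slim_readb(sbdev, 0x100);
+ *
+ *	if (ret < 0)
+ *		return ret;
+ *
+ *	return slim_writeb(sbdev, 0x100, ret | BIT(0));
+ */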
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
new file mode 100644
index 0000000..f51de12
--- /dev/null
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include "slimbus.h"
+
+/* Manager registers */
+#define	MGR_CFG		0x200
+#define	MGR_STATUS	0x204
+#define	MGR_INT_EN	0x210
+#define	MGR_INT_STAT	0x214
+#define	MGR_INT_CLR	0x218
+#define	MGR_TX_MSG	0x230
+#define	MGR_RX_MSG	0x270
+#define	MGR_IE_STAT	0x2F0
+#define	MGR_VE_STAT	0x300
+#define	MGR_CFG_ENABLE	1
+
+/* Framer registers */
+#define	FRM_CFG		0x400
+#define	FRM_STAT	0x404
+#define	FRM_INT_EN	0x410
+#define	FRM_INT_STAT	0x414
+#define	FRM_INT_CLR	0x418
+#define	FRM_WAKEUP	0x41C
+#define	FRM_CLKCTL_DONE	0x420
+#define	FRM_IE_STAT	0x430
+#define	FRM_VE_STAT	0x440
+
+/* Interface registers */
+#define	INTF_CFG	0x600
+#define	INTF_STAT	0x604
+#define	INTF_INT_EN	0x610
+#define	INTF_INT_STAT	0x614
+#define	INTF_INT_CLR	0x618
+#define	INTF_IE_STAT	0x630
+#define	INTF_VE_STAT	0x640
+
+/* Interrupt status bits */
+#define	MGR_INT_TX_NACKED_2	BIT(25)
+#define	MGR_INT_MSG_BUF_CONTE	BIT(26)
+#define	MGR_INT_RX_MSG_RCVD	BIT(30)
+#define	MGR_INT_TX_MSG_SENT	BIT(31)
+
+/* Framer config register settings */
+#define	FRM_ACTIVE	1
+#define	CLK_GEAR	7
+#define	ROOT_FREQ	11
+#define	REF_CLK_GEAR	15
+#define	INTR_WAKE	19
+
+#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
+		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
+
+#define SLIM_ROOT_FREQ 24576000
+#define QCOM_SLIM_AUTOSUSPEND 1000
+
+/* MAX message size over control channel */
+#define SLIM_MSGQ_BUF_LEN	40
+#define QCOM_TX_MSGS 2
+#define QCOM_RX_MSGS	8
+#define QCOM_BUF_ALLOC_RETRIES	10
+
+#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
+
+/* V2 Component registers */
+#define CFG_PORT_V2(r) ((r ## _V2))
+#define	COMP_CFG_V2		4
+#define	COMP_TRUST_CFG_V2	0x3000
+
+/* V1 Component registers */
+#define CFG_PORT_V1(r) ((r ## _V1))
+#define	COMP_CFG_V1		0
+#define	COMP_TRUST_CFG_V1	0x14
+
+/* Resource group info for manager, and non-ported generic device-components */
+#define EE_MGR_RSC_GRP	(1 << 10)
+#define EE_NGD_2	(2 << 6)
+#define EE_NGD_1	0
+
+struct slim_ctrl_buf {
+	void		*base;
+	dma_addr_t	phy;
+	spinlock_t	lock;
+	int		head;
+	int		tail;
+	int		sl_sz;
+	int		n;
+};
+
+struct qcom_slim_ctrl {
+	struct slim_controller  ctrl;
+	struct slim_framer	framer;
+	struct device		*dev;
+	void __iomem		*base;
+	void __iomem		*slew_reg;
+
+	struct slim_ctrl_buf	rx;
+	struct slim_ctrl_buf	tx;
+
+	struct completion	**wr_comp;
+	int			irq;
+	struct workqueue_struct *rxwq;
+	struct work_struct	wd;
+	struct clk		*rclk;
+	struct clk		*hclk;
+};
+
+static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
+			       u8 len, u32 tx_reg)
+{
+	int count = (len + 3) >> 2;
+
+	__iowrite32_copy(ctrl->base + tx_reg, buf, count);
+
+	/* Ensure order of subsequent writes */
+	mb();
+}
+
+static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
+{
+	unsigned long flags;
+	int idx;
+
+	spin_lock_irqsave(&ctrl->rx.lock, flags);
+	if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
+		spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+		dev_err(ctrl->dev, "RX QUEUE full!");
+		return NULL;
+	}
+	idx = ctrl->rx.tail;
+	ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
+	spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+
+	return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
+}
+
+void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
+{
+	struct completion *comp;
+	unsigned long flags;
+	int idx;
+
+	spin_lock_irqsave(&ctrl->tx.lock, flags);
+	idx = ctrl->tx.head;
+	ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
+	spin_unlock_irqrestore(&ctrl->tx.lock, flags);
+
+	comp = ctrl->wr_comp[idx];
+	ctrl->wr_comp[idx] = NULL;
+
+	complete(comp);
+}
+
+static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
+					   u32 stat)
+{
+	int err = 0;
+
+	if (stat & MGR_INT_TX_MSG_SENT)
+		writel_relaxed(MGR_INT_TX_MSG_SENT,
+			       ctrl->base + MGR_INT_CLR);
+
+	if (stat & MGR_INT_TX_NACKED_2) {
+		u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
+		u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
+		u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
+		u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
+		u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
+		u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
+		u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
+		u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
+		u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);
+
+		writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);
+
+		dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
+			stat, mgr_stat);
+		dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
+		dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
+			frm_intr_stat, frm_stat);
+		dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
+			frm_cfg, frm_ie_stat);
+		dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
+			intf_intr_stat, intf_stat);
+		dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
+			intf_ie_stat);
+		err = -ENOTCONN;
+	}
+
+	slim_ack_txn(ctrl, err);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
+					   u32 stat)
+{
+	u32 *rx_buf, pkt[10];
+	bool q_rx = false;
+	u8 mc, mt, len;
+
+	pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
+	mt = SLIM_HEADER_GET_MT(pkt[0]);
+	len = SLIM_HEADER_GET_RL(pkt[0]);
+	mc = SLIM_HEADER_GET_MC(pkt[0]>>8);
+
+	/*
+	 * this message cannot be handled by ISR, so
+	 * let work-queue handle it
+	 */
+	if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) {
+		rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
+		if (!rx_buf) {
+			dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
+					pkt[0]);
+			goto rx_ret_irq;
+		}
+		rx_buf[0] = pkt[0];
+
+	} else {
+		rx_buf = pkt;
+	}
+
+	__ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
+			DIV_ROUND_UP(len, 4));
+
+	switch (mc) {
+
+	case SLIM_MSG_MC_REPORT_PRESENT:
+		q_rx = true;
+		break;
+	case SLIM_MSG_MC_REPLY_INFORMATION:
+	case SLIM_MSG_MC_REPLY_VALUE:
+		slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
+				  (u8)(*rx_buf >> 24), (len - 4));
+		break;
+	default:
+		dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
+			mc, mt);
+		break;
+	}
+rx_ret_irq:
+	writel(MGR_INT_RX_MSG_RCVD, ctrl->base +
+		       MGR_INT_CLR);
+	if (q_rx)
+		queue_work(ctrl->rxwq, &ctrl->wd);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_slim_interrupt(int irq, void *d)
+{
+	struct qcom_slim_ctrl *ctrl = d;
+	u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
+	int ret = IRQ_NONE;
+
+	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2)
+		ret = qcom_slim_handle_tx_irq(ctrl, stat);
+
+	if (stat & MGR_INT_RX_MSG_RCVD)
+		ret = qcom_slim_handle_rx_irq(ctrl, stat);
+
+	return ret;
+}
+
+static int qcom_clk_pause_wakeup(struct slim_controller *sctrl)
+{
+	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+
+	clk_prepare_enable(ctrl->hclk);
+	clk_prepare_enable(ctrl->rclk);
+	enable_irq(ctrl->irq);
+
+	writel_relaxed(1, ctrl->base + FRM_WAKEUP);
+	/* Make sure framer wakeup write goes through before ISR fires */
+	mb();
+	/*
+	 * HW Workaround: Currently, slave is reporting lost-sync messages
+	 * after SLIMbus comes out of clock pause.
+	 * Transactions with the slave fail before the slave reports that
+	 * message, so give some time for that report to come in.
+	 * SLIMbus wakes up in clock gear 10 at 24.576MHz. With each superframe
+	 * being 250 usecs, we wait for 5-10 superframes here to ensure
+	 * we get the message.
+	 */
+	usleep_range(1250, 2500);
+	return 0;
+}
+
+void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl, struct slim_msg_txn *txn,
+		       struct completion *done)
+{
+	unsigned long flags;
+	int idx;
+
+	spin_lock_irqsave(&ctrl->tx.lock, flags);
+	if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
+		spin_unlock_irqrestore(&ctrl->tx.lock, flags);
+		dev_err(ctrl->dev, "controller TX buf unavailable");
+		return NULL;
+	}
+	idx = ctrl->tx.tail;
+	ctrl->wr_comp[idx] = done;
+	ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;
+
+	spin_unlock_irqrestore(&ctrl->tx.lock, flags);
+
+	return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
+}
+
+
+static int qcom_xfer_msg(struct slim_controller *sctrl,
+			 struct slim_msg_txn *txn)
+{
+	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+	DECLARE_COMPLETION_ONSTACK(done);
+	void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
+	unsigned long ms = txn->rl + HZ;
+	u8 *puc;
+	int ret = 0, timeout, retries = QCOM_BUF_ALLOC_RETRIES;
+	u8 la = txn->la;
+	u32 *head;
+	/* HW expects length field to be excluded */
+	txn->rl--;
+
+	/* spin till buffer is made available */
+	if (!pbuf) {
+		while (retries--) {
+			usleep_range(10000, 15000);
+			pbuf = slim_alloc_txbuf(ctrl, txn, &done);
+			if (pbuf)
+				break;
+		}
+	}
+
+	/* retries has gone negative when the loop is exhausted, so test pbuf */
+	if (!pbuf)
+		return -ENOMEM;
+
+	puc = (u8 *)pbuf;
+	head = (u32 *)pbuf;
+
+	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
+		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
+						txn->mc, 0, la);
+		puc += 3;
+	} else {
+		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
+						txn->mc, 1, la);
+		puc += 2;
+	}
+
+	if (slim_tid_txn(txn->mt, txn->mc))
+		*(puc++) = txn->tid;
+
+	if (slim_ec_txn(txn->mt, txn->mc)) {
+		*(puc++) = (txn->ec & 0xFF);
+		*(puc++) = (txn->ec >> 8) & 0xFF;
+	}
+
+	if (txn->msg && txn->msg->wbuf)
+		memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
+
+	qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
+	timeout = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));
+
+	if (!timeout) {
+		dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
+					txn->mt);
+		ret = -ETIMEDOUT;
+	}
+
+	return ret;
+
+}
+
+static int qcom_set_laddr(struct slim_controller *sctrl,
+				struct slim_eaddr *ead, u8 laddr)
+{
+	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+	struct {
+		__be16 manf_id;
+		__be16 prod_code;
+		u8 dev_index;
+		u8 instance;
+		u8 laddr;
+	} __packed p;
+	struct slim_val_inf msg = {0};
+	DEFINE_SLIM_EDEST_TXN(txn, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
+			      10, laddr, &msg);
+	int ret;
+
+	p.manf_id = cpu_to_be16(ead->manf_id);
+	p.prod_code = cpu_to_be16(ead->prod_code);
+	p.dev_index = ead->dev_index;
+	p.instance = ead->instance;
+	p.laddr = laddr;
+
+	msg.wbuf = (void *)&p;
+	msg.num_bytes = 7;
+	ret = slim_do_transfer(&ctrl->ctrl, &txn);
+
+	if (ret)
+		dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
+				  laddr, ret);
+	return ret;
+}
+
+static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->rx.lock, flags);
+	if (ctrl->rx.tail == ctrl->rx.head) {
+		spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+		return -ENODATA;
+	}
+	memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
+				ctrl->rx.sl_sz);
+
+	ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
+	spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+
+	return 0;
+}
+
+static void qcom_slim_rxwq(struct work_struct *work)
+{
+	u8 buf[SLIM_MSGQ_BUF_LEN];
+	u8 mc, mt, len;
+	int ret;
+	struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
+						 wd);
+
+	while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
+		len = SLIM_HEADER_GET_RL(buf[0]);
+		mt = SLIM_HEADER_GET_MT(buf[0]);
+		mc = SLIM_HEADER_GET_MC(buf[1]);
+		if (mt == SLIM_MSG_MT_CORE &&
+			mc == SLIM_MSG_MC_REPORT_PRESENT) {
+			struct slim_eaddr ea;
+			u8 laddr;
+
+			ea.manf_id = be16_to_cpup((__be16 *)&buf[2]);
+			ea.prod_code = be16_to_cpup((__be16 *)&buf[4]);
+			ea.dev_index = buf[6];
+			ea.instance = buf[7];
+
+			ret = slim_device_report_present(&ctrl->ctrl, &ea,
+							 &laddr);
+			if (ret < 0)
+				dev_err(ctrl->dev, "assign laddr failed:%d\n",
+					ret);
+		} else {
+			dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
+				mc, mt);
+		}
+	}
+}
+
+static void qcom_slim_prg_slew(struct platform_device *pdev,
+				struct qcom_slim_ctrl *ctrl)
+{
+	struct resource	*slew_mem;
+
+	if (!ctrl->slew_reg) {
+		/* SLEW RATE register for this SLIMbus */
+		slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+				"slew");
+		if (!slew_mem)
+			return;
+		ctrl->slew_reg = devm_ioremap(&pdev->dev, slew_mem->start,
+				resource_size(slew_mem));
+		if (!ctrl->slew_reg)
+			return;
+	}
+
+	writel_relaxed(1, ctrl->slew_reg);
+	/* Make sure SLIMbus-slew rate enabling goes through */
+	wmb();
+}
+
+static int qcom_slim_probe(struct platform_device *pdev)
+{
+	struct qcom_slim_ctrl *ctrl;
+	struct slim_controller *sctrl;
+	struct resource *slim_mem;
+	int ret, ver;
+
+	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+
+	ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
+	if (IS_ERR(ctrl->hclk))
+		return PTR_ERR(ctrl->hclk);
+
+	ctrl->rclk = devm_clk_get(&pdev->dev, "core");
+	if (IS_ERR(ctrl->rclk))
+		return PTR_ERR(ctrl->rclk);
+
+	ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
+	if (ret) {
+		dev_err(&pdev->dev, "ref-clock set-rate failed:%d\n", ret);
+		return ret;
+	}
+
+	ctrl->irq = platform_get_irq(pdev, 0);
+	if (ctrl->irq < 0) {
+		/* platform_get_irq() returns a negative errno on failure */
+		dev_err(&pdev->dev, "no slimbus IRQ\n");
+		return ctrl->irq;
+	}
+
+	sctrl = &ctrl->ctrl;
+	sctrl->dev = &pdev->dev;
+	ctrl->dev = &pdev->dev;
+	platform_set_drvdata(pdev, ctrl);
+	dev_set_drvdata(ctrl->dev, ctrl);
+
+	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+	ctrl->base = devm_ioremap_resource(ctrl->dev, slim_mem);
+	if (IS_ERR(ctrl->base)) {
+		/* devm_ioremap_resource() returns an ERR_PTR(), never NULL */
+		dev_err(&pdev->dev, "IOremap failed\n");
+		return PTR_ERR(ctrl->base);
+	}
+
+	sctrl->set_laddr = qcom_set_laddr;
+	sctrl->xfer_msg = qcom_xfer_msg;
+	sctrl->wakeup = qcom_clk_pause_wakeup;
+	ctrl->tx.n = QCOM_TX_MSGS;
+	ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
+	ctrl->rx.n = QCOM_RX_MSGS;
+	ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
+	ctrl->wr_comp = devm_kcalloc(&pdev->dev, QCOM_TX_MSGS,
+				     sizeof(*ctrl->wr_comp), GFP_KERNEL);
+	if (!ctrl->wr_comp)
+		return -ENOMEM;
+
+	spin_lock_init(&ctrl->rx.lock);
+	spin_lock_init(&ctrl->tx.lock);
+	INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
+	ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
+	if (!ctrl->rxwq) {
+		dev_err(ctrl->dev, "Failed to start Rx WQ\n");
+		return -ENOMEM;
+	}
+
+	ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
+	ctrl->framer.superfreq =
+		ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+	sctrl->a_framer = &ctrl->framer;
+	sctrl->clkgear = SLIM_MAX_CLK_GEAR;
+
+	qcom_slim_prg_slew(pdev, ctrl);
+
+	ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
+				IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
+	if (ret) {
+		dev_err(&pdev->dev, "request IRQ failed\n");
+		goto err_request_irq_failed;
+	}
+
+	ret = clk_prepare_enable(ctrl->hclk);
+	if (ret)
+		goto err_hclk_enable_failed;
+
+	ret = clk_prepare_enable(ctrl->rclk);
+	if (ret)
+		goto err_rclk_enable_failed;
+
+	ctrl->tx.base = dmam_alloc_coherent(&pdev->dev,
+					   (ctrl->tx.sl_sz * ctrl->tx.n),
+					   &ctrl->tx.phy, GFP_KERNEL);
+	if (!ctrl->tx.base) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ctrl->rx.base = dmam_alloc_coherent(&pdev->dev,
+					   (ctrl->rx.sl_sz * ctrl->rx.n),
+					   &ctrl->rx.phy, GFP_KERNEL);
+	if (!ctrl->rx.base) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* Register with framework before enabling frame, clock */
+	ret = slim_register_controller(&ctrl->ctrl);
+	if (ret) {
+		dev_err(ctrl->dev, "error adding controller\n");
+		goto err;
+	}
+
+	ver = readl_relaxed(ctrl->base);
+	/* Version info in 16 MSbits */
+	ver >>= 16;
+	/* Component register initialization */
+	writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
+	writel((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
+				ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));
+
+	writel((MGR_INT_TX_NACKED_2 |
+			MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
+			MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
+	writel(1, ctrl->base + MGR_CFG);
+	/* Framer register initialization */
+	writel((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
+		(0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
+		ctrl->base + FRM_CFG);
+	writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
+	writel(1, ctrl->base + INTF_CFG);
+	writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
+
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, QCOM_SLIM_AUTOSUSPEND);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
+	return 0;
+
+err:
+	clk_disable_unprepare(ctrl->rclk);
+err_rclk_enable_failed:
+	clk_disable_unprepare(ctrl->hclk);
+err_hclk_enable_failed:
+err_request_irq_failed:
+	destroy_workqueue(ctrl->rxwq);
+	return ret;
+}
+
+static int qcom_slim_remove(struct platform_device *pdev)
+{
+	struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
+
+	pm_runtime_disable(&pdev->dev);
+	slim_unregister_controller(&ctrl->ctrl);
+	destroy_workqueue(ctrl->rxwq);
+	return 0;
+}
+
+/*
+ * When runtime PM is disabled, these two functions serve as helpers
+ * called from system suspend/resume.
+ */
+#ifdef CONFIG_PM
+static int qcom_slim_runtime_suspend(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
+	int ret;
+
+	dev_dbg(device, "pm_runtime: suspending...\n");
+	ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
+	if (ret) {
+		dev_err(device, "clk pause not entered:%d", ret);
+	} else {
+		disable_irq(ctrl->irq);
+		clk_disable_unprepare(ctrl->hclk);
+		clk_disable_unprepare(ctrl->rclk);
+	}
+	return ret;
+}
+
+static int qcom_slim_runtime_resume(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	dev_dbg(device, "pm_runtime: resuming...\n");
+	ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
+	if (ret)
+		dev_err(device, "clk pause not exited:%d", ret);
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int qcom_slim_suspend(struct device *dev)
+{
+	int ret = 0;
+
+	if (!pm_runtime_enabled(dev) ||
+		(!pm_runtime_suspended(dev))) {
+		dev_dbg(dev, "system suspend");
+		ret = qcom_slim_runtime_suspend(dev);
+	}
+
+	return ret;
+}
+
+static int qcom_slim_resume(struct device *dev)
+{
+	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
+		int ret;
+
+		dev_dbg(dev, "system resume");
+		ret = qcom_slim_runtime_resume(dev);
+		if (!ret) {
+			pm_runtime_mark_last_busy(dev);
+			pm_request_autosuspend(dev);
+		}
+		return ret;
+
+	}
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops qcom_slim_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(qcom_slim_suspend, qcom_slim_resume)
+	SET_RUNTIME_PM_OPS(
+			   qcom_slim_runtime_suspend,
+			   qcom_slim_runtime_resume,
+			   NULL
+	)
+};
+
+static const struct of_device_id qcom_slim_dt_match[] = {
+	{ .compatible = "qcom,slim", },
+	{ .compatible = "qcom,apq8064-slim", },
+	{}
+};
+
+static struct platform_driver qcom_slim_driver = {
+	.probe = qcom_slim_probe,
+	.remove = qcom_slim_remove,
+	.driver	= {
+		.name = "qcom_slim_ctrl",
+		.of_match_table = qcom_slim_dt_match,
+		.pm = &qcom_slim_dev_pm_ops,
+	},
+};
+module_platform_driver(qcom_slim_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm SLIMbus Controller");
diff --git a/drivers/slimbus/sched.c b/drivers/slimbus/sched.c
new file mode 100644
index 0000000..af84997d
--- /dev/null
+++ b/drivers/slimbus/sched.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/errno.h>
+#include "slimbus.h"
+
+/**
+ * slim_ctrl_clk_pause() - Called by slimbus controller to enter/exit
+ *			   'clock pause'
+ * @ctrl: controller requesting bus to be paused or woken up
+ * @wakeup: Wakeup this controller from clock pause.
+ * @restart: Restart time value per spec used for clock pause. This value
+ *	isn't used when controller is to be woken up.
+ *
+ * The SLIMbus specification requires this sequence to turn off clocks for
+ * the bus. The sequence involves sending 3 broadcast messages (the
+ * reconfiguration sequence) to inform all devices on the bus.
+ * To exit clock pause, the controller typically wakes up the active framer
+ * device. This API executes the clock pause reconfiguration sequence if
+ * wakeup is false; if wakeup is true, the controller's wakeup is called.
+ * For entering clock pause, -EBUSY is returned if a message txn is pending.
+ */
+int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart)
+{
+	int i, ret = 0;
+	unsigned long flags;
+	struct slim_sched *sched = &ctrl->sched;
+	struct slim_val_inf msg = {0, 0, NULL, NULL};
+
+	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
+				3, SLIM_LA_MANAGER, &msg);
+
+	if (wakeup == false && restart > SLIM_CLK_UNSPECIFIED)
+		return -EINVAL;
+
+	mutex_lock(&sched->m_reconf);
+	if (wakeup) {
+		if (sched->clk_state == SLIM_CLK_ACTIVE) {
+			mutex_unlock(&sched->m_reconf);
+			return 0;
+		}
+
+		/*
+		 * Fine-tune calculation based on clock gear,
+		 * message-bandwidth after bandwidth management
+		 */
+		ret = wait_for_completion_timeout(&sched->pause_comp,
+				msecs_to_jiffies(100));
+		if (!ret) {
+			mutex_unlock(&sched->m_reconf);
+			pr_err("Previous clock pause did not finish");
+			return -ETIMEDOUT;
+		}
+		ret = 0;
+
+		/*
+		 * Slimbus framework will call controller wakeup
+		 * Controller should make sure that it sets active framer
+		 * out of clock pause
+		 */
+		if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
+			ret = ctrl->wakeup(ctrl);
+		if (!ret)
+			sched->clk_state = SLIM_CLK_ACTIVE;
+		mutex_unlock(&sched->m_reconf);
+
+		return ret;
+	}
+
+	/* already paused */
+	if (ctrl->sched.clk_state == SLIM_CLK_PAUSED) {
+		mutex_unlock(&sched->m_reconf);
+		return 0;
+	}
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	for (i = 0; i < SLIM_MAX_TIDS; i++) {
+		/* Pending response for a message */
+		if (idr_find(&ctrl->tid_idr, i)) {
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+			mutex_unlock(&sched->m_reconf);
+			return -EBUSY;
+		}
+	}
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+
+	sched->clk_state = SLIM_CLK_ENTERING_PAUSE;
+
+	/* clock pause sequence */
+	ret = slim_do_transfer(ctrl, &txn);
+	if (ret)
+		goto clk_pause_ret;
+
+	txn.mc = SLIM_MSG_MC_NEXT_PAUSE_CLOCK;
+	txn.rl = 4;
+	msg.num_bytes = 1;
+	msg.wbuf = &restart;
+	ret = slim_do_transfer(ctrl, &txn);
+	if (ret)
+		goto clk_pause_ret;
+
+	txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+	txn.rl = 3;
+	msg.num_bytes = 1;
+	msg.wbuf = NULL;
+	ret = slim_do_transfer(ctrl, &txn);
+
+clk_pause_ret:
+	if (ret) {
+		sched->clk_state = SLIM_CLK_ACTIVE;
+	} else {
+		sched->clk_state = SLIM_CLK_PAUSED;
+		complete(&sched->pause_comp);
+	}
+	mutex_unlock(&sched->m_reconf);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_ctrl_clk_pause);
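+
+/*
+ * Illustrative sketch (hypothetical names): a controller driver would
+ * typically pair this API with its runtime PM callbacks, entering clock
+ * pause on suspend and waking the active framer on resume, e.g.:
+ *
+ *	static int example_ctrl_runtime_suspend(struct device *dev)
+ *	{
+ *		struct slim_controller *ctrl = dev_get_drvdata(dev);
+ *
+ *		return slim_ctrl_clk_pause(ctrl, false, SLIM_CLK_UNSPECIFIED);
+ *	}
+ *
+ *	static int example_ctrl_runtime_resume(struct device *dev)
+ *	{
+ *		struct slim_controller *ctrl = dev_get_drvdata(dev);
+ *
+ *		return slim_ctrl_clk_pause(ctrl, true, 0);
+ *	}
+ */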
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
new file mode 100644
index 0000000..79f8e05
--- /dev/null
+++ b/drivers/slimbus/slimbus.h
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#ifndef _DRIVERS_SLIMBUS_H
+#define _DRIVERS_SLIMBUS_H
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/slimbus.h>
+
+/* Standard values per SLIMbus spec needed by controllers and devices */
+#define SLIM_CL_PER_SUPERFRAME		6144
+#define SLIM_CL_PER_SUPERFRAME_DIV8	(SLIM_CL_PER_SUPERFRAME >> 3)
+
+/* SLIMbus message types. Related to interpretation of message code. */
+#define SLIM_MSG_MT_CORE			0x0
+
+/*
+ * SLIM Broadcast header format
+ * BYTE 0: MT[7:5] RL[4:0]
+ * BYTE 1: RSVD[7] MC[6:0]
+ * BYTE 2: RSVD[7:6] DT[5:4] PI[3:0]
+ */
+#define SLIM_MSG_MT_MASK	GENMASK(2, 0)
+#define SLIM_MSG_MT_SHIFT	5
+#define SLIM_MSG_RL_MASK	GENMASK(4, 0)
+#define SLIM_MSG_RL_SHIFT	0
+#define SLIM_MSG_MC_MASK	GENMASK(6, 0)
+#define SLIM_MSG_MC_SHIFT	0
+#define SLIM_MSG_DT_MASK	GENMASK(1, 0)
+#define SLIM_MSG_DT_SHIFT	4
+
+#define SLIM_HEADER_GET_MT(b)	((b >> SLIM_MSG_MT_SHIFT) & SLIM_MSG_MT_MASK)
+#define SLIM_HEADER_GET_RL(b)	((b >> SLIM_MSG_RL_SHIFT) & SLIM_MSG_RL_MASK)
+#define SLIM_HEADER_GET_MC(b)	((b >> SLIM_MSG_MC_SHIFT) & SLIM_MSG_MC_MASK)
+#define SLIM_HEADER_GET_DT(b)	((b >> SLIM_MSG_DT_SHIFT) & SLIM_MSG_DT_MASK)
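+
+/*
+ * Worked example: a first header byte of 0x23 (0b001 00011) decodes to
+ * MT = 0x1 and RL = 3 via the accessors above.
+ */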
+
+/* Device management messages used by this framework */
+#define SLIM_MSG_MC_REPORT_PRESENT               0x1
+#define SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS       0x2
+#define SLIM_MSG_MC_REPORT_ABSENT                0xF
+
+/* Clock pause Reconfiguration messages */
+#define SLIM_MSG_MC_BEGIN_RECONFIGURATION        0x40
+#define SLIM_MSG_MC_NEXT_PAUSE_CLOCK             0x4A
+#define SLIM_MSG_MC_RECONFIGURE_NOW              0x5F
+
+/* Clock pause values per SLIMbus spec */
+#define SLIM_CLK_FAST				0
+#define SLIM_CLK_CONST_PHASE			1
+#define SLIM_CLK_UNSPECIFIED			2
+
+/* Destination type Values */
+#define SLIM_MSG_DEST_LOGICALADDR	0
+#define SLIM_MSG_DEST_ENUMADDR		1
+#define	SLIM_MSG_DEST_BROADCAST		3
+
+/* Standard values per SLIMbus spec needed by controllers and devices */
+#define SLIM_MAX_CLK_GEAR		10
+#define SLIM_MIN_CLK_GEAR		1
+
+/* Manager's logical address is set to 0xFF per spec */
+#define SLIM_LA_MANAGER 0xFF
+
+#define SLIM_MAX_TIDS			256
+/**
+ * struct slim_framer - Represents SLIMbus framer.
+ * Every controller may have multiple framers. There is 1 active framer device
+ * responsible for clocking the bus.
+ * Manager is responsible for framer hand-over.
+ * @dev: Driver model representation of the device.
+ * @e_addr: Enumeration address of the framer.
+ * @rootfreq: Root frequency at which the framer can run. This is the
+ *	maximum frequency ('clock gear 10') at which the bus can operate.
+ * @superfreq: Superframes per root frequency. Every superframe is 6144
+ *	cells (bits) long.
+ */
+struct slim_framer {
+	struct device		dev;
+	struct slim_eaddr	e_addr;
+	int			rootfreq;
+	int			superfreq;
+};
+
+#define to_slim_framer(d) container_of(d, struct slim_framer, dev)
+
+/**
+ * struct slim_msg_txn - Message to be sent by the controller.
+ *			This structure has packet header,
+ *			payload and buffer to be filled (if any)
+ * @rl: Header field. remaining length.
+ * @mt: Header field. Message type.
+ * @mc: Header field. LSB is message code for type mt.
+ * @dt: Header field. Destination type.
+ * @ec: Element code. Used for elemental access APIs.
+ * @tid: Transaction ID. Used for messages expecting response.
+ *	(relevant for message-codes involving read operation)
+ * @la: Logical address of the device this message is going to.
+ *	(Not used when destination type is broadcast.)
+ * @msg: Elemental access message to be read/written
+ * @comp: completion if read/write is synchronous, used internally
+ *	for tid based transactions.
+ */
+struct slim_msg_txn {
+	u8			rl;
+	u8			mt;
+	u8			mc;
+	u8			dt;
+	u16			ec;
+	u8			tid;
+	u8			la;
+	struct slim_val_inf	*msg;
+	struct	completion	*comp;
+};
+
+/* Frequently used message transaction structures */
+#define DEFINE_SLIM_LDEST_TXN(name, mc, rl, la, msg) \
+	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_LOGICALADDR, 0,\
+					0, la, msg, }
+
+#define DEFINE_SLIM_BCAST_TXN(name, mc, rl, la, msg) \
+	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_BROADCAST, 0,\
+					0, la, msg, }
+
+#define DEFINE_SLIM_EDEST_TXN(name, mc, rl, la, msg) \
+	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_ENUMADDR, 0,\
+					0, la, msg, }
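+
+/*
+ * Illustrative use of these helpers (mirroring sched.c): declare a
+ * broadcast transaction for the clock-pause reconfiguration sequence:
+ *
+ *	struct slim_val_inf msg = {0, 0, NULL, NULL};
+ *	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
+ *			      3, SLIM_LA_MANAGER, &msg);
+ */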
+/**
+ * enum slim_clk_state: SLIMbus controller's clock state used internally for
+ *	maintaining current clock state.
+ * @SLIM_CLK_ACTIVE: SLIMbus clock is active
+ * @SLIM_CLK_ENTERING_PAUSE: SLIMbus clock pause sequence is being sent on the
+ *	bus. If this succeeds, state changes to SLIM_CLK_PAUSED. If the
+ *	transition fails, state changes back to SLIM_CLK_ACTIVE
+ * @SLIM_CLK_PAUSED: SLIMbus controller clock has paused.
+ */
+enum slim_clk_state {
+	SLIM_CLK_ACTIVE,
+	SLIM_CLK_ENTERING_PAUSE,
+	SLIM_CLK_PAUSED,
+};
+
+/**
+ * struct slim_sched: Framework uses this structure internally for scheduling.
+ * @clk_state: Controller's clock state from enum slim_clk_state
+ * @pause_comp: Signals completion of clock pause sequence. This is useful when
+ *	client tries to call SLIMbus transaction when controller is entering
+ *	clock pause.
+ * @m_reconf: This mutex is held until current reconfiguration (data channel
+ *	scheduling, message bandwidth reservation) is done. Message APIs can
+ *	use the bus concurrently when this mutex is held since elemental access
+ *	messages can be sent on the bus when reconfiguration is in progress.
+ */
+struct slim_sched {
+	enum slim_clk_state	clk_state;
+	struct completion	pause_comp;
+	struct mutex		m_reconf;
+};
+
+/**
+ * struct slim_controller  - Controls every instance of SLIMbus
+ *				(similar to 'master' on SPI)
+ * @dev: Device interface to this driver
+ * @id: Board-specific number identifier for this controller/bus
+ * @name: Name for this controller
+ * @min_cg: Minimum clock gear supported by this controller (default value: 1)
+ * @max_cg: Maximum clock gear supported by this controller (default value: 10)
+ * @clkgear: Current clock gear in which this bus is running
+ * @laddr_ida: logical address id allocator
+ * @a_framer: Active framer which is clocking the bus managed by this controller
+ * @lock: Mutex protecting controller data structures
+ * @devices: Slim device list
+ * @tid_idr: tid id allocator
+ * @txn_lock: Lock to protect table of transactions
+ * @sched: scheduler structure used by the controller
+ * @xfer_msg: Transfer a message on this controller (this can be a broadcast
+ *	control/status message like data channel setup, or a unicast message
+ *	like value element read/write).
+ * @set_laddr: Setup logical address at laddr for the slave with elemental
+ *	address e_addr. Drivers implementing controller will be expected to
+ *	send unicast message to this device with its logical address.
+ * @get_laddr: It is possible that the controller needs a fixed logical
+ *	address table; get_laddr can be used in that case so that the
+ *	controller performs the assignment. The use case is when the master
+ *	is on the remote processor side, which is responsible for allocating
+ *	the laddr.
+ * @wakeup: This function pointer implements controller-specific procedure
+ *	to wake it up from clock-pause. Framework will call this to bring
+ *	the controller out of clock pause.
+ *
+ *	'Manager device' is responsible for device management, bandwidth
+ *	allocation, channel setup, and port associations per channel.
+ *	Device management means logical address assignment/removal based on
+ *	enumeration (report-present, report-absent) of a device.
+ *	Bandwidth allocation is done dynamically by the manager based on
+ *	active channels on the bus and message-bandwidth requests made by
+ *	SLIMbus devices.
+ *	Based on current bandwidth usage, the manager chooses a frequency to
+ *	run the bus at (in steps of 'clock-gear', 1 through 10, each clock
+ *	gear representing twice the frequency of the previous gear).
+ *	The manager is also responsible for entering (and exiting) the
+ *	low-power mode known as 'clock pause'.
+ *	The manager can hand over the framer role if there are multiple
+ *	framers on the bus and a use case warrants using a particular
+ *	framer, so the previously active framer need not stay powered on.
+ *
+ *	Controller here performs duties of the manager device, and 'interface
+ *	device'. Interface device is responsible for monitoring the bus and
+ *	reporting information such as loss-of-synchronization, data
+ *	slot-collision.
+ */
+struct slim_controller {
+	struct device		*dev;
+	unsigned int		id;
+	char			name[SLIMBUS_NAME_SIZE];
+	int			min_cg;
+	int			max_cg;
+	int			clkgear;
+	struct ida		laddr_ida;
+	struct slim_framer	*a_framer;
+	struct mutex		lock;
+	struct list_head	devices;
+	struct idr		tid_idr;
+	spinlock_t		txn_lock;
+	struct slim_sched	sched;
+	int			(*xfer_msg)(struct slim_controller *ctrl,
+					    struct slim_msg_txn *tx);
+	int			(*set_laddr)(struct slim_controller *ctrl,
+					     struct slim_eaddr *ea, u8 laddr);
+	int			(*get_laddr)(struct slim_controller *ctrl,
+					     struct slim_eaddr *ea, u8 *laddr);
+	int			(*wakeup)(struct slim_controller *ctrl);
+};
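+
+/*
+ * Illustrative sketch (hypothetical names): a minimal controller
+ * registration; a real driver also fills in clocking, interrupt and
+ * wakeup handling:
+ *
+ *	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+ *	if (!ctrl)
+ *		return -ENOMEM;
+ *	ctrl->dev = &pdev->dev;
+ *	ctrl->xfer_msg = example_xfer_msg;
+ *	ctrl->set_laddr = example_set_laddr;
+ *	ret = slim_register_controller(ctrl);
+ */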
+
+int slim_device_report_present(struct slim_controller *ctrl,
+			       struct slim_eaddr *e_addr, u8 *laddr);
+void slim_report_absent(struct slim_device *sbdev);
+int slim_register_controller(struct slim_controller *ctrl);
+int slim_unregister_controller(struct slim_controller *ctrl);
+void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 l);
+int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn);
+int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart);
+
+static inline bool slim_tid_txn(u8 mt, u8 mc)
+{
+	return (mt == SLIM_MSG_MT_CORE &&
+		(mc == SLIM_MSG_MC_REQUEST_INFORMATION ||
+		 mc == SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION ||
+		 mc == SLIM_MSG_MC_REQUEST_VALUE ||
+		 mc == SLIM_MSG_MC_REQUEST_CHANGE_VALUE));
+}
+
+static inline bool slim_ec_txn(u8 mt, u8 mc)
+{
+	return (mt == SLIM_MSG_MT_CORE &&
+		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
+		  mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
+		 (mc >= SLIM_MSG_MC_REQUEST_VALUE &&
+		  mc <= SLIM_MSG_MC_CHANGE_VALUE)));
+}
+#endif /* _DRIVERS_SLIMBUS_H */
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
new file mode 100644
index 0000000..08aad9f
--- /dev/null
+++ b/drivers/soundwire/Kconfig
@@ -0,0 +1,36 @@
+#
+# SoundWire subsystem configuration
+#
+
+menuconfig SOUNDWIRE
+	bool "SoundWire support"
+	---help---
+	  SoundWire is a 2-pin interface with data and clock lines, ratified
+	  by the MIPI Alliance. SoundWire is used for transporting data
+	  typically related to audio functions. The SoundWire interface is
+	  optimized to integrate audio devices in mobile or mobile-inspired
+	  systems. Say Y to enable this subsystem, N if you do not have such
+	  a device.
+
+if SOUNDWIRE
+
+comment "SoundWire Devices"
+
+config SOUNDWIRE_BUS
+	tristate
+
+config SOUNDWIRE_CADENCE
+	tristate
+
+config SOUNDWIRE_INTEL
+	tristate "Intel SoundWire Master driver"
+	select SOUNDWIRE_CADENCE
+	select SOUNDWIRE_BUS
+	depends on X86 && ACPI
+	---help---
+	  SoundWire Intel Master driver.
+	  If you have an Intel platform with a SoundWire Master, enable
+	  this config option to get SoundWire support for that device.
+
+endif
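+
+# Illustrative .config fragment: build the subsystem in and the Intel
+# Master driver as a module (which selects the bus and Cadence helpers):
+#
+#   CONFIG_SOUNDWIRE=y
+#   CONFIG_SOUNDWIRE_INTEL=m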
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
new file mode 100644
index 0000000..e1a74c5
--- /dev/null
+++ b/drivers/soundwire/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for soundwire core
+#
+
+#Bus Objs
+soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o
+obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
+
+#Cadence Objs
+soundwire-cadence-objs := cadence_master.o
+obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o
+
+#Intel driver
+soundwire-intel-objs :=	intel.o
+obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
+
+soundwire-intel-init-objs := intel_init.o
+obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel-init.o
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
new file mode 100644
index 0000000..4c34519
--- /dev/null
+++ b/drivers/soundwire/bus.c
@@ -0,0 +1,997 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pm_runtime.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <linux/soundwire/sdw.h>
+#include "bus.h"
+
+/**
+ * sdw_add_bus_master() - add a bus Master instance
+ * @bus: bus instance
+ *
+ * Initializes the bus instance, reads properties and creates child
+ * devices.
+ */
+int sdw_add_bus_master(struct sdw_bus *bus)
+{
+	int ret;
+
+	if (!bus->dev) {
+		pr_err("SoundWire bus has no device");
+		return -ENODEV;
+	}
+
+	if (!bus->ops) {
+		dev_err(bus->dev, "SoundWire Bus ops are not set");
+		return -EINVAL;
+	}
+
+	mutex_init(&bus->msg_lock);
+	mutex_init(&bus->bus_lock);
+	INIT_LIST_HEAD(&bus->slaves);
+
+	if (bus->ops->read_prop) {
+		ret = bus->ops->read_prop(bus);
+		if (ret < 0) {
+			dev_err(bus->dev, "Bus read properties failed:%d", ret);
+			return ret;
+		}
+	}
+
+	/*
+	 * Device numbers in SoundWire are 0 through 15. Enumeration device
+	 * number (0), Broadcast device number (15), Group numbers (12 and
+	 * 13) and Master device number (14) are not used for assignment so
+	 * mask these and other higher bits.
+	 */
+
+	/* Set higher order bits */
+	*bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);
+
+	/* Set enumeration device number and broadcast device number */
+	set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
+	set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);
+
+	/* Set group device numbers and master device number */
+	set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
+	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
+	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
+
+	/*
+	 * SDW is an enumerable bus, but devices can be powered off. So,
+	 * they won't be able to report as present.
+	 *
+	 * Create Slave devices based on Slaves described in
+	 * the respective firmware (ACPI/DT)
+	 */
+	if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
+		ret = sdw_acpi_find_slaves(bus);
+	else
+		ret = -ENOTSUPP; /* No ACPI/DT so error out */
+
+	if (ret) {
+		dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(sdw_add_bus_master);
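+
+/*
+ * Illustrative usage sketch (hypothetical names): a Master driver embeds
+ * struct sdw_bus, sets the device and ops, then registers the instance:
+ *
+ *	bus->dev = parent_dev;
+ *	bus->ops = &example_master_ops;
+ *	ret = sdw_add_bus_master(bus);
+ *	if (ret < 0)
+ *		dev_err(parent_dev, "bus registration failed: %d\n", ret);
+ */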
+
+static int sdw_delete_slave(struct device *dev, void *data)
+{
+	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+	struct sdw_bus *bus = slave->bus;
+
+	mutex_lock(&bus->bus_lock);
+
+	if (slave->dev_num) /* clear dev_num if assigned */
+		clear_bit(slave->dev_num, bus->assigned);
+
+	list_del_init(&slave->node);
+	mutex_unlock(&bus->bus_lock);
+
+	device_unregister(dev);
+	return 0;
+}
+
+/**
+ * sdw_delete_bus_master() - delete the bus master instance
+ * @bus: bus to be deleted
+ *
+ * Remove the instance, delete the child devices.
+ */
+void sdw_delete_bus_master(struct sdw_bus *bus)
+{
+	device_for_each_child(bus->dev, NULL, sdw_delete_slave);
+}
+EXPORT_SYMBOL(sdw_delete_bus_master);
+
+/*
+ * SDW IO Calls
+ */
+
+static inline int find_response_code(enum sdw_command_response resp)
+{
+	switch (resp) {
+	case SDW_CMD_OK:
+		return 0;
+
+	case SDW_CMD_IGNORED:
+		return -ENODATA;
+
+	case SDW_CMD_TIMEOUT:
+		return -ETIMEDOUT;
+
+	default:
+		return -EIO;
+	}
+}
+
+static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
+{
+	int retry = bus->prop.err_threshold;
+	enum sdw_command_response resp;
+	int ret = 0, i;
+
+	for (i = 0; i <= retry; i++) {
+		resp = bus->ops->xfer_msg(bus, msg);
+		ret = find_response_code(resp);
+
+		/* if cmd is ok or ignored return */
+		if (ret == 0 || ret == -ENODATA)
+			return ret;
+	}
+
+	return ret;
+}
+
+static inline int do_transfer_defer(struct sdw_bus *bus,
+			struct sdw_msg *msg, struct sdw_defer *defer)
+{
+	int retry = bus->prop.err_threshold;
+	enum sdw_command_response resp;
+	int ret = 0, i;
+
+	defer->msg = msg;
+	defer->length = msg->len;
+
+	for (i = 0; i <= retry; i++) {
+		resp = bus->ops->xfer_msg_defer(bus, msg, defer);
+		ret = find_response_code(resp);
+		/* if cmd is ok or ignored return */
+		if (ret == 0 || ret == -ENODATA)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int sdw_reset_page(struct sdw_bus *bus, u16 dev_num)
+{
+	int retry = bus->prop.err_threshold;
+	enum sdw_command_response resp;
+	int ret = 0, i;
+
+	for (i = 0; i <= retry; i++) {
+		resp = bus->ops->reset_page_addr(bus, dev_num);
+		ret = find_response_code(resp);
+		/* if cmd is ok or ignored return */
+		if (ret == 0 || ret == -ENODATA)
+			return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * sdw_transfer() - Synchronous transfer message to a SDW Slave device
+ * @bus: SDW bus
+ * @msg: SDW message to be xfered
+ */
+int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
+{
+	int ret;
+
+	mutex_lock(&bus->msg_lock);
+
+	ret = do_transfer(bus, msg);
+	if (ret != 0 && ret != -ENODATA)
+		dev_err(bus->dev, "trf on Slave %d failed:%d\n",
+				msg->dev_num, ret);
+
+	if (msg->page)
+		sdw_reset_page(bus, msg->dev_num);
+
+	mutex_unlock(&bus->msg_lock);
+
+	return ret;
+}
+
+/**
+ * sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
+ * @bus: SDW bus
+ * @msg: SDW message to be xfered
+ * @defer: Defer block for signal completion
+ *
+ * Caller needs to hold the msg_lock lock while calling this
+ */
+int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
+				struct sdw_defer *defer)
+{
+	int ret;
+
+	if (!bus->ops->xfer_msg_defer)
+		return -ENOTSUPP;
+
+	ret = do_transfer_defer(bus, msg, defer);
+	if (ret != 0 && ret != -ENODATA)
+		dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
+				msg->dev_num, ret);
+
+	if (msg->page)
+		sdw_reset_page(bus, msg->dev_num);
+
+	return ret;
+}
+
+int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
+		u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
+{
+	memset(msg, 0, sizeof(*msg));
+	msg->addr = addr; /* addr is 16 bit and truncated here */
+	msg->len = count;
+	msg->dev_num = dev_num;
+	msg->flags = flags;
+	msg->buf = buf;
+	msg->ssp_sync = false;
+	msg->page = false;
+
+	if (addr < SDW_REG_NO_PAGE) { /* no paging area */
+		return 0;
+	} else if (addr >= SDW_REG_MAX) { /* illegal addr */
+		pr_err("SDW: Invalid address %x passed\n", addr);
+		return -EINVAL;
+	}
+
+	if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
+		if (slave && !slave->prop.paging_support)
+			return 0;
+		/* no need for else as that will fall thru to paging */
+	}
+
+	/* paging mandatory */
+	if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
+		pr_err("SDW: Invalid device for paging :%d\n", dev_num);
+		return -EINVAL;
+	}
+
+	if (!slave) {
+		pr_err("SDW: No slave for paging addr\n");
+		return -EINVAL;
+	} else if (!slave->prop.paging_support) {
+		dev_err(&slave->dev,
+			"address %x needs paging but no support", addr);
+		return -EINVAL;
+	}
+
+	msg->addr_page1 = (addr >> SDW_REG_SHIFT(SDW_SCP_ADDRPAGE1_MASK));
+	msg->addr_page2 = (addr >> SDW_REG_SHIFT(SDW_SCP_ADDRPAGE2_MASK));
+	msg->addr |= BIT(15);
+	msg->page = true;
+
+	return 0;
+}
+
+/**
+ * sdw_nread() - Read "n" contiguous SDW Slave registers
+ * @slave: SDW Slave
+ * @addr: Register address
+ * @count: number of registers (bytes) to read
+ * @val: Buffer for the values to be read
+ */
+int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+	struct sdw_msg msg;
+	int ret;
+
+	ret = sdw_fill_msg(&msg, slave, addr, count,
+			slave->dev_num, SDW_MSG_FLAG_READ, val);
+	if (ret < 0)
+		return ret;
+
+	ret = pm_runtime_get_sync(slave->bus->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = sdw_transfer(slave->bus, &msg);
+	pm_runtime_put(slave->bus->dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdw_nread);
+
+/**
+ * sdw_nwrite() - Write "n" contiguous SDW Slave registers
+ * @slave: SDW Slave
+ * @addr: Register address
+ * @count: number of registers (bytes) to write
+ * @val: Buffer of values to be written
+ */
+int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+	struct sdw_msg msg;
+	int ret;
+
+	ret = sdw_fill_msg(&msg, slave, addr, count,
+			slave->dev_num, SDW_MSG_FLAG_WRITE, val);
+	if (ret < 0)
+		return ret;
+
+	ret = pm_runtime_get_sync(slave->bus->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = sdw_transfer(slave->bus, &msg);
+	pm_runtime_put(slave->bus->dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdw_nwrite);
+
+/**
+ * sdw_read() - Read a SDW Slave register
+ * @slave: SDW Slave
+ * @addr: Register address
+ */
+int sdw_read(struct sdw_slave *slave, u32 addr)
+{
+	u8 buf;
+	int ret;
+
+	ret = sdw_nread(slave, addr, 1, &buf);
+	if (ret < 0)
+		return ret;
+	else
+		return buf;
+}
+EXPORT_SYMBOL(sdw_read);
+
+/**
+ * sdw_write() - Write a SDW Slave register
+ * @slave: SDW Slave
+ * @addr: Register address
+ * @value: Register value
+ */
+int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
+{
+	return sdw_nwrite(slave, addr, 1, &value);
+}
+EXPORT_SYMBOL(sdw_write);
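+
+/*
+ * Illustrative usage of the register IO helpers above (registers chosen
+ * for illustration only):
+ *
+ *	int ret, val;
+ *	u8 buf[2];
+ *
+ *	val = sdw_read(slave, SDW_SCP_DEVNUMBER);
+ *	ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, buf);
+ *
+ * where a negative return indicates failure and, for sdw_read(), a
+ * non-negative return is the register value.
+ */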
+
+/*
+ * SDW alert handling
+ */
+
+/* called with bus_lock held */
+static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
+{
+	struct sdw_slave *slave = NULL;
+
+	list_for_each_entry(slave, &bus->slaves, node) {
+		if (slave->dev_num == i)
+			return slave;
+	}
+
+	return NULL;
+}
+
+static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
+{
+	if ((slave->id.unique_id != id.unique_id) ||
+	    (slave->id.mfg_id != id.mfg_id) ||
+	    (slave->id.part_id != id.part_id) ||
+	    (slave->id.class_id != id.class_id))
+		return -ENODEV;
+
+	return 0;
+}
+
+/* called with bus_lock held */
+static int sdw_get_device_num(struct sdw_slave *slave)
+{
+	int bit;
+
+	bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
+	if (bit == SDW_MAX_DEVICES) {
+		bit = -ENODEV;
+		goto err;
+	}
+
+	/*
+	 * Do not update dev_num in Slave data structure here,
+	 * Update once program dev_num is successful
+	 */
+	set_bit(bit, slave->bus->assigned);
+
+err:
+	return bit;
+}
+
+static int sdw_assign_device_num(struct sdw_slave *slave)
+{
+	int ret, dev_num;
+
+	/* check first if device number is assigned, if so reuse that */
+	if (!slave->dev_num) {
+		mutex_lock(&slave->bus->bus_lock);
+		dev_num = sdw_get_device_num(slave);
+		mutex_unlock(&slave->bus->bus_lock);
+		if (dev_num < 0) {
+			dev_err(slave->bus->dev, "Get dev_num failed: %d",
+								dev_num);
+			return dev_num;
+		}
+	} else {
+		dev_info(slave->bus->dev,
+				"Slave already registered dev_num:%d",
+				slave->dev_num);
+
+		/* Clear the slave->dev_num to transfer message on device 0 */
+		dev_num = slave->dev_num;
+		slave->dev_num = 0;
+	}
+
+	ret = sdw_write(slave, SDW_SCP_DEVNUMBER, dev_num);
+	if (ret < 0) {
+		dev_err(&slave->dev, "Program device_num failed: %d", ret);
+		return ret;
+	}
+
+	/* After xfer of msg, restore dev_num */
+	slave->dev_num = dev_num;
+
+	return 0;
+}
+
+void sdw_extract_slave_id(struct sdw_bus *bus,
+			u64 addr, struct sdw_slave_id *id)
+{
+	dev_dbg(bus->dev, "SDW Slave Addr: %llx", addr);
+
+	/*
+	 * Spec definition
+	 *   Register		Bit	Contents
+	 *   DevId_0 [7:4]	47:44	sdw_version
+	 *   DevId_0 [3:0]	43:40	unique_id
+	 *   DevId_1		39:32	mfg_id [15:8]
+	 *   DevId_2		31:24	mfg_id [7:0]
+	 *   DevId_3		23:16	part_id [15:8]
+	 *   DevId_4		15:08	part_id [7:0]
+	 *   DevId_5		07:00	class_id
+	 */
+	id->sdw_version = (addr >> 44) & GENMASK(3, 0);
+	id->unique_id = (addr >> 40) & GENMASK(3, 0);
+	id->mfg_id = (addr >> 24) & GENMASK(15, 0);
+	id->part_id = (addr >> 8) & GENMASK(15, 0);
+	id->class_id = addr & GENMASK(7, 0);
+
+	dev_dbg(bus->dev,
+		"SDW Slave class_id %x, part_id %x, mfg_id %x, unique_id %x, version %x",
+				id->class_id, id->part_id, id->mfg_id,
+				id->unique_id, id->sdw_version);
+}
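+
+/*
+ * Worked example (hypothetical IDs): addr = 0x11025D070000 decodes to
+ * sdw_version = 0x1, unique_id = 0x1, mfg_id = 0x025D, part_id = 0x0700
+ * and class_id = 0x00, per the bit layout above.
+ */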
+
+static int sdw_program_device_num(struct sdw_bus *bus)
+{
+	u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
+	struct sdw_slave *slave, *_s;
+	struct sdw_slave_id id;
+	struct sdw_msg msg;
+	bool found = false;
+	int count = 0, ret;
+	u64 addr;
+
+	/* No Slave, so use raw xfer api */
+	ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
+			SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
+	if (ret < 0)
+		return ret;
+
+	do {
+		ret = sdw_transfer(bus, &msg);
+		if (ret == -ENODATA) { /* end of device id reads */
+			ret = 0;
+			break;
+		}
+		if (ret < 0) {
+			dev_err(bus->dev, "DEVID read fail:%d\n", ret);
+			break;
+		}
+
+		/*
+		 * Construct the addr and extract. Cast the higher shift
+		 * bits to avoid truncation due to size limit.
+		 */
+		addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
+			(buf[2] << 24) | ((unsigned long long)buf[1] << 32) |
+			((unsigned long long)buf[0] << 40);
+
+		sdw_extract_slave_id(bus, addr, &id);
+
+		/* Now compare with entries */
+		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
+			if (sdw_compare_devid(slave, id) == 0) {
+				found = true;
+
+				/*
+				 * Assign a new dev_num to this Slave and
+				 * not mark it present. It will be marked
+				 * present after it reports ATTACHED on new
+				 * dev_num
+				 */
+				ret = sdw_assign_device_num(slave);
+				if (ret) {
+					dev_err(slave->bus->dev,
+						"Assign dev_num failed:%d",
+						ret);
+					return ret;
+				}
+
+				break;
+			}
+		}
+
+		if (found == false) {
+			/* TODO: Park this device in Group 13 */
+			dev_err(bus->dev, "Slave Entry not found");
+		}
+
+		count++;
+
+		/*
+		 * Check till error out or retry (count) exhausts.
+		 * Device can drop off and rejoin during enumeration
+		 * so count till twice the bound.
+		 */
+
+	} while (ret == 0 && count < (SDW_MAX_DEVICES * 2));
+
+	return ret;
+}
+
+static void sdw_modify_slave_status(struct sdw_slave *slave,
+				enum sdw_slave_status status)
+{
+	mutex_lock(&slave->bus->bus_lock);
+	slave->status = status;
+	mutex_unlock(&slave->bus->bus_lock);
+}
+
+static int sdw_initialize_slave(struct sdw_slave *slave)
+{
+	struct sdw_slave_prop *prop = &slave->prop;
+	int ret;
+	u8 val;
+
+	/*
+	 * Set bus clash, parity and SCP implementation
+	 * defined interrupt mask
+	 * TODO: Read implementation defined interrupt mask
+	 * from Slave property
+	 */
+	val = SDW_SCP_INT1_IMPL_DEF | SDW_SCP_INT1_BUS_CLASH |
+					SDW_SCP_INT1_PARITY;
+
+	/* Enable SCP interrupts */
+	ret = sdw_update(slave, SDW_SCP_INTMASK1, val, val);
+	if (ret < 0) {
+		dev_err(slave->bus->dev,
+				"SDW_SCP_INTMASK1 write failed:%d", ret);
+		return ret;
+	}
+
+	/* No need to continue if DP0 is not present */
+	if (!slave->prop.dp0_prop)
+		return 0;
+
+	/* Enable DP0 interrupts */
+	val = prop->dp0_prop->device_interrupts;
+	val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
+
+	ret = sdw_update(slave, SDW_DP0_INTMASK, val, val);
+	if (ret < 0) {
+		dev_err(slave->bus->dev,
+				"SDW_DP0_INTMASK write failed:%d", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
+{
+	u8 clear = 0, impl_int_mask;
+	int status, status2, ret, count = 0;
+
+	status = sdw_read(slave, SDW_DP0_INT);
+	if (status < 0) {
+		dev_err(slave->bus->dev,
+				"SDW_DP0_INT read failed:%d", status);
+		return status;
+	}
+
+	do {
+
+		if (status & SDW_DP0_INT_TEST_FAIL) {
+			dev_err(&slave->dev, "Test fail for port 0");
+			clear |= SDW_DP0_INT_TEST_FAIL;
+		}
+
+		/*
+		 * Assumption: PORT_READY interrupt will be received only for
+		 * ports implementing Channel Prepare state machine (CP_SM)
+		 */
+
+		if (status & SDW_DP0_INT_PORT_READY) {
+			complete(&slave->port_ready[0]);
+			clear |= SDW_DP0_INT_PORT_READY;
+		}
+
+		if (status & SDW_DP0_INT_BRA_FAILURE) {
+			dev_err(&slave->dev, "BRA failed");
+			clear |= SDW_DP0_INT_BRA_FAILURE;
+		}
+
+		impl_int_mask = SDW_DP0_INT_IMPDEF1 |
+			SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;
+
+		if (status & impl_int_mask) {
+			clear |= impl_int_mask;
+			*slave_status = clear;
+		}
+
+		/* clear the interrupt */
+		ret = sdw_write(slave, SDW_DP0_INT, clear);
+		if (ret < 0) {
+			dev_err(slave->bus->dev,
+				"SDW_DP0_INT write failed:%d", ret);
+			return ret;
+		}
+
+		/* Read DP0 interrupt again */
+		status2 = sdw_read(slave, SDW_DP0_INT);
+		if (status2 < 0) {
+			dev_err(slave->bus->dev,
+				"SDW_DP0_INT read failed:%d", status2);
+			return status2;
+		}
+		status &= status2;
+
+		count++;
+
+		/* we can get alerts while processing so keep retrying */
+	} while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
+
+	if (count == SDW_READ_INTR_CLEAR_RETRY)
+		dev_warn(slave->bus->dev, "Reached MAX_RETRY on DP0 read");
+
+	return ret;
+}
+
+static int sdw_handle_port_interrupt(struct sdw_slave *slave,
+		int port, u8 *slave_status)
+{
+	u8 clear = 0, impl_int_mask;
+	int status, status2, ret, count = 0;
+	u32 addr;
+
+	if (port == 0)
+		return sdw_handle_dp0_interrupt(slave, slave_status);
+
+	addr = SDW_DPN_INT(port);
+	status = sdw_read(slave, addr);
+	if (status < 0) {
+		dev_err(slave->bus->dev,
+				"SDW_DPN_INT read failed:%d", status);
+
+		return status;
+	}
+
+	do {
+
+		if (status & SDW_DPN_INT_TEST_FAIL) {
+			dev_err(&slave->dev, "Test fail for port:%d", port);
+			clear |= SDW_DPN_INT_TEST_FAIL;
+		}
+
+		/*
+		 * Assumption: PORT_READY interrupt will be received only
+		 * for ports implementing CP_SM.
+		 */
+		if (status & SDW_DPN_INT_PORT_READY) {
+			complete(&slave->port_ready[port]);
+			clear |= SDW_DPN_INT_PORT_READY;
+		}
+
+		impl_int_mask = SDW_DPN_INT_IMPDEF1 |
+			SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;
+
+		if (status & impl_int_mask) {
+			clear |= impl_int_mask;
+			*slave_status = clear;
+		}
+
+		/* clear the interrupt */
+		ret = sdw_write(slave, addr, clear);
+		if (ret < 0) {
+			dev_err(slave->bus->dev,
+					"SDW_DPN_INT write failed:%d", ret);
+			return ret;
+		}
+
+		/* Read DPN interrupt again */
+		status2 = sdw_read(slave, addr);
+		if (status2 < 0) {
+			dev_err(slave->bus->dev,
+					"SDW_DPN_INT read failed:%d", status2);
+			return status2;
+		}
+		status &= status2;
+
+		count++;
+
+		/* we can get alerts while processing so keep retrying */
+	} while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
+
+	if (count == SDW_READ_INTR_CLEAR_RETRY)
+		dev_warn(slave->bus->dev, "Reached MAX_RETRY on port read");
+
+	return ret;
+}
+
+static int sdw_handle_slave_alerts(struct sdw_slave *slave)
+{
+	struct sdw_slave_intr_status slave_intr;
+	u8 clear = 0, bit, port_status[15];
+	int port_num, stat, ret, count = 0;
+	unsigned long port;
+	bool slave_notify = false;
+	u8 buf, buf2[2], _buf, _buf2[2];
+
+	sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
+
+	/* Read Instat 1, Instat 2 and Instat 3 registers */
+	ret = sdw_read(slave, SDW_SCP_INT1);
+	if (ret < 0) {
+		dev_err(slave->bus->dev,
+					"SDW_SCP_INT1 read failed:%d", ret);
+		return ret;
+	}
+	buf = ret;
+
+	ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, buf2);
+	if (ret < 0) {
+		dev_err(slave->bus->dev,
+					"SDW_SCP_INT2/3 read failed:%d", ret);
+		return ret;
+	}
+
+	do {
+		/*
+		 * Check parity, bus clash and Slave (impl defined)
+		 * interrupt
+		 */
+		if (buf & SDW_SCP_INT1_PARITY) {
+			dev_err(&slave->dev, "Parity error detected");
+			clear |= SDW_SCP_INT1_PARITY;
+		}
+
+		if (buf & SDW_SCP_INT1_BUS_CLASH) {
+			dev_err(&slave->dev, "Bus clash error detected");
+			clear |= SDW_SCP_INT1_BUS_CLASH;
+		}
+
+		/*
+		 * When bus clash or parity errors are detected, such errors
+		 * are unlikely to be recoverable errors.
+		 * TODO: In such scenario, reset bus. Make this configurable
+		 * via sysfs property with bus reset being the default.
+		 */
+
+		if (buf & SDW_SCP_INT1_IMPL_DEF) {
+			dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
+			clear |= SDW_SCP_INT1_IMPL_DEF;
+			slave_notify = true;
+		}
+
+		/* Check port 0 - 3 interrupts */
+		port = buf & SDW_SCP_INT1_PORT0_3;
+
+		/* To get port number corresponding to bits, shift it */
+		port = port >> SDW_REG_SHIFT(SDW_SCP_INT1_PORT0_3);
+		for_each_set_bit(bit, &port, 8) {
+			sdw_handle_port_interrupt(slave, bit,
+						&port_status[bit]);
+		}
+
+		/* Check if cascade 2 interrupt is present */
+		if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
+			port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
+			for_each_set_bit(bit, &port, 8) {
+				/* scp2 ports start from 4 */
+				port_num = bit + 4;
+				sdw_handle_port_interrupt(slave,
+						port_num,
+						&port_status[port_num]);
+			}
+		}
+
+		/* now check last cascade */
+		if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
+			port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
+			for_each_set_bit(bit, &port, 8) {
+				/* scp3 ports start from 11 */
+				port_num = bit + 11;
+				sdw_handle_port_interrupt(slave,
+						port_num,
+						&port_status[port_num]);
+			}
+		}
+
+		/* Update the Slave driver */
+		if (slave_notify && (slave->ops) &&
+					(slave->ops->interrupt_callback)) {
+			slave_intr.control_port = clear;
+			memcpy(slave_intr.port, &port_status,
+						sizeof(slave_intr.port));
+
+			slave->ops->interrupt_callback(slave, &slave_intr);
+		}
+
+		/* Ack interrupt */
+		ret = sdw_write(slave, SDW_SCP_INT1, clear);
+		if (ret < 0) {
+			dev_err(slave->bus->dev,
+					"SDW_SCP_INT1 write failed:%d", ret);
+			return ret;
+		}
+
+		/*
+		 * Read status again to ensure no new interrupts arrived
+		 * while servicing interrupts.
+		 */
+		ret = sdw_read(slave, SDW_SCP_INT1);
+		if (ret < 0) {
+			dev_err(slave->bus->dev,
+					"SDW_SCP_INT1 read failed:%d", ret);
+			return ret;
+		}
+		_buf = ret;
+
+		ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, _buf2);
+		if (ret < 0) {
+			dev_err(slave->bus->dev,
+					"SDW_SCP_INT2/3 read failed:%d", ret);
+			return ret;
+		}
+
+		/* Make sure no interrupts are pending */
+		buf &= _buf;
+		buf2[0] &= _buf2[0];
+		buf2[1] &= _buf2[1];
+		stat = buf || buf2[0] || buf2[1];
+
+		/*
+		 * Exit loop if Slave is continuously in ALERT state even
+		 * after servicing the interrupt multiple times.
+		 */
+		count++;
+
+		/* we can get alerts while processing so keep retrying */
+	} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
+
+	if (count == SDW_READ_INTR_CLEAR_RETRY)
+		dev_warn(slave->bus->dev, "Reached MAX_RETRY on alert read");
+
+	return ret;
+}
+
+static int sdw_update_slave_status(struct sdw_slave *slave,
+				enum sdw_slave_status status)
+{
+	if ((slave->ops) && (slave->ops->update_status))
+		return slave->ops->update_status(slave, status);
+
+	return 0;
+}
+
+/**
+ * sdw_handle_slave_status() - Handle Slave status
+ * @bus: SDW bus instance
+ * @status: Status for all Slave(s)
+ */
+int sdw_handle_slave_status(struct sdw_bus *bus,
+			enum sdw_slave_status status[])
+{
+	enum sdw_slave_status prev_status;
+	struct sdw_slave *slave;
+	int i, ret = 0;
+
+	if (status[0] == SDW_SLAVE_ATTACHED) {
+		ret = sdw_program_device_num(bus);
+		if (ret)
+			dev_err(bus->dev, "Slave attach failed: %d", ret);
+	}
+
+	/* Continue to check other slave statuses */
+	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
+		mutex_lock(&bus->bus_lock);
+		if (test_bit(i, bus->assigned) == false) {
+			mutex_unlock(&bus->bus_lock);
+			continue;
+		}
+		mutex_unlock(&bus->bus_lock);
+
+		slave = sdw_get_slave(bus, i);
+		if (!slave)
+			continue;
+
+		switch (status[i]) {
+		case SDW_SLAVE_UNATTACHED:
+			if (slave->status == SDW_SLAVE_UNATTACHED)
+				break;
+
+			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
+			break;
+
+		case SDW_SLAVE_ALERT:
+			ret = sdw_handle_slave_alerts(slave);
+			if (ret)
+				dev_err(bus->dev,
+					"Slave %d alert handling failed: %d",
+					i, ret);
+			break;
+
+		case SDW_SLAVE_ATTACHED:
+			if (slave->status == SDW_SLAVE_ATTACHED)
+				break;
+
+			prev_status = slave->status;
+			sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);
+
+			if (prev_status == SDW_SLAVE_ALERT)
+				break;
+
+			ret = sdw_initialize_slave(slave);
+			if (ret)
+				dev_err(bus->dev,
+					"Slave %d initialization failed: %d",
+					i, ret);
+
+			break;
+
+		default:
+			dev_err(bus->dev, "Invalid slave %d status:%d",
+							i, status[i]);
+			break;
+		}
+
+		ret = sdw_update_slave_status(slave, status[i]);
+		if (ret)
+			dev_err(slave->bus->dev,
+				"Update Slave status failed:%d", ret);
+
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(sdw_handle_slave_status);
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
new file mode 100644
index 0000000..345c34d
--- /dev/null
+++ b/drivers/soundwire/bus.h
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#ifndef __SDW_BUS_H
+#define __SDW_BUS_H
+
+#if IS_ENABLED(CONFIG_ACPI)
+int sdw_acpi_find_slaves(struct sdw_bus *bus);
+#else
+static inline int sdw_acpi_find_slaves(struct sdw_bus *bus)
+{
+	return -ENOTSUPP;
+}
+#endif
+
+void sdw_extract_slave_id(struct sdw_bus *bus,
+			u64 addr, struct sdw_slave_id *id);
+
+enum {
+	SDW_MSG_FLAG_READ = 0,
+	SDW_MSG_FLAG_WRITE,
+};
+
+/**
+ * struct sdw_msg - Message structure
+ * @addr: Register address accessed in the Slave
+ * @len: number of bytes to transfer (one command per byte)
+ * @dev_num: Slave device number
+ * @addr_page1: SCP address page 1 Slave register
+ * @addr_page2: SCP address page 2 Slave register
+ * @flags: transfer flags, indicate if xfer is read or write
+ * @buf: message data buffer
+ * @ssp_sync: Send message at SSP (Stream Synchronization Point)
+ * @page: address requires paging
+ */
+struct sdw_msg {
+	u16 addr;
+	u16 len;
+	u8 dev_num;
+	u8 addr_page1;
+	u8 addr_page2;
+	u8 flags;
+	u8 *buf;
+	bool ssp_sync;
+	bool page;
+};
+
+int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg);
+int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
+				struct sdw_defer *defer);
+
+#define SDW_READ_INTR_CLEAR_RETRY	10
+
+int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
+		u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf);
+
+/* Read-Modify-Write Slave register */
+static inline int
+sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+{
+	int tmp;
+
+	tmp = sdw_read(slave, addr);
+	if (tmp < 0)
+		return tmp;
+
+	tmp = (tmp & ~mask) | val;
+	return sdw_write(slave, addr, tmp);
+}
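+
+/*
+ * Illustrative usage (mirrors sdw_initialize_slave() in bus.c): set mask
+ * bits without disturbing the rest of the register:
+ *
+ *	ret = sdw_update(slave, SDW_SCP_INTMASK1, val, val);
+ */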
+
+#endif /* __SDW_BUS_H */
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
new file mode 100644
index 0000000..d5f3a70
--- /dev/null
+++ b/drivers/soundwire/bus_type.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pm_domain.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+
+/**
+ * sdw_get_device_id() - find the matching SoundWire device id
+ * @slave: SoundWire Slave Device
+ * @drv: SoundWire Slave Driver
+ *
+ * The match is done by comparing the mfg_id and part_id from the
+ * struct sdw_device_id.
+ */
+static const struct sdw_device_id *
+sdw_get_device_id(struct sdw_slave *slave, struct sdw_driver *drv)
+{
+	const struct sdw_device_id *id = drv->id_table;
+
+	while (id && id->mfg_id) {
+		if (slave->id.mfg_id == id->mfg_id &&
+		    slave->id.part_id == id->part_id)
+			return id;
+		id++;
+	}
+
+	return NULL;
+}
+
+static int sdw_bus_match(struct device *dev, struct device_driver *ddrv)
+{
+	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+	struct sdw_driver *drv = drv_to_sdw_driver(ddrv);
+
+	return !!sdw_get_device_id(slave, drv);
+}
+
+int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size)
+{
+	/* modalias is sdw:m<mfg_id>p<part_id> */
+
+	return snprintf(buf, size, "sdw:m%04Xp%04X\n",
+			slave->id.mfg_id, slave->id.part_id);
+}
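+
+/*
+ * For example (hypothetical IDs), mfg_id 0x025d and part_id 0x0700 yield
+ * the modalias "sdw:m025Dp0700".
+ */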
+
+static int sdw_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+	char modalias[32];
+
+	sdw_slave_modalias(slave, modalias, sizeof(modalias));
+
+	if (add_uevent_var(env, "MODALIAS=%s", modalias))
+		return -ENOMEM;
+
+	return 0;
+}
+
+struct bus_type sdw_bus_type = {
+	.name = "soundwire",
+	.match = sdw_bus_match,
+	.uevent = sdw_uevent,
+};
+EXPORT_SYMBOL_GPL(sdw_bus_type);
+
+static int sdw_drv_probe(struct device *dev)
+{
+	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+	struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+	const struct sdw_device_id *id;
+	int ret;
+
+	id = sdw_get_device_id(slave, drv);
+	if (!id)
+		return -ENODEV;
+
+	slave->ops = drv->ops;
+
+	/*
+	 * attach to power domain but don't turn on (last arg)
+	 */
+	ret = dev_pm_domain_attach(dev, false);
+	if (ret != -EPROBE_DEFER) {
+		ret = drv->probe(slave, id);
+		if (ret) {
+			dev_err(dev, "Probe of %s failed: %d\n", drv->name, ret);
+			dev_pm_domain_detach(dev, false);
+		}
+	}
+
+	if (ret)
+		return ret;
+
+	/* device is probed so let's read the properties now */
+	if (slave->ops && slave->ops->read_prop)
+		slave->ops->read_prop(slave);
+
+	/*
+	 * Check for valid clk_stop_timeout, use DisCo worst case value of
+	 * 300ms
+	 *
+	 * TODO: check the timeouts and driver removal case
+	 */
+	if (slave->prop.clk_stop_timeout == 0)
+		slave->prop.clk_stop_timeout = 300;
+
+	slave->bus->clk_stop_timeout = max_t(u32, slave->bus->clk_stop_timeout,
+					slave->prop.clk_stop_timeout);
+
+	return 0;
+}
+
+static int sdw_drv_remove(struct device *dev)
+{
+	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+	struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+	int ret = 0;
+
+	if (drv->remove)
+		ret = drv->remove(slave);
+
+	dev_pm_domain_detach(dev, false);
+
+	return ret;
+}
+
+static void sdw_drv_shutdown(struct device *dev)
+{
+	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+	struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+
+	if (drv->shutdown)
+		drv->shutdown(slave);
+}
+
+/**
+ * __sdw_register_driver() - register a SoundWire Slave driver
+ * @drv: driver to register
+ * @owner: owning module/driver
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int __sdw_register_driver(struct sdw_driver *drv, struct module *owner)
+{
+	drv->driver.bus = &sdw_bus_type;
+
+	if (!drv->probe) {
+		pr_err("driver %s didn't provide SDW probe routine\n",
+							drv->name);
+		return -EINVAL;
+	}
+
+	drv->driver.owner = owner;
+	drv->driver.probe = sdw_drv_probe;
+
+	if (drv->remove)
+		drv->driver.remove = sdw_drv_remove;
+
+	if (drv->shutdown)
+		drv->driver.shutdown = sdw_drv_shutdown;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__sdw_register_driver);
+
+/**
+ * sdw_unregister_driver() - unregisters the SoundWire Slave driver
+ * @drv: driver to unregister
+ */
+void sdw_unregister_driver(struct sdw_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(sdw_unregister_driver);
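+
+/*
+ * Illustrative sketch of a Slave driver (hypothetical names; the
+ * sdw_register_driver() wrapper passing THIS_MODULE is assumed to live in
+ * <linux/soundwire/sdw_type.h>):
+ *
+ *	static const struct sdw_device_id example_id[] = {
+ *		{ .mfg_id = 0x025d, .part_id = 0x0700 },
+ *		{},
+ *	};
+ *
+ *	static struct sdw_driver example_sdw_driver = {
+ *		.name = "example-sdw",
+ *		.probe = example_probe,
+ *		.id_table = example_id,
+ *	};
+ *
+ * registered with sdw_register_driver(&example_sdw_driver) from module
+ * init and removed with sdw_unregister_driver() on exit.
+ */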
+
+static int __init sdw_bus_init(void)
+{
+	return bus_register(&sdw_bus_type);
+}
+
+static void __exit sdw_bus_exit(void)
+{
+	bus_unregister(&sdw_bus_type);
+}
+
+postcore_initcall(sdw_bus_init);
+module_exit(sdw_bus_exit);
+
+MODULE_DESCRIPTION("SoundWire bus");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
new file mode 100644
index 0000000..3a9b146
--- /dev/null
+++ b/drivers/soundwire/cadence_master.c
@@ -0,0 +1,751 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+/*
+ * Cadence SoundWire Master module
+ * Used by Master driver
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <linux/soundwire/sdw.h>
+#include "bus.h"
+#include "cadence_master.h"
+
+#define CDNS_MCP_CONFIG				0x0
+
+#define CDNS_MCP_CONFIG_MCMD_RETRY		GENMASK(27, 24)
+#define CDNS_MCP_CONFIG_MPREQ_DELAY		GENMASK(20, 16)
+#define CDNS_MCP_CONFIG_MMASTER			BIT(7)
+#define CDNS_MCP_CONFIG_BUS_REL			BIT(6)
+#define CDNS_MCP_CONFIG_SNIFFER			BIT(5)
+#define CDNS_MCP_CONFIG_SSPMOD			BIT(4)
+#define CDNS_MCP_CONFIG_CMD			BIT(3)
+#define CDNS_MCP_CONFIG_OP			GENMASK(2, 0)
+#define CDNS_MCP_CONFIG_OP_NORMAL		0
+
+#define CDNS_MCP_CONTROL			0x4
+
+#define CDNS_MCP_CONTROL_RST_DELAY		GENMASK(10, 8)
+#define CDNS_MCP_CONTROL_CMD_RST		BIT(7)
+#define CDNS_MCP_CONTROL_SOFT_RST		BIT(6)
+#define CDNS_MCP_CONTROL_SW_RST			BIT(5)
+#define CDNS_MCP_CONTROL_HW_RST			BIT(4)
+#define CDNS_MCP_CONTROL_CLK_PAUSE		BIT(3)
+#define CDNS_MCP_CONTROL_CLK_STOP_CLR		BIT(2)
+#define CDNS_MCP_CONTROL_CMD_ACCEPT		BIT(1)
+#define CDNS_MCP_CONTROL_BLOCK_WAKEUP		BIT(0)
+
+#define CDNS_MCP_CMDCTRL			0x8
+#define CDNS_MCP_SSPSTAT			0xC
+#define CDNS_MCP_FRAME_SHAPE			0x10
+#define CDNS_MCP_FRAME_SHAPE_INIT		0x14
+
+#define CDNS_MCP_CONFIG_UPDATE			0x18
+#define CDNS_MCP_CONFIG_UPDATE_BIT		BIT(0)
+
+#define CDNS_MCP_PHYCTRL			0x1C
+#define CDNS_MCP_SSP_CTRL0			0x20
+#define CDNS_MCP_SSP_CTRL1			0x28
+#define CDNS_MCP_CLK_CTRL0			0x30
+#define CDNS_MCP_CLK_CTRL1			0x38
+
+#define CDNS_MCP_STAT				0x40
+
+#define CDNS_MCP_STAT_ACTIVE_BANK		BIT(20)
+#define CDNS_MCP_STAT_CLK_STOP			BIT(16)
+
+#define CDNS_MCP_INTSTAT			0x44
+#define CDNS_MCP_INTMASK			0x48
+
+#define CDNS_MCP_INT_IRQ			BIT(31)
+#define CDNS_MCP_INT_WAKEUP			BIT(16)
+#define CDNS_MCP_INT_SLAVE_RSVD			BIT(15)
+#define CDNS_MCP_INT_SLAVE_ALERT		BIT(14)
+#define CDNS_MCP_INT_SLAVE_ATTACH		BIT(13)
+#define CDNS_MCP_INT_SLAVE_NATTACH		BIT(12)
+#define CDNS_MCP_INT_SLAVE_MASK			GENMASK(15, 12)
+#define CDNS_MCP_INT_DPINT			BIT(11)
+#define CDNS_MCP_INT_CTRL_CLASH			BIT(10)
+#define CDNS_MCP_INT_DATA_CLASH			BIT(9)
+#define CDNS_MCP_INT_CMD_ERR			BIT(7)
+#define CDNS_MCP_INT_RX_WL			BIT(2)
+#define CDNS_MCP_INT_TXE			BIT(1)
+
+#define CDNS_MCP_INTSET				0x4C
+
+#define CDNS_SDW_SLAVE_STAT			0x50
+#define CDNS_MCP_SLAVE_STAT_MASK		GENMASK(1, 0)
+
+#define CDNS_MCP_SLAVE_INTSTAT0			0x54
+#define CDNS_MCP_SLAVE_INTSTAT1			0x58
+#define CDNS_MCP_SLAVE_INTSTAT_NPRESENT		BIT(0)
+#define CDNS_MCP_SLAVE_INTSTAT_ATTACHED		BIT(1)
+#define CDNS_MCP_SLAVE_INTSTAT_ALERT		BIT(2)
+#define CDNS_MCP_SLAVE_INTSTAT_RESERVED		BIT(3)
+#define CDNS_MCP_SLAVE_STATUS_BITS		GENMASK(3, 0)
+#define CDNS_MCP_SLAVE_STATUS_NUM		4
+
+#define CDNS_MCP_SLAVE_INTMASK0			0x5C
+#define CDNS_MCP_SLAVE_INTMASK1			0x60
+
+#define CDNS_MCP_SLAVE_INTMASK0_MASK		GENMASK(30, 0)
+#define CDNS_MCP_SLAVE_INTMASK1_MASK		GENMASK(16, 0)
+
+#define CDNS_MCP_PORT_INTSTAT			0x64
+#define CDNS_MCP_PDI_STAT			0x6C
+
+#define CDNS_MCP_FIFOLEVEL			0x78
+#define CDNS_MCP_FIFOSTAT			0x7C
+#define CDNS_MCP_RX_FIFO_AVAIL			GENMASK(5, 0)
+
+#define CDNS_MCP_CMD_BASE			0x80
+#define CDNS_MCP_RESP_BASE			0x80
+#define CDNS_MCP_CMD_LEN			0x20
+#define CDNS_MCP_CMD_WORD_LEN			0x4
+
+#define CDNS_MCP_CMD_SSP_TAG			BIT(31)
+#define CDNS_MCP_CMD_COMMAND			GENMASK(30, 28)
+#define CDNS_MCP_CMD_DEV_ADDR			GENMASK(27, 24)
+#define CDNS_MCP_CMD_REG_ADDR_H			GENMASK(23, 16)
+#define CDNS_MCP_CMD_REG_ADDR_L			GENMASK(15, 8)
+#define CDNS_MCP_CMD_REG_DATA			GENMASK(7, 0)
+
+#define CDNS_MCP_CMD_READ			2
+#define CDNS_MCP_CMD_WRITE			3
+
+#define CDNS_MCP_RESP_RDATA			GENMASK(15, 8)
+#define CDNS_MCP_RESP_ACK			BIT(0)
+#define CDNS_MCP_RESP_NACK			BIT(1)
+
+#define CDNS_DP_SIZE				128
+
+#define CDNS_DPN_B0_CONFIG(n)			(0x100 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B0_CH_EN(n)			(0x104 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B0_SAMPLE_CTRL(n)		(0x108 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B0_OFFSET_CTRL(n)		(0x10C + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B0_HCTRL(n)			(0x110 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B0_ASYNC_CTRL(n)		(0x114 + CDNS_DP_SIZE * (n))
+
+#define CDNS_DPN_B1_CONFIG(n)			(0x118 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B1_CH_EN(n)			(0x11C + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B1_SAMPLE_CTRL(n)		(0x120 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B1_OFFSET_CTRL(n)		(0x124 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B1_HCTRL(n)			(0x128 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B1_ASYNC_CTRL(n)		(0x12C + CDNS_DP_SIZE * (n))
+
+#define CDNS_DPN_CONFIG_BPM			BIT(18)
+#define CDNS_DPN_CONFIG_BGC			GENMASK(17, 16)
+#define CDNS_DPN_CONFIG_WL			GENMASK(12, 8)
+#define CDNS_DPN_CONFIG_PORT_DAT		GENMASK(3, 2)
+#define CDNS_DPN_CONFIG_PORT_FLOW		GENMASK(1, 0)
+
+#define CDNS_DPN_SAMPLE_CTRL_SI			GENMASK(15, 0)
+
+#define CDNS_DPN_OFFSET_CTRL_1			GENMASK(7, 0)
+#define CDNS_DPN_OFFSET_CTRL_2			GENMASK(15, 8)
+
+#define CDNS_DPN_HCTRL_HSTOP			GENMASK(3, 0)
+#define CDNS_DPN_HCTRL_HSTART			GENMASK(7, 4)
+#define CDNS_DPN_HCTRL_LCTRL			GENMASK(10, 8)
+
+#define CDNS_PORTCTRL				0x130
+#define CDNS_PORTCTRL_DIRN			BIT(7)
+#define CDNS_PORTCTRL_BANK_INVERT		BIT(8)
+
+#define CDNS_PORT_OFFSET			0x80
+
+#define CDNS_PDI_CONFIG(n)			(0x1100 + (n) * 16)
+
+#define CDNS_PDI_CONFIG_SOFT_RESET		BIT(24)
+#define CDNS_PDI_CONFIG_CHANNEL			GENMASK(15, 8)
+#define CDNS_PDI_CONFIG_PORT			GENMASK(4, 0)
+
+/* Driver defaults */
+
+#define CDNS_DEFAULT_CLK_DIVIDER		0
+#define CDNS_DEFAULT_FRAME_SHAPE		0x30
+#define CDNS_DEFAULT_SSP_INTERVAL		0x18
+#define CDNS_TX_TIMEOUT				2000
+
+#define CDNS_PCM_PDI_OFFSET			0x2
+#define CDNS_PDM_PDI_OFFSET			0x6
+
+#define CDNS_SCP_RX_FIFOLEVEL			0x2
+
+/*
+ * register accessor helpers
+ */
+static inline u32 cdns_readl(struct sdw_cdns *cdns, int offset)
+{
+	return readl(cdns->registers + offset);
+}
+
+static inline void cdns_writel(struct sdw_cdns *cdns, int offset, u32 value)
+{
+	writel(value, cdns->registers + offset);
+}
+
+static inline void cdns_updatel(struct sdw_cdns *cdns,
+				int offset, u32 mask, u32 val)
+{
+	u32 tmp;
+
+	tmp = cdns_readl(cdns, offset);
+	tmp = (tmp & ~mask) | val;
+	cdns_writel(cdns, offset, tmp);
+}
+
+static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
+{
+	int timeout = 10;
+	u32 reg_read;
+
+	writel(value, cdns->registers + offset);
+
+	/* Wait for bit to be self cleared */
+	do {
+		reg_read = readl(cdns->registers + offset);
+		if ((reg_read & value) == 0)
+			return 0;
+
+		timeout--;
+		udelay(50);
+	} while (timeout != 0);
+
+	return -EAGAIN;
+}
+
+/*
+ * IO Calls
+ */
+static enum sdw_command_response cdns_fill_msg_resp(
+			struct sdw_cdns *cdns,
+			struct sdw_msg *msg, int count, int offset)
+{
+	int nack = 0, no_ack = 0;
+	int i;
+
+	/* check message response */
+	for (i = 0; i < count; i++) {
+		if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
+			no_ack = 1;
+			dev_dbg(cdns->dev, "Msg Ack not received\n");
+			if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
+				nack = 1;
+				dev_err(cdns->dev, "Msg NACK received\n");
+			}
+		}
+	}
+
+	if (nack) {
+		dev_err(cdns->dev, "Msg NACKed for Slave %d\n", msg->dev_num);
+		return SDW_CMD_FAIL;
+	} else if (no_ack) {
+		dev_dbg(cdns->dev, "Msg ignored for Slave %d\n", msg->dev_num);
+		return SDW_CMD_IGNORED;
+	}
+
+	/* fill response */
+	for (i = 0; i < count; i++)
+		msg->buf[i + offset] = cdns->response_buf[i] >>
+				SDW_REG_SHIFT(CDNS_MCP_RESP_RDATA);
+
+	return SDW_CMD_OK;
+}
+
+static enum sdw_command_response
+_cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
+				int offset, int count, bool defer)
+{
+	unsigned long time;
+	u32 base, i, data;
+	u16 addr;
+
+	/* Program the watermark level for RX FIFO */
+	if (cdns->msg_count != count) {
+		cdns_writel(cdns, CDNS_MCP_FIFOLEVEL, count);
+		cdns->msg_count = count;
+	}
+
+	base = CDNS_MCP_CMD_BASE;
+	addr = msg->addr;
+
+	for (i = 0; i < count; i++) {
+		data = msg->dev_num << SDW_REG_SHIFT(CDNS_MCP_CMD_DEV_ADDR);
+		data |= cmd << SDW_REG_SHIFT(CDNS_MCP_CMD_COMMAND);
+		data |= addr++  << SDW_REG_SHIFT(CDNS_MCP_CMD_REG_ADDR_L);
+
+		if (msg->flags == SDW_MSG_FLAG_WRITE)
+			data |= msg->buf[i + offset];
+
+		data |= msg->ssp_sync << SDW_REG_SHIFT(CDNS_MCP_CMD_SSP_TAG);
+		cdns_writel(cdns, base, data);
+		base += CDNS_MCP_CMD_WORD_LEN;
+	}
+
+	if (defer)
+		return SDW_CMD_OK;
+
+	/* wait for timeout or response */
+	time = wait_for_completion_timeout(&cdns->tx_complete,
+				msecs_to_jiffies(CDNS_TX_TIMEOUT));
+	if (!time) {
+		dev_err(cdns->dev, "IO transfer timed out\n");
+		msg->len = 0;
+		return SDW_CMD_TIMEOUT;
+	}
+
+	return cdns_fill_msg_resp(cdns, msg, count, offset);
+}
+
+static enum sdw_command_response cdns_program_scp_addr(
+			struct sdw_cdns *cdns, struct sdw_msg *msg)
+{
+	int nack = 0, no_ack = 0;
+	unsigned long time;
+	u32 data[2], base;
+	int i;
+
+	/* Program the watermark level for RX FIFO */
+	if (cdns->msg_count != CDNS_SCP_RX_FIFOLEVEL) {
+		cdns_writel(cdns, CDNS_MCP_FIFOLEVEL, CDNS_SCP_RX_FIFOLEVEL);
+		cdns->msg_count = CDNS_SCP_RX_FIFOLEVEL;
+	}
+
+	data[0] = msg->dev_num << SDW_REG_SHIFT(CDNS_MCP_CMD_DEV_ADDR);
+	data[0] |= 0x3 << SDW_REG_SHIFT(CDNS_MCP_CMD_COMMAND);
+	data[1] = data[0];
+
+	data[0] |= SDW_SCP_ADDRPAGE1 << SDW_REG_SHIFT(CDNS_MCP_CMD_REG_ADDR_L);
+	data[1] |= SDW_SCP_ADDRPAGE2 << SDW_REG_SHIFT(CDNS_MCP_CMD_REG_ADDR_L);
+
+	data[0] |= msg->addr_page1;
+	data[1] |= msg->addr_page2;
+
+	base = CDNS_MCP_CMD_BASE;
+	cdns_writel(cdns, base, data[0]);
+	base += CDNS_MCP_CMD_WORD_LEN;
+	cdns_writel(cdns, base, data[1]);
+
+	time = wait_for_completion_timeout(&cdns->tx_complete,
+				msecs_to_jiffies(CDNS_TX_TIMEOUT));
+	if (!time) {
+		dev_err(cdns->dev, "SCP Msg trf timed out\n");
+		msg->len = 0;
+		return SDW_CMD_TIMEOUT;
+	}
+
+	/* check the response to the writes */
+	for (i = 0; i < 2; i++) {
+		if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
+			no_ack = 1;
+			dev_err(cdns->dev, "Program SCP Ack not received");
+			if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
+				nack = 1;
+				dev_err(cdns->dev, "Program SCP NACK received");
+			}
+		}
+	}
+
+	/* For NACK, NO ack, don't return err if we are in Broadcast mode */
+	if (nack) {
+		dev_err(cdns->dev,
+			"SCP_addrpage NACKed for Slave %d", msg->dev_num);
+		return SDW_CMD_FAIL;
+	} else if (no_ack) {
+		dev_dbg(cdns->dev,
+			"SCP_addrpage ignored for Slave %d", msg->dev_num);
+		return SDW_CMD_IGNORED;
+	}
+
+	return SDW_CMD_OK;
+}
+
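+/*
+ * Common preparation for a transfer: program the SCP address page if the
+ * message is paged, and translate the message flags into a Cadence
+ * command code.
+ */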
+static int cdns_prep_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int *cmd)
+{
+	int ret;
+
+	if (msg->page) {
+		ret = cdns_program_scp_addr(cdns, msg);
+		if (ret) {
+			msg->len = 0;
+			return ret;
+		}
+	}
+
+	switch (msg->flags) {
+	case SDW_MSG_FLAG_READ:
+		*cmd = CDNS_MCP_CMD_READ;
+		break;
+
+	case SDW_MSG_FLAG_WRITE:
+		*cmd = CDNS_MCP_CMD_WRITE;
+		break;
+
+	default:
+		dev_err(cdns->dev, "Invalid msg cmd: %d\n", msg->flags);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
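+/*
+ * Transfer a message as full CDNS_MCP_CMD_LEN-byte batches followed by
+ * one remainder batch, reusing the same prepared command code.
+ */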
+static enum sdw_command_response
+cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
+{
+	struct sdw_cdns *cdns = bus_to_cdns(bus);
+	int cmd = 0, ret, i;
+
+	ret = cdns_prep_msg(cdns, msg, &cmd);
+	if (ret)
+		return SDW_CMD_FAIL_OTHER;
+
+	for (i = 0; i < msg->len / CDNS_MCP_CMD_LEN; i++) {
+		ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
+				CDNS_MCP_CMD_LEN, false);
+		if (ret < 0)
+			goto exit;
+	}
+
+	if (!(msg->len % CDNS_MCP_CMD_LEN))
+		goto exit;
+
+	ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
+			msg->len % CDNS_MCP_CMD_LEN, false);
+
+exit:
+	return ret;
+}
+
+static enum sdw_command_response
+cdns_xfer_msg_defer(struct sdw_bus *bus,
+		struct sdw_msg *msg, struct sdw_defer *defer)
+{
+	struct sdw_cdns *cdns = bus_to_cdns(bus);
+	int cmd = 0, ret;
+
+	/* for defer, only a message of 1 byte is supported */
+	if (msg->len > 1)
+		return -ENOTSUPP;
+
+	ret = cdns_prep_msg(cdns, msg, &cmd);
+	if (ret)
+		return SDW_CMD_FAIL_OTHER;
+
+	cdns->defer = defer;
+	cdns->defer->length = msg->len;
+
+	return _cdns_xfer_msg(cdns, msg, cmd, 0, msg->len, true);
+}
+
+static enum sdw_command_response
+cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num)
+{
+	struct sdw_cdns *cdns = bus_to_cdns(bus);
+	struct sdw_msg msg;
+
+	/* Create dummy message with valid device number */
+	memset(&msg, 0, sizeof(msg));
+	msg.dev_num = dev_num;
+
+	return cdns_program_scp_addr(cdns, &msg);
+}
+
+/*
+ * IRQ handling
+ */
+
+static void cdns_read_response(struct sdw_cdns *cdns)
+{
+	u32 num_resp, cmd_base;
+	int i;
+
+	num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
+	num_resp &= CDNS_MCP_RX_FIFO_AVAIL;
+
+	cmd_base = CDNS_MCP_CMD_BASE;
+
+	for (i = 0; i < num_resp; i++) {
+		cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
+		cmd_base += CDNS_MCP_CMD_WORD_LEN;
+	}
+}
+
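+/*
+ * Decode the combined 64-bit Slave interrupt status: each Slave owns a
+ * CDNS_MCP_SLAVE_STATUS_NUM-bit field whose bits map to the reserved,
+ * attached, alert and unattached states reported to the bus layer.
+ */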
+static int cdns_update_slave_status(struct sdw_cdns *cdns,
+					u32 slave0, u32 slave1)
+{
+	enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
+	bool is_slave = false;
+	u64 slave, mask;
+	int i, set_status;
+
+	/* combine the two status words */
+	slave = ((u64)slave1 << 32) | slave0;
+	memset(status, 0, sizeof(status));
+
+	for (i = 0; i <= SDW_MAX_DEVICES; i++) {
+		mask = (slave >> (i * CDNS_MCP_SLAVE_STATUS_NUM)) &
+				CDNS_MCP_SLAVE_STATUS_BITS;
+		if (!mask)
+			continue;
+
+		is_slave = true;
+		set_status = 0;
+
+		if (mask & CDNS_MCP_SLAVE_INTSTAT_RESERVED) {
+			status[i] = SDW_SLAVE_RESERVED;
+			set_status++;
+		}
+
+		if (mask & CDNS_MCP_SLAVE_INTSTAT_ATTACHED) {
+			status[i] = SDW_SLAVE_ATTACHED;
+			set_status++;
+		}
+
+		if (mask & CDNS_MCP_SLAVE_INTSTAT_ALERT) {
+			status[i] = SDW_SLAVE_ALERT;
+			set_status++;
+		}
+
+		if (mask & CDNS_MCP_SLAVE_INTSTAT_NPRESENT) {
+			status[i] = SDW_SLAVE_UNATTACHED;
+			set_status++;
+		}
+
+		/* first check if Slave reported multiple status */
+		if (set_status > 1) {
+			dev_warn(cdns->dev,
+					"Slave reported multiple Status: %d\n",
+					status[i]);
+			/*
+			 * TODO: we need to reread the status here by
+			 * issuing a PING cmd
+			 */
+		}
+	}
+
+	if (is_slave)
+		return sdw_handle_slave_status(&cdns->bus, status);
+
+	return 0;
+}
+
+/**
+ * sdw_cdns_irq() - Cadence interrupt handler
+ * @irq: irq number
+ * @dev_id: irq context
+ */
+irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
+{
+	struct sdw_cdns *cdns = dev_id;
+	u32 int_status;
+	int ret = IRQ_HANDLED;
+
+	/* Check if the link is up */
+	if (!cdns->link_up)
+		return IRQ_NONE;
+
+	int_status = cdns_readl(cdns, CDNS_MCP_INTSTAT);
+
+	if (!(int_status & CDNS_MCP_INT_IRQ))
+		return IRQ_NONE;
+
+	if (int_status & CDNS_MCP_INT_RX_WL) {
+		cdns_read_response(cdns);
+
+		if (cdns->defer) {
+			cdns_fill_msg_resp(cdns, cdns->defer->msg,
+					cdns->defer->length, 0);
+			complete(&cdns->defer->complete);
+			cdns->defer = NULL;
+		} else {
+			complete(&cdns->tx_complete);
+		}
+	}
+
+	if (int_status & CDNS_MCP_INT_CTRL_CLASH) {
+		/* Slave is driving bit slot during control word */
+		dev_err_ratelimited(cdns->dev, "Bus clash for control word\n");
+		int_status |= CDNS_MCP_INT_CTRL_CLASH;
+	}
+
+	if (int_status & CDNS_MCP_INT_DATA_CLASH) {
+		/*
+		 * Multiple slaves trying to drive bit slot, or issue with
+		 * ownership of data bits or Slave gone bonkers
+		 */
+		dev_err_ratelimited(cdns->dev, "Bus clash for data word\n");
+		int_status |= CDNS_MCP_INT_DATA_CLASH;
+	}
+
+	if (int_status & CDNS_MCP_INT_SLAVE_MASK) {
+		/* Mask the Slave interrupt and wake thread */
+		cdns_updatel(cdns, CDNS_MCP_INTMASK,
+				CDNS_MCP_INT_SLAVE_MASK, 0);
+
+		int_status &= ~CDNS_MCP_INT_SLAVE_MASK;
+		ret = IRQ_WAKE_THREAD;
+	}
+
+	cdns_writel(cdns, CDNS_MCP_INTSTAT, int_status);
+	return ret;
+}
+EXPORT_SYMBOL(sdw_cdns_irq);
+
+/**
+ * sdw_cdns_thread() - Cadence irq thread handler
+ * @irq: irq number
+ * @dev_id: irq context
+ */
+irqreturn_t sdw_cdns_thread(int irq, void *dev_id)
+{
+	struct sdw_cdns *cdns = dev_id;
+	u32 slave0, slave1;
+
+	dev_dbg(cdns->dev, "Slave status change\n");
+
+	slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
+	slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
+
+	cdns_update_slave_status(cdns, slave0, slave1);
+	cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
+	cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
+
+	/* clear and unmask Slave interrupt now */
+	cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
+	cdns_updatel(cdns, CDNS_MCP_INTMASK,
+			CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK);
+
+	return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(sdw_cdns_thread);
+
+/*
+ * init routines
+ */
+static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
+{
+	u32 mask;
+
+	cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0,
+				CDNS_MCP_SLAVE_INTMASK0_MASK);
+	cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1,
+				CDNS_MCP_SLAVE_INTMASK1_MASK);
+
+	mask = CDNS_MCP_INT_SLAVE_RSVD | CDNS_MCP_INT_SLAVE_ALERT |
+		CDNS_MCP_INT_SLAVE_ATTACH | CDNS_MCP_INT_SLAVE_NATTACH |
+		CDNS_MCP_INT_CTRL_CLASH | CDNS_MCP_INT_DATA_CLASH |
+		CDNS_MCP_INT_RX_WL | CDNS_MCP_INT_IRQ | CDNS_MCP_INT_DPINT;
+
+	cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
+
+	return 0;
+}
+
+/**
+ * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns)
+{
+	int ret;
+
+	_cdns_enable_interrupt(cdns);
+	ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
+			CDNS_MCP_CONFIG_UPDATE_BIT);
+	if (ret < 0)
+		dev_err(cdns->dev, "Config update timedout");
+
+	return ret;
+}
+EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
+
+/**
+ * sdw_cdns_init() - Cadence initialization
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_init(struct sdw_cdns *cdns)
+{
+	u32 val;
+	int ret;
+
+	/* Exit clock stop */
+	ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
+			CDNS_MCP_CONTROL_CLK_STOP_CLR);
+	if (ret < 0) {
+		dev_err(cdns->dev, "Couldn't exit from clock stop\n");
+		return ret;
+	}
+
+	/* Set clock divider */
+	val = cdns_readl(cdns, CDNS_MCP_CLK_CTRL0);
+	val |= CDNS_DEFAULT_CLK_DIVIDER;
+	cdns_writel(cdns, CDNS_MCP_CLK_CTRL0, val);
+
+	/* Set the default frame shape */
+	cdns_writel(cdns, CDNS_MCP_FRAME_SHAPE_INIT, CDNS_DEFAULT_FRAME_SHAPE);
+
+	/* Set SSP interval to default value */
+	cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, CDNS_DEFAULT_SSP_INTERVAL);
+	cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, CDNS_DEFAULT_SSP_INTERVAL);
+
+	/* Set cmd accept mode */
+	cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
+					CDNS_MCP_CONTROL_CMD_ACCEPT);
+
+	/* Configure mcp config */
+	val = cdns_readl(cdns, CDNS_MCP_CONFIG);
+
+	/* Set Max cmd retry to 15 */
+	val |= CDNS_MCP_CONFIG_MCMD_RETRY;
+
+	/* Set frame delay between PREQ and ping frame to 15 frames */
+	val |= 0xF << SDW_REG_SHIFT(CDNS_MCP_CONFIG_MPREQ_DELAY);
+
+	/* Disable auto bus release */
+	val &= ~CDNS_MCP_CONFIG_BUS_REL;
+
+	/* Disable sniffer mode */
+	val &= ~CDNS_MCP_CONFIG_SNIFFER;
+
+	/* Set cmd mode for Tx and Rx cmds */
+	val &= ~CDNS_MCP_CONFIG_CMD;
+
+	/* Set operation to normal */
+	val &= ~CDNS_MCP_CONFIG_OP;
+	val |= CDNS_MCP_CONFIG_OP_NORMAL;
+
+	cdns_writel(cdns, CDNS_MCP_CONFIG, val);
+
+	return 0;
+}
+EXPORT_SYMBOL(sdw_cdns_init);
+
+struct sdw_master_ops sdw_cdns_master_ops = {
+	.read_prop = sdw_master_read_prop,
+	.xfer_msg = cdns_xfer_msg,
+	.xfer_msg_defer = cdns_xfer_msg_defer,
+	.reset_page_addr = cdns_reset_page_addr,
+};
+EXPORT_SYMBOL(sdw_cdns_master_ops);
+
+/**
+ * sdw_cdns_probe() - Cadence probe routine
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_probe(struct sdw_cdns *cdns)
+{
+	init_completion(&cdns->tx_complete);
+
+	return 0;
+}
+EXPORT_SYMBOL(sdw_cdns_probe);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Cadence Soundwire Library");
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
new file mode 100644
index 0000000..beaf6c9
--- /dev/null
+++ b/drivers/soundwire/cadence_master.h
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#ifndef __SDW_CADENCE_H
+#define __SDW_CADENCE_H
+
+/**
+ * struct sdw_cdns - Cadence driver context
+ * @dev: Linux device
+ * @bus: Bus handle
+ * @instance: instance number
+ * @response_buf: SoundWire response buffer
+ * @tx_complete: Tx completion
+ * @defer: Defer pointer
+ * @registers: Cadence registers
+ * @link_up: Link status
+ * @msg_count: Messages sent on bus
+ */
+struct sdw_cdns {
+	struct device *dev;
+	struct sdw_bus bus;
+	unsigned int instance;
+
+	u32 response_buf[0x80];
+	struct completion tx_complete;
+	struct sdw_defer *defer;
+
+	void __iomem *registers;
+
+	bool link_up;
+	unsigned int msg_count;
+};
+
+#define bus_to_cdns(_bus) container_of(_bus, struct sdw_cdns, bus)
+
+/* Exported symbols */
+
+int sdw_cdns_probe(struct sdw_cdns *cdns);
+extern struct sdw_master_ops sdw_cdns_master_ops;
+
+irqreturn_t sdw_cdns_irq(int irq, void *dev_id);
+irqreturn_t sdw_cdns_thread(int irq, void *dev_id);
+
+int sdw_cdns_init(struct sdw_cdns *cdns);
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns);
+
+#endif /* __SDW_CADENCE_H */
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
new file mode 100644
index 0000000..6a9177e
--- /dev/null
+++ b/drivers/soundwire/intel.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+/*
+ * SoundWire Intel Master Driver
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
+#include "cadence_master.h"
+#include "intel.h"
+
+/* Intel SHIM Registers Definition */
+#define SDW_SHIM_LCAP			0x0
+#define SDW_SHIM_LCTL			0x4
+#define SDW_SHIM_IPPTR			0x8
+#define SDW_SHIM_SYNC			0xC
+
+#define SDW_SHIM_CTLSCAP(x)		(0x010 + 0x60 * (x))
+#define SDW_SHIM_CTLS0CM(x)		(0x012 + 0x60 * (x))
+#define SDW_SHIM_CTLS1CM(x)		(0x014 + 0x60 * (x))
+#define SDW_SHIM_CTLS2CM(x)		(0x016 + 0x60 * (x))
+#define SDW_SHIM_CTLS3CM(x)		(0x018 + 0x60 * (x))
+#define SDW_SHIM_PCMSCAP(x)		(0x020 + 0x60 * (x))
+
+#define SDW_SHIM_PCMSYCHM(x, y)		(0x022 + (0x60 * (x)) + (0x2 * (y)))
+#define SDW_SHIM_PCMSYCHC(x, y)		(0x042 + (0x60 * (x)) + (0x2 * (y)))
+#define SDW_SHIM_PDMSCAP(x)		(0x062 + 0x60 * (x))
+#define SDW_SHIM_IOCTL(x)		(0x06C + 0x60 * (x))
+#define SDW_SHIM_CTMCTL(x)		(0x06E + 0x60 * (x))
+
+#define SDW_SHIM_WAKEEN			0x190
+#define SDW_SHIM_WAKESTS		0x192
+
+#define SDW_SHIM_LCTL_SPA		BIT(0)
+#define SDW_SHIM_LCTL_CPA		BIT(8)
+
+#define SDW_SHIM_SYNC_SYNCPRD_VAL	0x176F
+#define SDW_SHIM_SYNC_SYNCPRD		GENMASK(14, 0)
+#define SDW_SHIM_SYNC_SYNCCPU		BIT(15)
+#define SDW_SHIM_SYNC_CMDSYNC_MASK	GENMASK(19, 16)
+#define SDW_SHIM_SYNC_CMDSYNC		BIT(16)
+#define SDW_SHIM_SYNC_SYNCGO		BIT(24)
+
+#define SDW_SHIM_PCMSCAP_ISS		GENMASK(3, 0)
+#define SDW_SHIM_PCMSCAP_OSS		GENMASK(7, 4)
+#define SDW_SHIM_PCMSCAP_BSS		GENMASK(12, 8)
+
+#define SDW_SHIM_PCMSYCM_LCHN		GENMASK(3, 0)
+#define SDW_SHIM_PCMSYCM_HCHN		GENMASK(7, 4)
+#define SDW_SHIM_PCMSYCM_STREAM		GENMASK(13, 8)
+#define SDW_SHIM_PCMSYCM_DIR		BIT(15)
+
+#define SDW_SHIM_PDMSCAP_ISS		GENMASK(3, 0)
+#define SDW_SHIM_PDMSCAP_OSS		GENMASK(7, 4)
+#define SDW_SHIM_PDMSCAP_BSS		GENMASK(12, 8)
+#define SDW_SHIM_PDMSCAP_CPSS		GENMASK(15, 13)
+
+#define SDW_SHIM_IOCTL_MIF		BIT(0)
+#define SDW_SHIM_IOCTL_CO		BIT(1)
+#define SDW_SHIM_IOCTL_COE		BIT(2)
+#define SDW_SHIM_IOCTL_DO		BIT(3)
+#define SDW_SHIM_IOCTL_DOE		BIT(4)
+#define SDW_SHIM_IOCTL_BKE		BIT(5)
+#define SDW_SHIM_IOCTL_WPDD		BIT(6)
+#define SDW_SHIM_IOCTL_CIBD		BIT(8)
+#define SDW_SHIM_IOCTL_DIBD		BIT(9)
+
+#define SDW_SHIM_CTMCTL_DACTQE		BIT(0)
+#define SDW_SHIM_CTMCTL_DODS		BIT(1)
+#define SDW_SHIM_CTMCTL_DOAIS		GENMASK(4, 3)
+
+#define SDW_SHIM_WAKEEN_ENABLE		BIT(0)
+#define SDW_SHIM_WAKESTS_STATUS		BIT(0)
+
+/* Intel ALH Register definitions */
+#define SDW_ALH_STRMZCFG(x)		(0x000 + (0x4 * (x)))
+
+#define SDW_ALH_STRMZCFG_DMAT_VAL	0x3
+#define SDW_ALH_STRMZCFG_DMAT		GENMASK(7, 0)
+#define SDW_ALH_STRMZCFG_CHN		GENMASK(19, 16)
+
+struct sdw_intel {
+	struct sdw_cdns cdns;
+	int instance;
+	struct sdw_intel_link_res *res;
+};
+
+#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
+
+/*
+ * Read, write helpers for HW registers
+ */
+static inline int intel_readl(void __iomem *base, int offset)
+{
+	return readl(base + offset);
+}
+
+static inline void intel_writel(void __iomem *base, int offset, int value)
+{
+	writel(value, base + offset);
+}
+
+static inline u16 intel_readw(void __iomem *base, int offset)
+{
+	return readw(base + offset);
+}
+
+static inline void intel_writew(void __iomem *base, int offset, u16 value)
+{
+	writew(value, base + offset);
+}
+
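+/*
+ * Polling helpers: write @value to the register at @offset and poll (up
+ * to 10 times, 50us apart) until the bits in @mask are cleared
+ * (intel_clear_bit) or set (intel_set_bit).
+ */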
+static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
+{
+	int timeout = 10;
+	u32 reg_read;
+
+	writel(value, base + offset);
+	do {
+		reg_read = readl(base + offset);
+		if (!(reg_read & mask))
+			return 0;
+
+		timeout--;
+		udelay(50);
+	} while (timeout != 0);
+
+	return -EAGAIN;
+}
+
+static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
+{
+	int timeout = 10;
+	u32 reg_read;
+
+	writel(value, base + offset);
+	do {
+		reg_read = readl(base + offset);
+		if (reg_read & mask)
+			return 0;
+
+		timeout--;
+		udelay(50);
+	} while (timeout != 0);
+
+	return -EAGAIN;
+}
+
+/*
+ * shim ops
+ */
+
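+/*
+ * Power up a link: set the SPA (power-up request) bit for this link in
+ * LCTL and wait until the hardware acknowledges it by setting the
+ * corresponding CPA bit.
+ */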
+static int intel_link_power_up(struct sdw_intel *sdw)
+{
+	unsigned int link_id = sdw->instance;
+	void __iomem *shim = sdw->res->shim;
+	int spa_mask, cpa_mask;
+	int link_control, ret;
+
+	/* Link power up sequence */
+	link_control = intel_readl(shim, SDW_SHIM_LCTL);
+	spa_mask = (SDW_SHIM_LCTL_SPA << link_id);
+	cpa_mask = (SDW_SHIM_LCTL_CPA << link_id);
+	link_control |=  spa_mask;
+
+	ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
+	if (ret < 0)
+		return ret;
+
+	sdw->cdns.link_up = true;
+	return 0;
+}
+
+static int intel_shim_init(struct sdw_intel *sdw)
+{
+	void __iomem *shim = sdw->res->shim;
+	unsigned int link_id = sdw->instance;
+	int sync_reg, ret;
+	u16 ioctl = 0, act = 0;
+
+	/* Initialize Shim */
+	ioctl |= SDW_SHIM_IOCTL_BKE;
+	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+	ioctl |= SDW_SHIM_IOCTL_WPDD;
+	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+	ioctl |= SDW_SHIM_IOCTL_DO;
+	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+	ioctl |= SDW_SHIM_IOCTL_DOE;
+	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+	/* Switch control from glue logic to the master IP */
+	ioctl = intel_readw(shim,  SDW_SHIM_IOCTL(link_id));
+
+	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
+	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+	ioctl &= ~(SDW_SHIM_IOCTL_DO);
+	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+	ioctl |= (SDW_SHIM_IOCTL_MIF);
+	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
+	ioctl &= ~(SDW_SHIM_IOCTL_COE);
+
+	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+	act |= 0x1 << SDW_REG_SHIFT(SDW_SHIM_CTMCTL_DOAIS);
+	act |= SDW_SHIM_CTMCTL_DACTQE;
+	act |= SDW_SHIM_CTMCTL_DODS;
+	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
+
+	/* Now set SyncPRD period */
+	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+	sync_reg |= (SDW_SHIM_SYNC_SYNCPRD_VAL <<
+			SDW_REG_SHIFT(SDW_SHIM_SYNC_SYNCPRD));
+
+	/* Set SyncCPU bit */
+	sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
+	ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
+				SDW_SHIM_SYNC_SYNCCPU);
+	if (ret < 0)
+		dev_err(sdw->cdns.dev, "Failed to set sync period: %d", ret);
+
+	return ret;
+}
+
+static int intel_prop_read(struct sdw_bus *bus)
+{
+	/* Initialize with default handler to read all DisCo properties */
+	sdw_master_read_prop(bus);
+
+	/* The BIOS doesn't provide some values correctly, so override them */
+	bus->prop.num_freq = 1;
+	bus->prop.freq = devm_kcalloc(bus->dev, sizeof(*bus->prop.freq),
+					bus->prop.num_freq, GFP_KERNEL);
+	if (!bus->prop.freq)
+		return -ENOMEM;
+
+	bus->prop.freq[0] = bus->prop.max_freq;
+	bus->prop.err_threshold = 5;
+
+	return 0;
+}
+
+/*
+ * probe and init
+ */
+static int intel_probe(struct platform_device *pdev)
+{
+	struct sdw_intel *sdw;
+	int ret;
+
+	sdw = devm_kzalloc(&pdev->dev, sizeof(*sdw), GFP_KERNEL);
+	if (!sdw)
+		return -ENOMEM;
+
+	sdw->instance = pdev->id;
+	sdw->res = dev_get_platdata(&pdev->dev);
+	sdw->cdns.dev = &pdev->dev;
+	sdw->cdns.registers = sdw->res->registers;
+	sdw->cdns.instance = sdw->instance;
+	sdw->cdns.msg_count = 0;
+	sdw->cdns.bus.dev = &pdev->dev;
+	sdw->cdns.bus.link_id = pdev->id;
+
+	sdw_cdns_probe(&sdw->cdns);
+
+	/* Set property read ops */
+	sdw_cdns_master_ops.read_prop = intel_prop_read;
+	sdw->cdns.bus.ops = &sdw_cdns_master_ops;
+
+	platform_set_drvdata(pdev, sdw);
+
+	ret = sdw_add_bus_master(&sdw->cdns.bus);
+	if (ret) {
+		dev_err(&pdev->dev, "sdw_add_bus_master fail: %d\n", ret);
+		goto err_master_reg;
+	}
+
+	/* Initialize shim and controller */
+	intel_link_power_up(sdw);
+	intel_shim_init(sdw);
+
+	ret = sdw_cdns_init(&sdw->cdns);
+	if (ret)
+		goto err_init;
+
+	ret = sdw_cdns_enable_interrupt(&sdw->cdns);
+	if (ret)
+		goto err_init;
+
+	/* Acquire IRQ */
+	ret = request_threaded_irq(sdw->res->irq, sdw_cdns_irq,
+			sdw_cdns_thread, IRQF_SHARED, KBUILD_MODNAME,
+			&sdw->cdns);
+	if (ret < 0) {
+		dev_err(sdw->cdns.dev, "unable to grab IRQ %d, disabling device\n",
+				sdw->res->irq);
+		goto err_init;
+	}
+
+	return 0;
+
+err_init:
+	sdw_delete_bus_master(&sdw->cdns.bus);
+err_master_reg:
+	return ret;
+}
+
+static int intel_remove(struct platform_device *pdev)
+{
+	struct sdw_intel *sdw;
+
+	sdw = platform_get_drvdata(pdev);
+
+	free_irq(sdw->res->irq, sdw);
+	sdw_delete_bus_master(&sdw->cdns.bus);
+
+	return 0;
+}
+
+static struct platform_driver sdw_intel_drv = {
+	.probe = intel_probe,
+	.remove = intel_remove,
+	.driver = {
+		.name = "int-sdw",
+
+	},
+};
+
+module_platform_driver(sdw_intel_drv);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:int-sdw");
+MODULE_DESCRIPTION("Intel Soundwire Master Driver");
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
new file mode 100644
index 0000000..ffa30d9
--- /dev/null
+++ b/drivers/soundwire/intel.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#ifndef __SDW_INTEL_LOCAL_H
+#define __SDW_INTEL_LOCAL_H
+
+/**
+ * struct sdw_intel_link_res - SoundWire link resources
+ * @registers: Link IO registers base
+ * @shim: Audio shim pointer
+ * @alh: ALH (Audio Link Hub) pointer
+ * @irq: Interrupt line
+ *
+ * This is set as pdata for each link instance.
+ */
+struct sdw_intel_link_res {
+	void __iomem *registers;
+	void __iomem *shim;
+	void __iomem *alh;
+	int irq;
+};
+
+#endif /* __SDW_INTEL_LOCAL_H */
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
new file mode 100644
index 0000000..6f2bb99
--- /dev/null
+++ b/drivers/soundwire/intel_init.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+/*
+ * SDW Intel Init Routines
+ *
+ * Initializes and creates SDW devices based on ACPI and Hardware values
+ */
+
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/soundwire/sdw_intel.h>
+#include "intel.h"
+
+#define SDW_MAX_LINKS		4
+#define SDW_SHIM_LCAP		0x0
+#define SDW_SHIM_BASE		0x2C000
+#define SDW_ALH_BASE		0x2C800
+#define SDW_LINK_BASE		0x30000
+#define SDW_LINK_SIZE		0x10000
+
+struct sdw_link_data {
+	struct sdw_intel_link_res res;
+	struct platform_device *pdev;
+};
+
+struct sdw_intel_ctx {
+	int count;
+	struct sdw_link_data *links;
+};
+
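+/*
+ * Unregister all link platform devices created by
+ * sdw_intel_add_controller() and free the per-link bookkeeping.
+ */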
+static int sdw_intel_cleanup_pdev(struct sdw_intel_ctx *ctx)
+{
+	struct sdw_link_data *link = ctx->links;
+	int i;
+
+	if (!link)
+		return 0;
+
+	for (i = 0; i < ctx->count; i++) {
+		if (link->pdev)
+			platform_device_unregister(link->pdev);
+		link++;
+	}
+
+	kfree(ctx->links);
+	ctx->links = NULL;
+
+	return 0;
+}
+
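+/*
+ * Probe the master count (ACPI property capped by the SNDWLCAP register)
+ * and register one "int-sdw" platform device per link, handing each its
+ * register, SHIM and ALH windows plus the shared IRQ as platform data.
+ */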
+static struct sdw_intel_ctx *
+sdw_intel_add_controller(struct sdw_intel_res *res)
+{
+	struct platform_device_info pdevinfo;
+	struct platform_device *pdev;
+	struct sdw_link_data *link;
+	struct sdw_intel_ctx *ctx;
+	struct acpi_device *adev;
+	int ret, i;
+	u8 count;
+	u32 caps;
+
+	if (acpi_bus_get_device(res->handle, &adev))
+		return NULL;
+
+	/* Found controller, find links supported */
+	count = 0;
+	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
+				  "mipi-sdw-master-count", &count, 1);
+
+	/* Don't fail on error, continue and use hw value */
+	if (ret) {
+		dev_err(&adev->dev,
+			"Failed to read mipi-sdw-master-count: %d\n", ret);
+		count = SDW_MAX_LINKS;
+	}
+
+	/* Check SNDWLCAP.LCOUNT */
+	caps = ioread32(res->mmio_base + SDW_SHIM_BASE + SDW_SHIM_LCAP);
+
+	/* Check HW supported vs property value and use min of two */
+	count = min_t(u8, caps, count);
+
+	/* Check count is within bounds */
+	if (count > SDW_MAX_LINKS) {
+		dev_err(&adev->dev, "Link count %d exceeds max %d\n",
+						count, SDW_MAX_LINKS);
+		return NULL;
+	}
+
+	dev_dbg(&adev->dev, "Creating %d SDW Link devices\n", count);
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return NULL;
+
+	ctx->count = count;
+	ctx->links = kcalloc(ctx->count, sizeof(*ctx->links), GFP_KERNEL);
+	if (!ctx->links)
+		goto link_err;
+
+	link = ctx->links;
+
+	/* Create SDW Master devices */
+	for (i = 0; i < count; i++) {
+		link->res.irq = res->irq;
+		link->res.registers = res->mmio_base + SDW_LINK_BASE
+					+ (SDW_LINK_SIZE * i);
+		link->res.shim = res->mmio_base + SDW_SHIM_BASE;
+		link->res.alh = res->mmio_base + SDW_ALH_BASE;
+
+		memset(&pdevinfo, 0, sizeof(pdevinfo));
+
+		pdevinfo.parent = res->parent;
+		pdevinfo.name = "int-sdw";
+		pdevinfo.id = i;
+		pdevinfo.fwnode = acpi_fwnode_handle(adev);
+		pdevinfo.data = &link->res;
+		pdevinfo.size_data = sizeof(link->res);
+
+		pdev = platform_device_register_full(&pdevinfo);
+		if (IS_ERR(pdev)) {
+			dev_err(&adev->dev,
+				"platform device creation failed: %ld\n",
+				PTR_ERR(pdev));
+			goto pdev_err;
+		}
+
+		link->pdev = pdev;
+		link++;
+	}
+
+	return ctx;
+
+pdev_err:
+	sdw_intel_cleanup_pdev(ctx);
+link_err:
+	kfree(ctx);
+	return NULL;
+}
+
+static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
+					void *cdata, void **return_value)
+{
+	struct sdw_intel_res *res = cdata;
+	struct acpi_device *adev;
+
+	if (acpi_bus_get_device(handle, &adev)) {
+		pr_err("%s: Couldn't find ACPI handle\n", __func__);
+		return AE_NOT_FOUND;
+	}
+
+	res->handle = handle;
+	return AE_OK;
+}
+
+/**
+ * sdw_intel_init() - SoundWire Intel init routine
+ * @parent_handle: ACPI parent handle
+ * @res: resource data
+ *
+ * This scans the namespace and creates SoundWire link controller devices
+ * based on the info queried.
+ */
+void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res)
+{
+	acpi_status status;
+
+	status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
+					parent_handle, 1,
+					sdw_intel_acpi_cb,
+					NULL, res, NULL);
+	if (ACPI_FAILURE(status))
+		return NULL;
+
+	return sdw_intel_add_controller(res);
+}
+EXPORT_SYMBOL(sdw_intel_init);
+
+/**
+ * sdw_intel_exit() - SoundWire Intel exit
+ * @arg: callback context
+ *
+ * Delete the controller instances created and cleanup
+ */
+void sdw_intel_exit(void *arg)
+{
+	struct sdw_intel_ctx *ctx = arg;
+
+	sdw_intel_cleanup_pdev(ctx);
+	kfree(ctx);
+}
+EXPORT_SYMBOL(sdw_intel_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Intel Soundwire Init Library");
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
new file mode 100644
index 0000000..fdeba0c
--- /dev/null
+++ b/drivers/soundwire/mipi_disco.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+/*
+ * MIPI Discovery And Configuration (DisCo) Specification for SoundWire
+ * specifies properties to be implemented for SoundWire Masters and Slaves.
+ * The DisCo spec doesn't mandate these properties. However, the SDW bus
+ * cannot work without knowing these values.
+ *
+ * The helper functions read the Master and Slave properties. Implementers
+ * of Master or Slave drivers can use any of the three mechanisms below:
+ *    a) Use these APIs as the .read_prop() callback for Master and Slave
+ *    b) Implement their own methods and set those as .read_prop(), but
+ *    invoke the APIs in this file for the generic read and override the
+ *    values with platform-specific data
+ *    c) Implement their own methods which do not use anything provided
+ *    here
+ */
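+
+/*
+ * As a sketch of mechanism (b) above, a controller driver can wrap the
+ * generic helper and then override platform-specific fields, e.g.:
+ *
+ *	static int my_prop_read(struct sdw_bus *bus)
+ *	{
+ *		sdw_master_read_prop(bus);
+ *		bus->prop.err_threshold = 5;
+ *		return 0;
+ *	}
+ *
+ * and set it as the .read_prop() callback (intel_prop_read() in intel.c
+ * follows this pattern; my_prop_read() is only an illustrative name).
+ */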
+
+#include <linux/device.h>
+#include <linux/property.h>
+#include <linux/mod_devicetable.h>
+#include <linux/soundwire/sdw.h>
+#include "bus.h"
+
+/**
+ * sdw_master_read_prop() - Read Master properties
+ * @bus: SDW bus instance
+ */
+int sdw_master_read_prop(struct sdw_bus *bus)
+{
+	struct sdw_master_prop *prop = &bus->prop;
+	struct fwnode_handle *link;
+	char name[32];
+	int nval, i;
+
+	device_property_read_u32(bus->dev,
+			"mipi-sdw-sw-interface-revision", &prop->revision);
+
+	/* Find master handle */
+	snprintf(name, sizeof(name),
+			"mipi-sdw-master-%d-subproperties", bus->link_id);
+
+	link = device_get_named_child_node(bus->dev, name);
+	if (!link) {
+		dev_err(bus->dev, "Master node %s not found\n", name);
+		return -EIO;
+	}
+
+	if (fwnode_property_read_bool(link,
+			"mipi-sdw-clock-stop-mode0-supported"))
+		prop->clk_stop_mode = SDW_CLK_STOP_MODE0;
+
+	if (fwnode_property_read_bool(link,
+			"mipi-sdw-clock-stop-mode1-supported"))
+		prop->clk_stop_mode |= SDW_CLK_STOP_MODE1;
+
+	fwnode_property_read_u32(link,
+			"mipi-sdw-max-clock-frequency", &prop->max_freq);
+
+	nval = fwnode_property_read_u32_array(link,
+			"mipi-sdw-clock-frequencies-supported", NULL, 0);
+	if (nval > 0) {
+		prop->num_freq = nval;
+		prop->freq = devm_kcalloc(bus->dev, prop->num_freq,
+				sizeof(*prop->freq), GFP_KERNEL);
+		if (!prop->freq)
+			return -ENOMEM;
+
+		fwnode_property_read_u32_array(link,
+				"mipi-sdw-clock-frequencies-supported",
+				prop->freq, prop->num_freq);
+	}
+
+	/*
+	 * Check the frequencies supported. If FW doesn't provide max
+	 * freq, then populate here by checking values.
+	 */
+	if (!prop->max_freq && prop->freq) {
+		prop->max_freq = prop->freq[0];
+		for (i = 1; i < prop->num_freq; i++) {
+			if (prop->freq[i] > prop->max_freq)
+				prop->max_freq = prop->freq[i];
+		}
+	}
+
+	nval = fwnode_property_read_u32_array(link,
+			"mipi-sdw-supported-clock-gears", NULL, 0);
+	if (nval > 0) {
+		prop->num_clk_gears = nval;
+		prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears,
+				sizeof(*prop->clk_gears), GFP_KERNEL);
+		if (!prop->clk_gears)
+			return -ENOMEM;
+
+		fwnode_property_read_u32_array(link,
+				"mipi-sdw-supported-clock-gears",
+				prop->clk_gears, prop->num_clk_gears);
+	}
+
+	fwnode_property_read_u32(link, "mipi-sdw-default-frame-rate",
+			&prop->default_frame_rate);
+
+	fwnode_property_read_u32(link, "mipi-sdw-default-frame-row-size",
+			&prop->default_row);
+
+	fwnode_property_read_u32(link, "mipi-sdw-default-frame-col-size",
+			&prop->default_col);
+
+	prop->dynamic_frame = fwnode_property_read_bool(link,
+			"mipi-sdw-dynamic-frame-shape");
+
+	fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold",
+			&prop->err_threshold);
+
+	return 0;
+}
+EXPORT_SYMBOL(sdw_master_read_prop);
+
+static int sdw_slave_read_dp0(struct sdw_slave *slave,
+		struct fwnode_handle *port, struct sdw_dp0_prop *dp0)
+{
+	int nval;
+
+	fwnode_property_read_u32(port, "mipi-sdw-port-max-wordlength",
+			&dp0->max_word);
+
+	fwnode_property_read_u32(port, "mipi-sdw-port-min-wordlength",
+			&dp0->min_word);
+
+	nval = fwnode_property_read_u32_array(port,
+			"mipi-sdw-port-wordlength-configs", NULL, 0);
+	if (nval > 0) {
+		dp0->num_words = nval;
+		dp0->words = devm_kcalloc(&slave->dev,
+				dp0->num_words, sizeof(*dp0->words),
+				GFP_KERNEL);
+		if (!dp0->words)
+			return -ENOMEM;
+
+		fwnode_property_read_u32_array(port,
+				"mipi-sdw-port-wordlength-configs",
+				dp0->words, dp0->num_words);
+	}
+
+	dp0->flow_controlled = fwnode_property_read_bool(
+			port, "mipi-sdw-bra-flow-controlled");
+
+	dp0->simple_ch_prep_sm = fwnode_property_read_bool(
+			port, "mipi-sdw-simplified-channel-prepare-sm");
+
+	dp0->device_interrupts = fwnode_property_read_bool(
+			port, "mipi-sdw-imp-def-dp0-interrupts-supported");
+
+	return 0;
+}
+
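+/*
+ * Walk the DPn port bitmap (@ports) and read the per-port subproperties
+ * node ("mipi-sdw-dp-<n>-<source|sink>-subproperties") for each set bit,
+ * filling one entry of @dpn per port.
+ */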
+static int sdw_slave_read_dpn(struct sdw_slave *slave,
+		struct sdw_dpn_prop *dpn, int count, int ports, char *type)
+{
+	struct fwnode_handle *node;
+	u32 bit, i = 0;
+	int nval;
+	unsigned long addr;
+	char name[40];
+
+	addr = ports;
+	/* valid ports are 1 to 14 so apply mask */
+	addr &= GENMASK(14, 1);
+
+	for_each_set_bit(bit, &addr, 32) {
+		snprintf(name, sizeof(name),
+			"mipi-sdw-dp-%d-%s-subproperties", bit, type);
+
+		dpn[i].num = bit;
+
+		node = device_get_named_child_node(&slave->dev, name);
+		if (!node) {
+			dev_err(&slave->dev, "%s dpN not found\n", name);
+			return -EIO;
+		}
+
+		fwnode_property_read_u32(node, "mipi-sdw-port-max-wordlength",
+					&dpn[i].max_word);
+		fwnode_property_read_u32(node, "mipi-sdw-port-min-wordlength",
+					&dpn[i].min_word);
+
+		nval = fwnode_property_read_u32_array(node,
+				"mipi-sdw-port-wordlength-configs", NULL, 0);
+		if (nval > 0) {
+			dpn[i].num_words = nval;
+			dpn[i].words = devm_kcalloc(&slave->dev,
+					dpn[i].num_words,
+					sizeof(*dpn[i].words), GFP_KERNEL);
+			if (!dpn[i].words)
+				return -ENOMEM;
+
+			fwnode_property_read_u32_array(node,
+					"mipi-sdw-port-wordlength-configs",
+					dpn[i].words, dpn[i].num_words);
+		}
+
+		fwnode_property_read_u32(node, "mipi-sdw-data-port-type",
+				&dpn[i].type);
+
+		fwnode_property_read_u32(node,
+				"mipi-sdw-max-grouping-supported",
+				&dpn[i].max_grouping);
+
+		dpn[i].simple_ch_prep_sm = fwnode_property_read_bool(node,
+				"mipi-sdw-simplified-channelprepare-sm");
+
+		fwnode_property_read_u32(node,
+				"mipi-sdw-port-channelprepare-timeout",
+				&dpn[i].ch_prep_timeout);
+
+		fwnode_property_read_u32(node,
+				"mipi-sdw-imp-def-dpn-interrupts-supported",
+				&dpn[i].device_interrupts);
+
+		fwnode_property_read_u32(node, "mipi-sdw-min-channel-number",
+				&dpn[i].min_ch);
+
+		fwnode_property_read_u32(node, "mipi-sdw-max-channel-number",
+				&dpn[i].max_ch);
+
+		nval = fwnode_property_read_u32_array(node,
+				"mipi-sdw-channel-number-list", NULL, 0);
+		if (nval > 0) {
+			dpn[i].num_ch = nval;
+			dpn[i].ch = devm_kcalloc(&slave->dev, dpn[i].num_ch,
+					sizeof(*dpn[i].ch), GFP_KERNEL);
+			if (!dpn[i].ch)
+				return -ENOMEM;
+
+			fwnode_property_read_u32_array(node,
+					"mipi-sdw-channel-number-list",
+					dpn[i].ch, dpn[i].num_ch);
+		}
+
+		nval = fwnode_property_read_u32_array(node,
+				"mipi-sdw-channel-combination-list", NULL, 0);
+		if (nval > 0) {
+			dpn[i].num_ch_combinations = nval;
+			dpn[i].ch_combinations = devm_kcalloc(&slave->dev,
+					dpn[i].num_ch_combinations,
+					sizeof(*dpn[i].ch_combinations),
+					GFP_KERNEL);
+			if (!dpn[i].ch_combinations)
+				return -ENOMEM;
+
+			fwnode_property_read_u32_array(node,
+					"mipi-sdw-channel-combination-list",
+					dpn[i].ch_combinations,
+					dpn[i].num_ch_combinations);
+		}
+
+		fwnode_property_read_u32(node,
+				"mipi-sdw-modes-supported", &dpn[i].modes);
+
+		fwnode_property_read_u32(node, "mipi-sdw-max-async-buffer",
+				&dpn[i].max_async_buffer);
+
+		dpn[i].block_pack_mode = fwnode_property_read_bool(node,
+				"mipi-sdw-block-packing-mode");
+
+		fwnode_property_read_u32(node, "mipi-sdw-port-encoding-type",
+				&dpn[i].port_encoding);
+
+		/* TODO: Read audio mode */
+
+		i++;
+	}
+
+	return 0;
+}
+
+/**
+ * sdw_slave_read_prop() - Read Slave properties
+ * @slave: SDW Slave
+ */
+int sdw_slave_read_prop(struct sdw_slave *slave)
+{
+	struct sdw_slave_prop *prop = &slave->prop;
+	struct device *dev = &slave->dev;
+	struct fwnode_handle *port;
+	int num_of_ports, nval, i, dp0 = 0;
+
+	device_property_read_u32(dev, "mipi-sdw-sw-interface-revision",
+				&prop->mipi_revision);
+
+	/* the property is phrased negatively: absent means wake capable */
+	prop->wake_capable = !device_property_read_bool(dev,
+				"mipi-sdw-wake-up-unavailable");
+
+	prop->test_mode_capable = device_property_read_bool(dev,
+				"mipi-sdw-test-mode-supported");
+
+	prop->clk_stop_mode1 = device_property_read_bool(dev,
+				"mipi-sdw-clock-stop-mode1-supported");
+
+	prop->simple_clk_stop_capable = device_property_read_bool(dev,
+			"mipi-sdw-simplified-clockstopprepare-sm-supported");
+
+	device_property_read_u32(dev, "mipi-sdw-clockstopprepare-timeout",
+			&prop->clk_stop_timeout);
+
+	device_property_read_u32(dev, "mipi-sdw-slave-channelprepare-timeout",
+			&prop->ch_prep_timeout);
+
+	device_property_read_u32(dev,
+			"mipi-sdw-clockstopprepare-hard-reset-behavior",
+			&prop->reset_behave);
+
+	prop->high_PHY_capable = device_property_read_bool(dev,
+			"mipi-sdw-highPHY-capable");
+
+	prop->paging_support = device_property_read_bool(dev,
+			"mipi-sdw-paging-support");
+
+	prop->bank_delay_support = device_property_read_bool(dev,
+			"mipi-sdw-bank-delay-support");
+
+	device_property_read_u32(dev,
+			"mipi-sdw-port15-read-behavior", &prop->p15_behave);
+
+	device_property_read_u32(dev, "mipi-sdw-master-count",
+				&prop->master_count);
+
+	device_property_read_u32(dev, "mipi-sdw-source-port-list",
+				&prop->source_ports);
+
+	device_property_read_u32(dev, "mipi-sdw-sink-port-list",
+				&prop->sink_ports);
+
+	/* Read dp0 properties */
+	port = device_get_named_child_node(dev, "mipi-sdw-dp-0-subproperties");
+	if (!port) {
+		dev_dbg(dev, "DP0 node not found!!\n");
+	} else {
+		prop->dp0_prop = devm_kzalloc(&slave->dev,
+				sizeof(*prop->dp0_prop), GFP_KERNEL);
+		if (!prop->dp0_prop)
+			return -ENOMEM;
+
+		sdw_slave_read_dp0(slave, port, prop->dp0_prop);
+		dp0 = 1;
+	}
+
+	/*
+	 * Based on each DPn port, get source and sink dpn properties.
+	 * Also, some ports can operate as both source or sink.
+	 */
+
+	/* Allocate memory for set bits in port lists */
+	nval = hweight32(prop->source_ports);
+	prop->src_dpn_prop = devm_kcalloc(&slave->dev, nval,
+				sizeof(*prop->src_dpn_prop), GFP_KERNEL);
+	if (!prop->src_dpn_prop)
+		return -ENOMEM;
+
+	/* Read dpn properties for source port(s) */
+	sdw_slave_read_dpn(slave, prop->src_dpn_prop, nval,
+			prop->source_ports, "source");
+
+	nval = hweight32(prop->sink_ports);
+	prop->sink_dpn_prop = devm_kcalloc(&slave->dev, nval,
+				sizeof(*prop->sink_dpn_prop), GFP_KERNEL);
+	if (!prop->sink_dpn_prop)
+		return -ENOMEM;
+
+	/* Read dpn properties for sink port(s) */
+	sdw_slave_read_dpn(slave, prop->sink_dpn_prop, nval,
+			prop->sink_ports, "sink");
+
+	/* some ports are bidirectional so check total ports by ORing */
+	nval = prop->source_ports | prop->sink_ports;
+	num_of_ports = hweight32(nval) + dp0; /* add DP0 */
+
+	/* Allocate port_ready based on num_of_ports */
+	slave->port_ready = devm_kcalloc(&slave->dev, num_of_ports,
+				sizeof(*slave->port_ready), GFP_KERNEL);
+	if (!slave->port_ready)
+		return -ENOMEM;
+
+	/* Initialize completion */
+	for (i = 0; i < num_of_ports; i++)
+		init_completion(&slave->port_ready[i]);
+
+	return 0;
+}
+EXPORT_SYMBOL(sdw_slave_read_prop);
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
new file mode 100644
index 0000000..ac103bd
--- /dev/null
+++ b/drivers/soundwire/slave.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include "bus.h"
+
+static void sdw_slave_release(struct device *dev)
+{
+	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+	kfree(slave);
+}
+
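+/*
+ * Allocate and register a new Slave device on the bus; the device name
+ * encodes the link and enumeration IDs so each Slave is unique. On
+ * device_register() failure the reference is dropped so the release
+ * callback frees the allocation.
+ */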
+static int sdw_slave_add(struct sdw_bus *bus,
+		struct sdw_slave_id *id, struct fwnode_handle *fwnode)
+{
+	struct sdw_slave *slave;
+	int ret;
+
+	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+	if (!slave)
+		return -ENOMEM;
+
+	/* Initialize data structure */
+	memcpy(&slave->id, id, sizeof(*id));
+	slave->dev.parent = bus->dev;
+	slave->dev.fwnode = fwnode;
+
+	/* name shall be sdw:link:mfg:part:class:unique */
+	dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
+			bus->link_id, id->mfg_id, id->part_id,
+			id->class_id, id->unique_id);
+
+	slave->dev.release = sdw_slave_release;
+	slave->dev.bus = &sdw_bus_type;
+	slave->bus = bus;
+	slave->status = SDW_SLAVE_UNATTACHED;
+	slave->dev_num = 0;
+
+	mutex_lock(&bus->bus_lock);
+	list_add_tail(&slave->node, &bus->slaves);
+	mutex_unlock(&bus->bus_lock);
+
+	ret = device_register(&slave->dev);
+	if (ret) {
+		dev_err(bus->dev, "Failed to add slave: ret %d\n", ret);
+
+		/*
+		 * On err, don't free but drop ref as this will be freed
+		 * when release method is invoked.
+		 */
+		mutex_lock(&bus->bus_lock);
+		list_del(&slave->node);
+		mutex_unlock(&bus->bus_lock);
+		put_device(&slave->dev);
+	}
+
+	return ret;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+/*
+ * sdw_acpi_find_slaves() - Find Slave devices in Master ACPI node
+ * @bus: SDW bus instance
+ *
+ * Scans the Master ACPI node for SDW child Slave devices and registers them.
+ */
+int sdw_acpi_find_slaves(struct sdw_bus *bus)
+{
+	struct acpi_device *adev, *parent;
+
+	parent = ACPI_COMPANION(bus->dev);
+	if (!parent) {
+		dev_err(bus->dev, "Can't find parent for acpi bind\n");
+		return -ENODEV;
+	}
+
+	list_for_each_entry(adev, &parent->children, node) {
+		unsigned long long addr;
+		struct sdw_slave_id id;
+		unsigned int link_id;
+		acpi_status status;
+
+		status = acpi_evaluate_integer(adev->handle,
+					METHOD_NAME__ADR, NULL, &addr);
+
+		if (ACPI_FAILURE(status)) {
+			dev_err(bus->dev, "_ADR resolution failed: %x\n",
+							status);
+			return status;
+		}
+
+		/* Extract link id from ADR, bits 51 to 48 (inclusive) */
+		link_id = (addr >> 48) & GENMASK(3, 0);
+
+		/* Check for link_id match */
+		if (link_id != bus->link_id)
+			continue;
+
+		sdw_extract_slave_id(bus, addr, &id);
+
+		/*
+		 * don't error check for sdw_slave_add as we want to continue
+		 * adding Slaves
+		 */
+		sdw_slave_add(bus, &id, acpi_fwnode_handle(adev));
+	}
+
+	return 0;
+}
+
+#endif
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index 99ebdde..8d9cdfb 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -30,4 +30,5 @@
           4) A kernel interface for receiving callbacks when a managed
 	     partition shuts down.
 
+source "drivers/virt/vboxguest/Kconfig"
 endif
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
index c47f04d..d3f7b25 100644
--- a/drivers/virt/Makefile
+++ b/drivers/virt/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_FSL_HV_MANAGER)	+= fsl_hypervisor.o
+obj-y				+= vboxguest/
diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig
new file mode 100644
index 0000000..fffd318
--- /dev/null
+++ b/drivers/virt/vboxguest/Kconfig
@@ -0,0 +1,18 @@
+config VBOXGUEST
+	tristate "Virtual Box Guest integration support"
+	depends on X86 && PCI && INPUT
+	help
+	  This is a driver for the Virtual Box Guest PCI device used in
+	  Virtual Box virtual machines. Enabling this driver will add
+	  support for Virtual Box Guest integration features such as
+	  copy-and-paste, seamless mode and OpenGL pass-through.
+
+	  This driver also offers vboxguest IPC functionality which is needed
+	  for the vboxfs driver which offers folder sharing support.
+
+	  If you enable this driver you should also enable the VBOXVIDEO option.
+
+	  Although it is possible to build this driver into the kernel, it is
+	  advised to build it as a module, so that it can be updated
+	  independently of the kernel. Select M to build this driver as a
+	  module.
diff --git a/drivers/virt/vboxguest/Makefile b/drivers/virt/vboxguest/Makefile
new file mode 100644
index 0000000..203b8f4
--- /dev/null
+++ b/drivers/virt/vboxguest/Makefile
@@ -0,0 +1,3 @@
+vboxguest-y := vboxguest_linux.o vboxguest_core.o vboxguest_utils.o
+
+obj-$(CONFIG_VBOXGUEST) += vboxguest.o
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
new file mode 100644
index 0000000..190dbf8
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -0,0 +1,1571 @@
+// SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0)
+/*
+ * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
+ *
+ * Copyright (C) 2007-2016 Oracle Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include <linux/vmalloc.h>
+#include "vboxguest_core.h"
+#include "vboxguest_version.h"
+
+/* Get the pointer to the first HGCM parameter. */
+#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
+	((struct vmmdev_hgcm_function_parameter *)( \
+		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
+/* Get the pointer to the first HGCM parameter in a 32-bit request. */
+#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
+	((struct vmmdev_hgcm_function_parameter32 *)( \
+		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
+
+#define GUEST_MAPPINGS_TRIES	5
+
+/**
+ * Reserves memory in which the VMM can relocate any guest mappings
+ * that are floating around.
+ *
+ * This operation is a little bit tricky since the VMM might not accept
+ * just any address because of address clashes between the three contexts
+ * it operates in, so we try several times.
+ *
+ * Failure to reserve the guest mappings is ignored.
+ *
+ * @gdev:		The Guest extension device.
+ */
+static void vbg_guest_mappings_init(struct vbg_dev *gdev)
+{
+	struct vmmdev_hypervisorinfo *req;
+	void *guest_mappings[GUEST_MAPPINGS_TRIES];
+	struct page **pages = NULL;
+	u32 size, hypervisor_size;
+	int i, rc;
+
+	/* Query the required space. */
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
+	if (!req)
+		return;
+
+	req->hypervisor_start = 0;
+	req->hypervisor_size = 0;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0)
+		goto out;
+
+	/*
+	 * The VMM will report back if there is nothing it wants to map, like
+	 * for instance in VT-x and AMD-V mode.
+	 */
+	if (req->hypervisor_size == 0)
+		goto out;
+
+	hypervisor_size = req->hypervisor_size;
+	/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
+	size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
+
+	pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		goto out;
+
+	gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
+	if (!gdev->guest_mappings_dummy_page)
+		goto out;
+
+	for (i = 0; i < (size >> PAGE_SHIFT); i++)
+		pages[i] = gdev->guest_mappings_dummy_page;
+
+	/*
+	 * Try several times, the VMM might not accept some addresses because
+	 * of address clashes between the three contexts.
+	 */
+	for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
+		guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
+					 VM_MAP, PAGE_KERNEL_RO);
+		if (!guest_mappings[i])
+			break;
+
+		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
+		req->header.rc = VERR_INTERNAL_ERROR;
+		req->hypervisor_size = hypervisor_size;
+		req->hypervisor_start =
+			(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
+
+		rc = vbg_req_perform(gdev, req);
+		if (rc >= 0) {
+			gdev->guest_mappings = guest_mappings[i];
+			break;
+		}
+	}
+
+	/* Free vmap's from failed attempts. */
+	while (--i >= 0)
+		vunmap(guest_mappings[i]);
+
+	/* On failure free the dummy-page backing the vmap */
+	if (!gdev->guest_mappings) {
+		__free_page(gdev->guest_mappings_dummy_page);
+		gdev->guest_mappings_dummy_page = NULL;
+	}
+
+out:
+	kfree(req);
+	kfree(pages);
+}
+
+/**
+ * Undo what vbg_guest_mappings_init did.
+ *
+ * @gdev:		The Guest extension device.
+ */
+static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
+{
+	struct vmmdev_hypervisorinfo *req;
+	int rc;
+
+	if (!gdev->guest_mappings)
+		return;
+
+	/*
+	 * Tell the host that we're going to free the memory we reserved for
+	 * it, then free it up. (Leak the memory if anything goes wrong here.)
+	 */
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
+	if (!req)
+		return;
+
+	req->hypervisor_start = 0;
+	req->hypervisor_size = 0;
+
+	rc = vbg_req_perform(gdev, req);
+
+	kfree(req);
+
+	if (rc < 0) {
+		vbg_err("%s error: %d\n", __func__, rc);
+		return;
+	}
+
+	vunmap(gdev->guest_mappings);
+	gdev->guest_mappings = NULL;
+
+	__free_page(gdev->guest_mappings_dummy_page);
+	gdev->guest_mappings_dummy_page = NULL;
+}
+
+/**
+ * Report the guest information to the host.
+ * Return: 0 or negative errno value.
+ * @gdev:		The Guest extension device.
+ */
+static int vbg_report_guest_info(struct vbg_dev *gdev)
+{
+	/*
+	 * Allocate and fill in the two guest info reports.
+	 */
+	struct vmmdev_guest_info *req1 = NULL;
+	struct vmmdev_guest_info2 *req2 = NULL;
+	int rc, ret = -ENOMEM;
+
+	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
+	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
+	if (!req1 || !req2)
+		goto out_free;
+
+	req1->interface_version = VMMDEV_VERSION;
+	req1->os_type = VMMDEV_OSTYPE_LINUX26;
+#if __BITS_PER_LONG == 64
+	req1->os_type |= VMMDEV_OSTYPE_X64;
+#endif
+
+	req2->additions_major = VBG_VERSION_MAJOR;
+	req2->additions_minor = VBG_VERSION_MINOR;
+	req2->additions_build = VBG_VERSION_BUILD;
+	req2->additions_revision = VBG_SVN_REV;
+	/* (no features defined yet) */
+	req2->additions_features = 0;
+	strlcpy(req2->name, VBG_VERSION_STRING,
+		sizeof(req2->name));
+
+	/*
+	 * There are two protocols here:
+	 *      1. INFO2 + INFO1. Supported by >=3.2.51.
+	 *      2. INFO1 and optionally INFO2. The old protocol.
+	 *
+	 * We try protocol 2 first.  It will fail with VERR_NOT_SUPPORTED
+	 * if not supported by the VMMDev (message ordering requirement).
+	 */
+	rc = vbg_req_perform(gdev, req2);
+	if (rc >= 0) {
+		rc = vbg_req_perform(gdev, req1);
+	} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
+		rc = vbg_req_perform(gdev, req1);
+		if (rc >= 0) {
+			rc = vbg_req_perform(gdev, req2);
+			if (rc == VERR_NOT_IMPLEMENTED)
+				rc = VINF_SUCCESS;
+		}
+	}
+	ret = vbg_status_code_to_errno(rc);
+
+out_free:
+	kfree(req2);
+	kfree(req1);
+	return ret;
+}
+
+/**
+ * Report the guest driver status to the host.
+ * Return: 0 or negative errno value.
+ * @gdev:		The Guest extension device.
+ * @active:		Flag whether the driver is now active or not.
+ */
+static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
+{
+	struct vmmdev_guest_status *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
+	if (!req)
+		return -ENOMEM;
+
+	req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
+	if (active)
+		req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
+	else
+		req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
+	req->flags = 0;
+
+	rc = vbg_req_perform(gdev, req);
+	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
+		rc = VINF_SUCCESS;
+
+	kfree(req);
+
+	return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Inflate the balloon by one chunk. The caller owns the balloon mutex.
+ * Return: 0 or negative errno value.
+ * @gdev:		The Guest extension device.
+ * @chunk_idx:		Index of the chunk.
+ */
+static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
+{
+	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
+	struct page **pages;
+	int i, rc, ret;
+
+	pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
+			      sizeof(*pages), GFP_KERNEL | __GFP_NOWARN);
+	if (!pages)
+		return -ENOMEM;
+
+	req->header.size = sizeof(*req);
+	req->inflate = true;
+	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
+
+	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
+		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
+		if (!pages[i]) {
+			ret = -ENOMEM;
+			goto out_error;
+		}
+
+		req->phys_page[i] = page_to_phys(pages[i]);
+	}
+
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		vbg_err("%s error, rc: %d\n", __func__, rc);
+		ret = vbg_status_code_to_errno(rc);
+		goto out_error;
+	}
+
+	gdev->mem_balloon.pages[chunk_idx] = pages;
+
+	return 0;
+
+out_error:
+	while (--i >= 0)
+		__free_page(pages[i]);
+	kfree(pages);
+
+	return ret;
+}
+
+/**
+ * Deflate the balloon by one chunk. The caller owns the balloon mutex.
+ * Return: 0 or negative errno value.
+ * @gdev:		The Guest extension device.
+ * @chunk_idx:		Index of the chunk.
+ */
+static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
+{
+	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
+	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
+	int i, rc;
+
+	req->header.size = sizeof(*req);
+	req->inflate = false;
+	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
+
+	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
+		req->phys_page[i] = page_to_phys(pages[i]);
+
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		vbg_err("%s error, rc: %d\n", __func__, rc);
+		return vbg_status_code_to_errno(rc);
+	}
+
+	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
+		__free_page(pages[i]);
+	kfree(pages);
+	gdev->mem_balloon.pages[chunk_idx] = NULL;
+
+	return 0;
+}
+
+/**
+ * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
+ * the host wants the balloon to be and adjust accordingly.
+ */
+static void vbg_balloon_work(struct work_struct *work)
+{
+	struct vbg_dev *gdev =
+		container_of(work, struct vbg_dev, mem_balloon.work);
+	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
+	u32 i, chunks;
+	int rc, ret;
+
+	/*
+	 * Setting this bit means that we request the value from the host and
+	 * change the guest memory balloon according to the returned value.
+	 */
+	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		vbg_err("%s error, rc: %d)\n", __func__, rc);
+		return;
+	}
+
+	/*
+	 * The host always returns the same maximum amount of chunks, so
+	 * we do this once.
+	 */
+	if (!gdev->mem_balloon.max_chunks) {
+		gdev->mem_balloon.pages =
+			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
+				     sizeof(struct page **), GFP_KERNEL);
+		if (!gdev->mem_balloon.pages)
+			return;
+
+		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
+	}
+
+	chunks = req->balloon_chunks;
+	if (chunks > gdev->mem_balloon.max_chunks) {
+		vbg_err("%s: illegal balloon size %u (max=%u)\n",
+			__func__, chunks, gdev->mem_balloon.max_chunks);
+		return;
+	}
+
+	if (chunks > gdev->mem_balloon.chunks) {
+		/* inflate */
+		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
+			ret = vbg_balloon_inflate(gdev, i);
+			if (ret < 0)
+				return;
+
+			gdev->mem_balloon.chunks++;
+		}
+	} else {
+		/* deflate */
+		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
+			ret = vbg_balloon_deflate(gdev, i);
+			if (ret < 0)
+				return;
+
+			gdev->mem_balloon.chunks--;
+		}
+	}
+}
+
+/**
+ * Callback for heartbeat timer.
+ */
+static void vbg_heartbeat_timer(struct timer_list *t)
+{
+	struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
+
+	vbg_req_perform(gdev, gdev->guest_heartbeat_req);
+	mod_timer(&gdev->heartbeat_timer,
+		  msecs_to_jiffies(gdev->heartbeat_interval_ms));
+}
+
+/**
+ * Configure the host to check the guest's heartbeat
+ * and get the heartbeat interval from the host.
+ * Return: 0 or negative errno value.
+ * @gdev:		The Guest extension device.
+ * @enabled:		Set true to enable guest heartbeat checks on host.
+ */
+static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
+{
+	struct vmmdev_heartbeat *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
+	if (!req)
+		return -ENOMEM;
+
+	req->enabled = enabled;
+	req->interval_ns = 0;
+	rc = vbg_req_perform(gdev, req);
+	do_div(req->interval_ns, 1000000); /* ns -> ms */
+	gdev->heartbeat_interval_ms = req->interval_ns;
+	kfree(req);
+
+	return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Initializes the heartbeat timer. This feature may be disabled by the host.
+ * Return: 0 or negative errno value.
+ * @gdev:		The Guest extension device.
+ */
+static int vbg_heartbeat_init(struct vbg_dev *gdev)
+{
+	int ret;
+
+	/* Make sure that heartbeat checking is disabled if we fail. */
+	ret = vbg_heartbeat_host_config(gdev, false);
+	if (ret < 0)
+		return ret;
+
+	ret = vbg_heartbeat_host_config(gdev, true);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Preallocate the request to use it from the timer callback because:
+	 *    1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
+	 *       and the timer callback runs at DISPATCH_LEVEL;
+	 *    2) avoid repeated allocations.
+	 */
+	gdev->guest_heartbeat_req = vbg_req_alloc(
+					sizeof(*gdev->guest_heartbeat_req),
+					VMMDEVREQ_GUEST_HEARTBEAT);
+	if (!gdev->guest_heartbeat_req)
+		return -ENOMEM;
+
+	vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
+		 __func__, gdev->heartbeat_interval_ms);
+	mod_timer(&gdev->heartbeat_timer, 0);
+
+	return 0;
+}
+
+/**
+ * Clean up heartbeat code: stop the timer and disable host heartbeat checking.
+ * @gdev:		The Guest extension device.
+ */
+static void vbg_heartbeat_exit(struct vbg_dev *gdev)
+{
+	del_timer_sync(&gdev->heartbeat_timer);
+	vbg_heartbeat_host_config(gdev, false);
+	kfree(gdev->guest_heartbeat_req);
+}
+
+/**
+ * Applies a change to the bit usage tracker.
+ * Return: true if the mask changed, false if not.
+ * @tracker:		The bit usage tracker.
+ * @changed:		The bits to change.
+ * @previous:		The previous value of the bits.
+ */
+static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
+				u32 changed, u32 previous)
+{
+	bool global_change = false;
+
+	while (changed) {
+		u32 bit = ffs(changed) - 1;
+		u32 bitmask = BIT(bit);
+
+		if (bitmask & previous) {
+			tracker->per_bit_usage[bit] -= 1;
+			if (tracker->per_bit_usage[bit] == 0) {
+				global_change = true;
+				tracker->mask &= ~bitmask;
+			}
+		} else {
+			tracker->per_bit_usage[bit] += 1;
+			if (tracker->per_bit_usage[bit] == 1) {
+				global_change = true;
+				tracker->mask |= bitmask;
+			}
+		}
+
+		changed &= ~bitmask;
+	}
+
+	return global_change;
+}
+
+/**
+ * Init and termination worker for resetting the event filter on the host.
+ * Return: 0 or negative errno value.
+ * @gdev:		   The Guest extension device.
+ * @fixed_events:	   Fixed events (init time).
+ */
+static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
+				       u32 fixed_events)
+{
+	struct vmmdev_mask *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+	if (!req)
+		return -ENOMEM;
+
+	req->not_mask = U32_MAX & ~fixed_events;
+	req->or_mask = fixed_events;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0)
+		vbg_err("%s error, rc: %d\n", __func__, rc);
+
+	kfree(req);
+	return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Changes the event filter mask for the given session.
+ *
+ * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
+ * do session cleanup. Takes the session mutex.
+ *
+ * Return: 0 or negative errno value.
+ * @gdev:			The Guest extension device.
+ * @session:			The session.
+ * @or_mask:			The events to add.
+ * @not_mask:			The events to remove.
+ * @session_termination:	Set if we're called by the session cleanup code.
+ *				This tweaks the error handling so we perform
+ *				proper session cleanup even if the host
+ *				misbehaves.
+ */
+static int vbg_set_session_event_filter(struct vbg_dev *gdev,
+					struct vbg_session *session,
+					u32 or_mask, u32 not_mask,
+					bool session_termination)
+{
+	struct vmmdev_mask *req;
+	u32 changed, previous;
+	int rc, ret = 0;
+
+	/* Allocate a request buffer before taking the mutex */
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+	if (!req) {
+		if (!session_termination)
+			return -ENOMEM;
+		/* Ignore allocation failure, we must do session cleanup. */
+	}
+
+	mutex_lock(&gdev->session_mutex);
+
+	/* Apply the changes to the session mask. */
+	previous = session->event_filter;
+	session->event_filter |= or_mask;
+	session->event_filter &= ~not_mask;
+
+	/* If anything actually changed, update the global usage counters. */
+	changed = previous ^ session->event_filter;
+	if (!changed)
+		goto out;
+
+	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
+	or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
+
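+	/*
+	 * Skip the VMM call if the host already has the right filter, or if
+	 * request allocation failed during session termination.
+	 */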
+	if (gdev->event_filter_host == or_mask || !req)
+		goto out;
+
+	gdev->event_filter_host = or_mask;
+	req->or_mask = or_mask;
+	req->not_mask = ~or_mask;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		ret = vbg_status_code_to_errno(rc);
+
+		/* Failed, roll back (unless it's session termination time). */
+		gdev->event_filter_host = U32_MAX;
+		if (session_termination)
+			goto out;
+
+		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
+				    session->event_filter);
+		session->event_filter = previous;
+	}
+
+out:
+	mutex_unlock(&gdev->session_mutex);
+	kfree(req);
+
+	return ret;
+}
+
+/**
+ * Init and termination worker for setting the guest capabilities to zero on the host.
+ * Return: 0 or negative errno value.
+ * @gdev:		The Guest extension device.
+ */
+static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
+{
+	struct vmmdev_mask *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+	if (!req)
+		return -ENOMEM;
+
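+	/* Clear all guest capabilities on the host. */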
+	req->not_mask = U32_MAX;
+	req->or_mask = 0;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0)
+		vbg_err("%s error, rc: %d\n", __func__, rc);
+
+	kfree(req);
+	return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Sets the guest capabilities for a session. Takes the session mutex.
+ * Return: 0 or negative errno value.
+ * @gdev:			The Guest extension device.
+ * @session:			The session.
+ * @or_mask:			The capabilities to add.
+ * @not_mask:			The capabilities to remove.
+ * @session_termination:	Set if we're called by the session cleanup code.
+ *				This tweaks the error handling so we perform
+ *				proper session cleanup even if the host
+ *				misbehaves.
+ */
+static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+					struct vbg_session *session,
+					u32 or_mask, u32 not_mask,
+					bool session_termination)
+{
+	struct vmmdev_mask *req;
+	u32 changed, previous;
+	int rc, ret = 0;
+
+	/* Allocate a request buffer before taking the mutex */
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+	if (!req) {
+		if (!session_termination)
+			return -ENOMEM;
+		/* Ignore allocation failure, we must do session cleanup. */
+	}
+
+	mutex_lock(&gdev->session_mutex);
+
+	/* Apply the changes to the session mask. */
+	previous = session->guest_caps;
+	session->guest_caps |= or_mask;
+	session->guest_caps &= ~not_mask;
+
+	/* If anything actually changed, update the global usage counters. */
+	changed = previous ^ session->guest_caps;
+	if (!changed)
+		goto out;
+
+	vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
+	or_mask = gdev->guest_caps_tracker.mask;
+
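+	/*
+	 * Skip the VMM call if the host already has the right capabilities,
+	 * or if request allocation failed during session termination.
+	 */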
+	if (gdev->guest_caps_host == or_mask || !req)
+		goto out;
+
+	gdev->guest_caps_host = or_mask;
+	req->or_mask = or_mask;
+	req->not_mask = ~or_mask;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		ret = vbg_status_code_to_errno(rc);
+
+		/* Failed, roll back (unless it's session termination time). */
+		gdev->guest_caps_host = U32_MAX;
+		if (session_termination)
+			goto out;
+
+		vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
+				    session->guest_caps);
+		session->guest_caps = previous;
+	}
+
+out:
+	mutex_unlock(&gdev->session_mutex);
+	kfree(req);
+
+	return ret;
+}
+
+/**
+ * vbg_query_host_version - gets the host feature mask and version information.
+ * Return: 0 or negative errno value.
+ * @gdev:		The Guest extension device.
+ */
+static int vbg_query_host_version(struct vbg_dev *gdev)
+{
+	struct vmmdev_host_version *req;
+	int rc, ret;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+	if (!req)
+		return -ENOMEM;
+
+	rc = vbg_req_perform(gdev, req);
+	ret = vbg_status_code_to_errno(rc);
+	if (ret)
+		goto out;
+
+	snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
+		 req->major, req->minor, req->build, req->revision);
+	gdev->host_features = req->features;
+
+	vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
+		 gdev->host_features);
+
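+	/* The driver relies on page-list support for passing HGCM buffers. */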
+	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
+		vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
+		ret = -ENODEV;
+	}
+
+out:
+	kfree(req);
+	return ret;
+}
+
+/**
+ * Initializes the VBoxGuest device extension when the
+ * device driver is loaded.
+ *
+ * The native code locates the VMMDev on the PCI bus and retrieves
+ * the MMIO and I/O port ranges; this function takes care of
+ * mapping the MMIO memory (if present). Upon successful return
+ * the native code should set up the interrupt handler.
+ *
+ * Return: 0 or negative errno value.
+ *
+ * @gdev:		The Guest extension device.
+ * @fixed_events:	Events that will be enabled upon init and no client
+ *			will ever be allowed to mask.
+ */
+int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
+{
+	int ret = -ENOMEM;
+
+	gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
+	gdev->event_filter_host = U32_MAX;	/* forces a report */
+	gdev->guest_caps_host = U32_MAX;	/* forces a report */
+
+	init_waitqueue_head(&gdev->event_wq);
+	init_waitqueue_head(&gdev->hgcm_wq);
+	spin_lock_init(&gdev->event_spinlock);
+	mutex_init(&gdev->session_mutex);
+	mutex_init(&gdev->cancel_req_mutex);
+	timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
+	INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
+
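+	/*
+	 * Pre-allocate the requests used by the ISR, the balloon worker and
+	 * HGCM cancellation, so those paths never need to allocate memory.
+	 */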
+	gdev->mem_balloon.get_req =
+		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
+			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+	gdev->mem_balloon.change_req =
+		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
+			      VMMDEVREQ_CHANGE_MEMBALLOON);
+	gdev->cancel_req =
+		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
+			      VMMDEVREQ_HGCM_CANCEL2);
+	gdev->ack_events_req =
+		vbg_req_alloc(sizeof(*gdev->ack_events_req),
+			      VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+	gdev->mouse_status_req =
+		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
+			      VMMDEVREQ_GET_MOUSE_STATUS);
+
+	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
+	    !gdev->cancel_req || !gdev->ack_events_req ||
+	    !gdev->mouse_status_req)
+		goto err_free_reqs;
+
+	ret = vbg_query_host_version(gdev);
+	if (ret)
+		goto err_free_reqs;
+
+	ret = vbg_report_guest_info(gdev);
+	if (ret) {
+		vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
+		goto err_free_reqs;
+	}
+
+	ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
+	if (ret) {
+		vbg_err("vboxguest: Error setting fixed event filter: %d\n",
+			ret);
+		goto err_free_reqs;
+	}
+
+	ret = vbg_reset_host_capabilities(gdev);
+	if (ret) {
+		vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
+			ret);
+		goto err_free_reqs;
+	}
+
+	ret = vbg_core_set_mouse_status(gdev, 0);
+	if (ret) {
+		vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
+		goto err_free_reqs;
+	}
+
+	/* These may fail without requiring the driver init to fail. */
+	vbg_guest_mappings_init(gdev);
+	vbg_heartbeat_init(gdev);
+
+	/* All Done! */
+	ret = vbg_report_driver_status(gdev, true);
+	if (ret < 0)
+		vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
+
+	return 0;
+
+err_free_reqs:
+	kfree(gdev->mouse_status_req);
+	kfree(gdev->ack_events_req);
+	kfree(gdev->cancel_req);
+	kfree(gdev->mem_balloon.change_req);
+	kfree(gdev->mem_balloon.get_req);
+	return ret;
+}
+
+/**
+ * Call this on exit to clean up vboxguest-core managed resources.
+ *
+ * The native code should call this before the driver is unloaded,
+ * but not on shutdown.
+ * @gdev:		The Guest extension device.
+ */
+void vbg_core_exit(struct vbg_dev *gdev)
+{
+	vbg_heartbeat_exit(gdev);
+	vbg_guest_mappings_exit(gdev);
+
+	/* Clear the host flags (mouse status etc). */
+	vbg_reset_host_event_filter(gdev, 0);
+	vbg_reset_host_capabilities(gdev);
+	vbg_core_set_mouse_status(gdev, 0);
+
+	kfree(gdev->mouse_status_req);
+	kfree(gdev->ack_events_req);
+	kfree(gdev->cancel_req);
+	kfree(gdev->mem_balloon.change_req);
+	kfree(gdev->mem_balloon.get_req);
+}
+
+/**
+ * Creates a VBoxGuest user session.
+ *
+ * vboxguest_linux.c calls this when userspace opens the char-device.
+ * Return: A pointer to the new session or an ERR_PTR on error.
+ * @gdev:		The Guest extension device.
+ * @user:		Set if this is a session for the vboxuser device.
+ */
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+{
+	struct vbg_session *session;
+
+	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		return ERR_PTR(-ENOMEM);
+
+	session->gdev = gdev;
+	session->user_session = user;
+
+	return session;
+}
+
+/**
+ * Closes a VBoxGuest session.
+ * @session:		The session to close (and free).
+ */
+void vbg_core_close_session(struct vbg_session *session)
+{
+	struct vbg_dev *gdev = session->gdev;
+	int i, rc;
+
+	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
+	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
+
+	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
+		if (!session->hgcm_client_ids[i])
+			continue;
+
+		vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+	}
+
+	kfree(session);
+}
+
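+/* Validate that the ioctl header in/out sizes match what this ioctl expects. */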
+static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
+			 size_t out_size)
+{
+	if (hdr->size_in  != (sizeof(*hdr) + in_size) ||
+	    hdr->size_out != (sizeof(*hdr) + out_size))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int vbg_ioctl_driver_version_info(
+	struct vbg_ioctl_driver_version_info *info)
+{
+	const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
+	u16 min_maj_version, req_maj_version;
+
+	if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
+		return -EINVAL;
+
+	req_maj_version = info->u.in.req_version >> 16;
+	min_maj_version = info->u.in.min_version >> 16;
+
+	if (info->u.in.min_version > info->u.in.req_version ||
+	    min_maj_version != req_maj_version)
+		return -EINVAL;
+
+	if (info->u.in.min_version <= VBG_IOC_VERSION &&
+	    min_maj_version == vbg_maj_version) {
+		info->u.out.session_version = VBG_IOC_VERSION;
+	} else {
+		info->u.out.session_version = U32_MAX;
+		info->hdr.rc = VERR_VERSION_MISMATCH;
+	}
+
+	info->u.out.driver_version  = VBG_IOC_VERSION;
+	info->u.out.driver_revision = 0;
+	info->u.out.reserved1       = 0;
+	info->u.out.reserved2       = 0;
+
+	return 0;
+}
+
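+/*
+ * Wake-up condition for the wait-for-events ioctl: an event of interest is
+ * pending, or the wait has been cancelled for this session.
+ */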
+static bool vbg_wait_event_cond(struct vbg_dev *gdev,
+				struct vbg_session *session,
+				u32 event_mask)
+{
+	unsigned long flags;
+	bool wakeup;
+	u32 events;
+
+	spin_lock_irqsave(&gdev->event_spinlock, flags);
+
+	events = gdev->pending_events & event_mask;
+	wakeup = events || session->cancel_waiters;
+
+	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+	return wakeup;
+}
+
+/* Must be called with the event_spinlock held */
+static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
+				     struct vbg_session *session,
+				     u32 event_mask)
+{
+	u32 events = gdev->pending_events & event_mask;
+
+	gdev->pending_events &= ~events;
+	return events;
+}
+
+static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
+				     struct vbg_session *session,
+				     struct vbg_ioctl_wait_for_events *wait)
+{
+	u32 timeout_ms = wait->u.in.timeout_ms;
+	u32 event_mask = wait->u.in.events;
+	unsigned long flags;
+	long timeout;
+	int ret = 0;
+
+	if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
+		return -EINVAL;
+
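+	/* A timeout of U32_MAX means wait indefinitely. */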
+	if (timeout_ms == U32_MAX)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = msecs_to_jiffies(timeout_ms);
+
+	wait->u.out.events = 0;
+	do {
+		timeout = wait_event_interruptible_timeout(
+				gdev->event_wq,
+				vbg_wait_event_cond(gdev, session, event_mask),
+				timeout);
+
+		spin_lock_irqsave(&gdev->event_spinlock, flags);
+
+		if (timeout < 0 || session->cancel_waiters) {
+			ret = -EINTR;
+		} else if (timeout == 0) {
+			ret = -ETIMEDOUT;
+		} else {
+			wait->u.out.events =
+			   vbg_consume_events_locked(gdev, session, event_mask);
+		}
+
+		spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+		/*
+		 * Someone else may have consumed the event(s) first, in
+		 * which case we go back to waiting.
+		 */
+	} while (ret == 0 && wait->u.out.events == 0);
+
+	return ret;
+}
+
+static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
+					       struct vbg_session *session,
+					       struct vbg_ioctl_hdr *hdr)
+{
+	unsigned long flags;
+
+	if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
+		return -EINVAL;
+
+	spin_lock_irqsave(&gdev->event_spinlock, flags);
+	session->cancel_waiters = true;
+	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+	wake_up(&gdev->event_wq);
+
+	return 0;
+}
+
+/**
+ * Checks if the VMM request is allowed in the context of the given session.
+ * Return: 0 or negative errno value.
+ * @gdev:		The Guest extension device.
+ * @session:		The calling session.
+ * @req:		The request.
+ */
+static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
+			   const struct vmmdev_request_header *req)
+{
+	const struct vmmdev_guest_status *guest_status;
+	bool trusted_apps_only;
+
+	switch (req->request_type) {
+	/* Trusted users apps only. */
+	case VMMDEVREQ_QUERY_CREDENTIALS:
+	case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
+	case VMMDEVREQ_REGISTER_SHARED_MODULE:
+	case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
+	case VMMDEVREQ_WRITE_COREDUMP:
+	case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
+	case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
+	case VMMDEVREQ_CHECK_SHARED_MODULES:
+	case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
+	case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
+	case VMMDEVREQ_REPORT_GUEST_STATS:
+	case VMMDEVREQ_REPORT_GUEST_USER_STATE:
+	case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
+		trusted_apps_only = true;
+		break;
+
+	/* Anyone. */
+	case VMMDEVREQ_GET_MOUSE_STATUS:
+	case VMMDEVREQ_SET_MOUSE_STATUS:
+	case VMMDEVREQ_SET_POINTER_SHAPE:
+	case VMMDEVREQ_GET_HOST_VERSION:
+	case VMMDEVREQ_IDLE:
+	case VMMDEVREQ_GET_HOST_TIME:
+	case VMMDEVREQ_SET_POWER_STATUS:
+	case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
+	case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
+	case VMMDEVREQ_REPORT_GUEST_STATUS:
+	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
+	case VMMDEVREQ_VIDEMODE_SUPPORTED:
+	case VMMDEVREQ_GET_HEIGHT_REDUCTION:
+	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
+	case VMMDEVREQ_VIDEMODE_SUPPORTED2:
+	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
+	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
+	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
+	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
+	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
+	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
+	case VMMDEVREQ_LOG_STRING:
+	case VMMDEVREQ_GET_SESSION_ID:
+		trusted_apps_only = false;
+		break;
+
+	/* Depends on the request parameters... */
+	case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
+		guest_status = (const struct vmmdev_guest_status *)req;
+		switch (guest_status->facility) {
+		case VBOXGUEST_FACILITY_TYPE_ALL:
+		case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
+			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
+				guest_status->facility);
+			return -EPERM;
+		case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
+			trusted_apps_only = true;
+			break;
+		case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
+		case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
+		case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
+		default:
+			trusted_apps_only = false;
+			break;
+		}
+		break;
+
+	/* Anything else is not allowed. */
+	default:
+		vbg_err("Denying userspace vmm call type %#08x\n",
+			req->request_type);
+		return -EPERM;
+	}
+
+	if (trusted_apps_only && session->user_session) {
+		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
+			req->request_type);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
+				struct vbg_session *session, void *data)
+{
+	struct vbg_ioctl_hdr *hdr = data;
+	int ret;
+
+	if (hdr->size_in != hdr->size_out)
+		return -EINVAL;
+
+	if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
+		return -E2BIG;
+
+	if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
+		return -EINVAL;
+
+	ret = vbg_req_allowed(gdev, session, data);
+	if (ret < 0)
+		return ret;
+
+	vbg_req_perform(gdev, data);
+	WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
+
+	return 0;
+}
+
+static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
+				  struct vbg_session *session,
+				  struct vbg_ioctl_hgcm_connect *conn)
+{
+	u32 client_id;
+	int i, ret;
+
+	if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
+		return -EINVAL;
+
+	/* Find a free place in the sessions clients array and claim it */
+	mutex_lock(&gdev->session_mutex);
+	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
+		if (!session->hgcm_client_ids[i]) {
+			session->hgcm_client_ids[i] = U32_MAX;
+			break;
+		}
+	}
+	mutex_unlock(&gdev->session_mutex);
+
+	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
+		return -EMFILE;
+
+	ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
+			       &conn->hdr.rc);
+
+	mutex_lock(&gdev->session_mutex);
+	if (ret == 0 && conn->hdr.rc >= 0) {
+		conn->u.out.client_id = client_id;
+		session->hgcm_client_ids[i] = client_id;
+	} else {
+		conn->u.out.client_id = 0;
+		session->hgcm_client_ids[i] = 0;
+	}
+	mutex_unlock(&gdev->session_mutex);
+
+	return ret;
+}
+
+static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
+				     struct vbg_session *session,
+				     struct vbg_ioctl_hgcm_disconnect *disconn)
+{
+	u32 client_id;
+	int i, ret;
+
+	if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
+		return -EINVAL;
+
+	client_id = disconn->u.in.client_id;
+	if (client_id == 0 || client_id == U32_MAX)
+		return -EINVAL;
+
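+	/*
+	 * Mark the slot as being disconnected (U32_MAX) so other threads
+	 * neither reuse nor free it while we call the host without the lock.
+	 */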
+	mutex_lock(&gdev->session_mutex);
+	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
+		if (session->hgcm_client_ids[i] == client_id) {
+			session->hgcm_client_ids[i] = U32_MAX;
+			break;
+		}
+	}
+	mutex_unlock(&gdev->session_mutex);
+
+	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
+		return -EINVAL;
+
+	ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+
+	mutex_lock(&gdev->session_mutex);
+	if (ret == 0 && disconn->hdr.rc >= 0)
+		session->hgcm_client_ids[i] = 0;
+	else
+		session->hgcm_client_ids[i] = client_id;
+	mutex_unlock(&gdev->session_mutex);
+
+	return ret;
+}
+
+static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
+			       struct vbg_session *session, bool f32bit,
+			       struct vbg_ioctl_hgcm_call *call)
+{
+	size_t actual_size;
+	u32 client_id;
+	int i, ret;
+
+	if (call->hdr.size_in < sizeof(*call))
+		return -EINVAL;
+
+	if (call->hdr.size_in != call->hdr.size_out)
+		return -EINVAL;
+
+	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
+		return -E2BIG;
+
+	client_id = call->client_id;
+	if (client_id == 0 || client_id == U32_MAX)
+		return -EINVAL;
+
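+	/*
+	 * The parameter array follows the fixed header; its element size
+	 * differs between 32-bit and native callers.
+	 */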
+	actual_size = sizeof(*call);
+	if (f32bit)
+		actual_size += call->parm_count *
+			       sizeof(struct vmmdev_hgcm_function_parameter32);
+	else
+		actual_size += call->parm_count *
+			       sizeof(struct vmmdev_hgcm_function_parameter);
+	if (call->hdr.size_in < actual_size) {
+		vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
+			  call->hdr.size_in, actual_size);
+		return -EINVAL;
+	}
+	call->hdr.size_out = actual_size;
+
+	/*
+	 * Validate the client id.
+	 */
+	mutex_lock(&gdev->session_mutex);
+	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
+		if (session->hgcm_client_ids[i] == client_id)
+			break;
+	mutex_unlock(&gdev->session_mutex);
+	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
+		vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
+			  client_id);
+		return -EINVAL;
+	}
+
+	if (f32bit)
+		ret = vbg_hgcm_call32(gdev, client_id,
+				      call->function, call->timeout_ms,
+				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
+				      call->parm_count, &call->hdr.rc);
+	else
+		ret = vbg_hgcm_call(gdev, client_id,
+				    call->function, call->timeout_ms,
+				    VBG_IOCTL_HGCM_CALL_PARMS(call),
+				    call->parm_count, &call->hdr.rc);
+
+	if (ret == -E2BIG) {
+		/* E2BIG needs to be reported through the hdr.rc field. */
+		call->hdr.rc = VERR_OUT_OF_RANGE;
+		ret = 0;
+	}
+
+	if (ret && ret != -EINTR && ret != -ETIMEDOUT)
+		vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
+
+	return ret;
+}
+
+static int vbg_ioctl_log(struct vbg_ioctl_log *log)
+{
+	if (log->hdr.size_out != sizeof(log->hdr))
+		return -EINVAL;
+
+	vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
+		 log->u.in.msg);
+
+	return 0;
+}
+
+static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
+					struct vbg_session *session,
+					struct vbg_ioctl_change_filter *filter)
+{
+	u32 or_mask, not_mask;
+
+	if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
+		return -EINVAL;
+
+	or_mask = filter->u.in.or_mask;
+	not_mask = filter->u.in.not_mask;
+
+	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
+		return -EINVAL;
+
+	return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
+					    false);
+}
+
+static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
+	     struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
+{
+	u32 or_mask, not_mask;
+	int ret;
+
+	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
+		return -EINVAL;
+
+	or_mask = caps->u.in.or_mask;
+	not_mask = caps->u.in.not_mask;
+
+	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
+		return -EINVAL;
+
+	ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
+					   false);
+	if (ret)
+		return ret;
+
+	caps->u.out.session_caps = session->guest_caps;
+	caps->u.out.global_caps = gdev->guest_caps_host;
+
+	return 0;
+}
+
+static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
+				   struct vbg_ioctl_check_balloon *balloon_info)
+{
+	if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
+		return -EINVAL;
+
+	balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
+	/*
+	 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
+	 * events entirely in the kernel, see vbg_core_isr().
+	 */
+	balloon_info->u.out.handle_in_r3 = false;
+
+	return 0;
+}
+
+static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+				     struct vbg_ioctl_write_coredump *dump)
+{
+	struct vmmdev_write_core_dump *req;
+
+	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
+		return -EINVAL;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+	if (!req)
+		return -ENOMEM;
+
+	req->flags = dump->u.in.flags;
+	dump->hdr.rc = vbg_req_perform(gdev, req);
+
+	kfree(req);
+	return 0;
+}
+
+/**
+ * Common IOCtl for user to kernel communication.
+ * Return: 0 or negative errno value.
+ * @session:	The client session.
+ * @req:	The requested function.
+ * @data:	The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
+ */
+int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
+{
+	unsigned int req_no_size = req & ~IOCSIZE_MASK;
+	struct vbg_dev *gdev = session->gdev;
+	struct vbg_ioctl_hdr *hdr = data;
+	bool f32bit = false;
+
+	hdr->rc = VINF_SUCCESS;
+	if (!hdr->size_out)
+		hdr->size_out = hdr->size_in;
+
+	/*
+	 * hdr->version and hdr->size_in / hdr->size_out minimum size are
+	 * already checked by vbg_misc_device_ioctl().
+	 */
+
+	/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
+	if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
+	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
+		return vbg_ioctl_vmmrequest(gdev, session, data);
+
+	if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
+		return -EINVAL;
+
+	/* Fixed size requests. */
+	switch (req) {
+	case VBG_IOCTL_DRIVER_VERSION_INFO:
+		return vbg_ioctl_driver_version_info(data);
+	case VBG_IOCTL_HGCM_CONNECT:
+		return vbg_ioctl_hgcm_connect(gdev, session, data);
+	case VBG_IOCTL_HGCM_DISCONNECT:
+		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
+	case VBG_IOCTL_WAIT_FOR_EVENTS:
+		return vbg_ioctl_wait_for_events(gdev, session, data);
+	case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
+		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
+	case VBG_IOCTL_CHANGE_FILTER_MASK:
+		return vbg_ioctl_change_filter_mask(gdev, session, data);
+	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
+		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
+	case VBG_IOCTL_CHECK_BALLOON:
+		return vbg_ioctl_check_balloon(gdev, data);
+	case VBG_IOCTL_WRITE_CORE_DUMP:
+		return vbg_ioctl_write_core_dump(gdev, data);
+	}
+
+	/* Variable sized requests. */
+	switch (req_no_size) {
+#ifdef CONFIG_COMPAT
+	case VBG_IOCTL_HGCM_CALL_32(0):
+		f32bit = true;
+		/* Fall through */
+#endif
+	case VBG_IOCTL_HGCM_CALL(0):
+		return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
+	case VBG_IOCTL_LOG(0):
+		return vbg_ioctl_log(data);
+	}
+
+	vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
+	return -ENOTTY;
+}
+
+/**
+ * Report guest supported mouse-features to the host.
+ *
+ * Return: 0 or negative errno value.
+ * @gdev:		The Guest extension device.
+ * @features:		The set of features to report to the host.
+ */
+int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
+{
+	struct vmmdev_mouse_status *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+	if (!req)
+		return -ENOMEM;
+
+	req->mouse_features = features;
+	req->pointer_pos_x = 0;
+	req->pointer_pos_y = 0;
+
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0)
+		vbg_err("%s error, rc: %d\n", __func__, rc);
+
+	kfree(req);
+	return vbg_status_code_to_errno(rc);
+}
+
+/** Core interrupt service routine. */
+irqreturn_t vbg_core_isr(int irq, void *dev_id)
+{
+	struct vbg_dev *gdev = dev_id;
+	struct vmmdev_events *req = gdev->ack_events_req;
+	bool mouse_position_changed = false;
+	unsigned long flags;
+	u32 events = 0;
+	int rc;
+
+	if (!gdev->mmio->V.V1_04.have_events)
+		return IRQ_NONE;
+
+	/* Get and acknowledge events. */
+	req->header.rc = VERR_INTERNAL_ERROR;
+	req->events = 0;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		vbg_err("Error performing events req, rc: %d\n", rc);
+		return IRQ_NONE;
+	}
+
+	events = req->events;
+
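+	/*
+	 * Handle events consumed by the driver itself here; whatever is
+	 * left over is handed to waiting threads via pending_events below.
+	 */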
+	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
+		mouse_position_changed = true;
+		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
+	}
+
+	if (events & VMMDEV_EVENT_HGCM) {
+		wake_up(&gdev->hgcm_wq);
+		events &= ~VMMDEV_EVENT_HGCM;
+	}
+
+	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
+		schedule_work(&gdev->mem_balloon.work);
+		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
+	}
+
+	if (events) {
+		spin_lock_irqsave(&gdev->event_spinlock, flags);
+		gdev->pending_events |= events;
+		spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+		wake_up(&gdev->event_wq);
+	}
+
+	if (mouse_position_changed)
+		vbg_linux_mouse_event(gdev);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
new file mode 100644
index 0000000..6c784bf
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/* Copyright (C) 2010-2016 Oracle Corporation */
+
+#ifndef __VBOXGUEST_CORE_H__
+#define __VBOXGUEST_CORE_H__
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/vboxguest.h>
+#include "vmmdev.h"
+
+struct vbg_session;
+
+/** VBox guest memory balloon. */
+struct vbg_mem_balloon {
+	/** Work handling VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events */
+	struct work_struct work;
+	/** Pre-allocated vmmdev_memballoon_info req for query */
+	struct vmmdev_memballoon_info *get_req;
+	/** Pre-allocated vmmdev_memballoon_change req for inflate / deflate */
+	struct vmmdev_memballoon_change *change_req;
+	/** The current number of chunks in the balloon. */
+	u32 chunks;
+	/** The maximum number of chunks in the balloon. */
+	u32 max_chunks;
+	/**
+	 * Array of pointers to page arrays. A page * array is allocated for
+	 * each chunk when inflating, and freed when deflating.
+	 */
+	struct page ***pages;
+};
+
+/**
+ * Per bit usage tracker for a u32 mask.
+ *
+ * Used for optimal handling of guest properties and event filter.
+ */
+struct vbg_bit_usage_tracker {
+	/** Per bit usage counters. */
+	u32 per_bit_usage[32];
+	/** The current mask according to per_bit_usage. */
+	u32 mask;
+};
+
+/** VBox guest device (data) extension. */
+struct vbg_dev {
+	struct device *dev;
+	/** The base of the adapter I/O ports. */
+	u16 io_port;
+	/** Pointer to the mapping of the VMMDev adapter memory. */
+	struct vmmdev_memory *mmio;
+	/** Host version */
+	char host_version[64];
+	/** Host features */
+	unsigned int host_features;
+	/**
+	 * Dummy page and vmap address for reserved kernel virtual-address
+	 * space for the guest mappings, only used on hosts lacking vtx.
+	 */
+	struct page *guest_mappings_dummy_page;
+	void *guest_mappings;
+	/** Spinlock protecting pending_events. */
+	spinlock_t event_spinlock;
+	/** Preallocated struct vmmdev_events for the IRQ handler. */
+	struct vmmdev_events *ack_events_req;
+	/** Wait-for-event list for threads waiting for multiple events. */
+	wait_queue_head_t event_wq;
+	/** Mask of pending events. */
+	u32 pending_events;
+	/** Wait-for-event list for threads waiting on HGCM async completion. */
+	wait_queue_head_t hgcm_wq;
+	/** Pre-allocated HGCM cancel2 request, for cancellation on timeout. */
+	struct vmmdev_hgcm_cancel2 *cancel_req;
+	/** Mutex protecting cancel_req accesses */
+	struct mutex cancel_req_mutex;
+	/** Pre-allocated mouse-status request for the input-device handling. */
+	struct vmmdev_mouse_status *mouse_status_req;
+	/** Input device for reporting abs mouse coordinates to the guest. */
+	struct input_dev *input;
+
+	/** Memory balloon information. */
+	struct vbg_mem_balloon mem_balloon;
+
+	/** Lock for session related items in vbg_dev and vbg_session */
+	struct mutex session_mutex;
+	/** Events we won't permit anyone to filter out. */
+	u32 fixed_events;
+	/**
+	 * Usage counters for the host events (excludes fixed events),
+	 * Protected by session_mutex.
+	 */
+	struct vbg_bit_usage_tracker event_filter_tracker;
+	/**
+	 * The event filter last reported to the host (or UINT32_MAX).
+	 * Protected by session_mutex.
+	 */
+	u32 event_filter_host;
+
+	/**
+	 * Usage counters for guest capabilities. Indexed by capability bit
+	 * number, one count per session using a capability.
+	 * Protected by session_mutex.
+	 */
+	struct vbg_bit_usage_tracker guest_caps_tracker;
+	/**
+	 * The guest capabilities last reported to the host (or UINT32_MAX).
+	 * Protected by session_mutex.
+	 */
+	u32 guest_caps_host;
+
+	/**
+	 * Heartbeat timer which fires every heartbeat_interval_ms and
+	 * whose handler sends VMMDEVREQ_GUEST_HEARTBEAT to the VMMDev.
+	 */
+	struct timer_list heartbeat_timer;
+	/** Heartbeat timer interval in ms. */
+	int heartbeat_interval_ms;
+	/** Preallocated VMMDEVREQ_GUEST_HEARTBEAT request. */
+	struct vmmdev_request_header *guest_heartbeat_req;
+
+	/** "vboxguest" char-device */
+	struct miscdevice misc_device;
+	/** "vboxuser" char-device */
+	struct miscdevice misc_device_user;
+};
+
+/** The VBoxGuest per session data. */
+struct vbg_session {
+	/** Pointer to the device extension. */
+	struct vbg_dev *gdev;
+
+	/**
+	 * Array containing HGCM client IDs associated with this session.
+	 * These will be automatically disconnected when the session is closed.
+	 * Protected by vbg_gdev.session_mutex.
+	 */
+	u32 hgcm_client_ids[64];
+	/**
+	 * Host events requested by the session.
+	 * An event type requested in any guest session will be added to the
+	 * host filter. Protected by vbg_gdev.session_mutex.
+	 */
+	u32 event_filter;
+	/**
+	 * Guest capabilities for this session.
+	 * A capability claimed by any guest session will be reported to the
+	 * host. Protected by vbg_gdev.session_mutex.
+	 */
+	u32 guest_caps;
+	/** Does this session belong to a root process or a user one? */
+	bool user_session;
+	/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_dev.event_spinlock. */
+	bool cancel_waiters;
+};
+
+int  vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
+void vbg_core_exit(struct vbg_dev *gdev);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+void vbg_core_close_session(struct vbg_session *session);
+int  vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
+int  vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
+
+irqreturn_t vbg_core_isr(int irq, void *dev_id);
+
+void vbg_linux_mouse_event(struct vbg_dev *gdev);
+
+#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
new file mode 100644
index 0000000..82e280d
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * vboxguest linux pci driver, char-dev and input-device code.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/vbox_utils.h>
+#include "vboxguest_core.h"
+
+/** The device name. */
+#define DEVICE_NAME		"vboxguest"
+/** The device name for the device node open to everyone. */
+#define DEVICE_NAME_USER	"vboxuser"
+/** VirtualBox PCI vendor ID. */
+#define VBOX_VENDORID		0x80ee
+/** VMMDev PCI card product ID. */
+#define VMMDEV_DEVICEID		0xcafe
+
+/** Mutex protecting the global vbg_gdev pointer used by vbg_get/put_gdev. */
+static DEFINE_MUTEX(vbg_gdev_mutex);
+/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
+static struct vbg_dev *vbg_gdev;
+
+static int vbg_misc_device_open(struct inode *inode, struct file *filp)
+{
+	struct vbg_session *session;
+	struct vbg_dev *gdev;
+
+	/* misc_open sets filp->private_data to our misc device */
+	gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
+
+	session = vbg_core_open_session(gdev, false);
+	if (IS_ERR(session))
+		return PTR_ERR(session);
+
+	filp->private_data = session;
+	return 0;
+}
+
+static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
+{
+	struct vbg_session *session;
+	struct vbg_dev *gdev;
+
+	/* misc_open sets filp->private_data to our misc device */
+	gdev = container_of(filp->private_data, struct vbg_dev,
+			    misc_device_user);
+
+	session = vbg_core_open_session(gdev, true);
+	if (IS_ERR(session))
+		return PTR_ERR(session);
+
+	filp->private_data = session;
+	return 0;
+}
+
+/**
+ * Close device.
+ * Return: 0 on success, negated errno on failure.
+ * @inode:		Pointer to inode info structure.
+ * @filp:		Associated file pointer.
+ */
+static int vbg_misc_device_close(struct inode *inode, struct file *filp)
+{
+	vbg_core_close_session(filp->private_data);
+	filp->private_data = NULL;
+	return 0;
+}
+
+/**
+ * Device I/O Control entry point.
+ * Return: 0 on success, negated errno on failure.
+ * @filp:		Associated file pointer.
+ * @req:		The request specified to ioctl().
+ * @arg:		The argument specified to ioctl().
+ */
+static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
+				  unsigned long arg)
+{
+	struct vbg_session *session = filp->private_data;
+	size_t returned_size, size;
+	struct vbg_ioctl_hdr hdr;
+	int ret = 0;
+	void *buf;
+
+	if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
+		return -EFAULT;
+
+	if (hdr.version != VBG_IOCTL_HDR_VERSION)
+		return -EINVAL;
+
+	if (hdr.size_in < sizeof(hdr) ||
+	    (hdr.size_out && hdr.size_out < sizeof(hdr)))
+		return -EINVAL;
+
+	size = max(hdr.size_in, hdr.size_out);
+	if (_IOC_SIZE(req) && _IOC_SIZE(req) != size)
+		return -EINVAL;
+	if (size > SZ_16M)
+		return -E2BIG;
+
+	/* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */
+	buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, (void __user *)arg, hdr.size_in)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	if (hdr.size_in < size)
+		memset(buf + hdr.size_in, 0, size - hdr.size_in);
+
+	ret = vbg_core_ioctl(session, req, buf);
+	if (ret)
+		goto out;
+
+	returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out;
+	if (returned_size > size) {
+		vbg_debug("%s: too much output data %zu > %zu\n",
+			  __func__, returned_size, size);
+		returned_size = size;
+	}
+	if (copy_to_user((void __user *)arg, buf, returned_size) != 0)
+		ret = -EFAULT;
+
+out:
+	kfree(buf);
+
+	return ret;
+}
+
+/** The file_operations structures. */
+static const struct file_operations vbg_misc_device_fops = {
+	.owner			= THIS_MODULE,
+	.open			= vbg_misc_device_open,
+	.release		= vbg_misc_device_close,
+	.unlocked_ioctl		= vbg_misc_device_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= vbg_misc_device_ioctl,
+#endif
+};
+static const struct file_operations vbg_misc_device_user_fops = {
+	.owner			= THIS_MODULE,
+	.open			= vbg_misc_device_user_open,
+	.release		= vbg_misc_device_close,
+	.unlocked_ioctl		= vbg_misc_device_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= vbg_misc_device_ioctl,
+#endif
+};
+
+/**
+ * Called when the input device is first opened.
+ *
+ * Sets up absolute mouse reporting.
+ */
+static int vbg_input_open(struct input_dev *input)
+{
+	struct vbg_dev *gdev = input_get_drvdata(input);
+	u32 feat = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_NEW_PROTOCOL;
+
+	return vbg_core_set_mouse_status(gdev, feat);
+}
+
+/**
+ * Called if all open handles to the input device are closed.
+ *
+ * Disables absolute reporting.
+ */
+static void vbg_input_close(struct input_dev *input)
+{
+	struct vbg_dev *gdev = input_get_drvdata(input);
+
+	vbg_core_set_mouse_status(gdev, 0);
+}
+
+/**
+ * Creates the kernel input device.
+ *
+ * Return: 0 on success, negated errno on failure.
+ */
+static int vbg_create_input_device(struct vbg_dev *gdev)
+{
+	struct input_dev *input;
+
+	input = devm_input_allocate_device(gdev->dev);
+	if (!input)
+		return -ENOMEM;
+
+	input->id.bustype = BUS_PCI;
+	input->id.vendor = VBOX_VENDORID;
+	input->id.product = VMMDEV_DEVICEID;
+	input->open = vbg_input_open;
+	input->close = vbg_input_close;
+	input->dev.parent = gdev->dev;
+	input->name = "VirtualBox mouse integration";
+
+	input_set_abs_params(input, ABS_X, VMMDEV_MOUSE_RANGE_MIN,
+			     VMMDEV_MOUSE_RANGE_MAX, 0, 0);
+	input_set_abs_params(input, ABS_Y, VMMDEV_MOUSE_RANGE_MIN,
+			     VMMDEV_MOUSE_RANGE_MAX, 0, 0);
+	input_set_capability(input, EV_KEY, BTN_MOUSE);
+	input_set_drvdata(input, gdev);
+
+	gdev->input = input;
+
+	return input_register_device(gdev->input);
+}
+
+static ssize_t host_version_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct vbg_dev *gdev = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%s\n", gdev->host_version);
+}
+
+static ssize_t host_features_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct vbg_dev *gdev = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%#x\n", gdev->host_features);
+}
+
+static DEVICE_ATTR_RO(host_version);
+static DEVICE_ATTR_RO(host_features);
+
+/**
+ * Does the PCI detection and init of the device.
+ *
+ * Return: 0 on success, negated errno on failure.
+ */
+static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+	struct device *dev = &pci->dev;
+	resource_size_t io, io_len, mmio, mmio_len;
+	struct vmmdev_memory *vmmdev;
+	struct vbg_dev *gdev;
+	int ret;
+
+	gdev = devm_kzalloc(dev, sizeof(*gdev), GFP_KERNEL);
+	if (!gdev)
+		return -ENOMEM;
+
+	ret = pci_enable_device(pci);
+	if (ret != 0) {
+		vbg_err("vboxguest: Error enabling device: %d\n", ret);
+		return ret;
+	}
+
+	ret = -ENODEV;
+
+	io = pci_resource_start(pci, 0);
+	io_len = pci_resource_len(pci, 0);
+	if (!io || !io_len) {
+		vbg_err("vboxguest: Error IO-port resource (0) is missing\n");
+		goto err_disable_pcidev;
+	}
+	if (devm_request_region(dev, io, io_len, DEVICE_NAME) == NULL) {
+		vbg_err("vboxguest: Error could not claim IO resource\n");
+		ret = -EBUSY;
+		goto err_disable_pcidev;
+	}
+
+	mmio = pci_resource_start(pci, 1);
+	mmio_len = pci_resource_len(pci, 1);
+	if (!mmio || !mmio_len) {
+		vbg_err("vboxguest: Error MMIO resource (1) is missing\n");
+		goto err_disable_pcidev;
+	}
+
+	if (devm_request_mem_region(dev, mmio, mmio_len, DEVICE_NAME) == NULL) {
+		vbg_err("vboxguest: Error could not claim MMIO resource\n");
+		ret = -EBUSY;
+		goto err_disable_pcidev;
+	}
+
+	vmmdev = devm_ioremap(dev, mmio, mmio_len);
+	if (!vmmdev) {
+		vbg_err("vboxguest: Error ioremap failed; MMIO addr=%pap size=%pap\n",
+			&mmio, &mmio_len);
+		goto err_disable_pcidev;
+	}
+
+	/* Validate MMIO region version and size. */
+	if (vmmdev->version != VMMDEV_MEMORY_VERSION ||
+	    vmmdev->size < 32 || vmmdev->size > mmio_len) {
+		vbg_err("vboxguest: Bogus VMMDev memory; version=%08x (expected %08x) size=%d (expected <= %d)\n",
+			vmmdev->version, VMMDEV_MEMORY_VERSION,
+			vmmdev->size, (int)mmio_len);
+		goto err_disable_pcidev;
+	}
+
+	gdev->io_port = io;
+	gdev->mmio = vmmdev;
+	gdev->dev = dev;
+	gdev->misc_device.minor = MISC_DYNAMIC_MINOR;
+	gdev->misc_device.name = DEVICE_NAME;
+	gdev->misc_device.fops = &vbg_misc_device_fops;
+	gdev->misc_device_user.minor = MISC_DYNAMIC_MINOR;
+	gdev->misc_device_user.name = DEVICE_NAME_USER;
+	gdev->misc_device_user.fops = &vbg_misc_device_user_fops;
+
+	ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
+	if (ret)
+		goto err_disable_pcidev;
+
+	ret = vbg_create_input_device(gdev);
+	if (ret) {
+		vbg_err("vboxguest: Error creating input device: %d\n", ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
+			       DEVICE_NAME, gdev);
+	if (ret) {
+		vbg_err("vboxguest: Error requesting irq: %d\n", ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = misc_register(&gdev->misc_device);
+	if (ret) {
+		vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+			DEVICE_NAME, ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = misc_register(&gdev->misc_device_user);
+	if (ret) {
+		vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+			DEVICE_NAME_USER, ret);
+		goto err_unregister_misc_device;
+	}
+
+	mutex_lock(&vbg_gdev_mutex);
+	if (!vbg_gdev)
+		vbg_gdev = gdev;
+	else
+		ret = -EBUSY;
+	mutex_unlock(&vbg_gdev_mutex);
+
+	if (ret) {
+		vbg_err("vboxguest: Error more then 1 vbox guest pci device\n");
+		goto err_unregister_misc_device_user;
+	}
+
+	pci_set_drvdata(pci, gdev);
+	device_create_file(dev, &dev_attr_host_version);
+	device_create_file(dev, &dev_attr_host_features);
+
+	vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %pap (size %pap)\n",
+		 gdev->misc_device.minor, pci->irq, gdev->io_port,
+		 &mmio, &mmio_len);
+
+	return 0;
+
+err_unregister_misc_device_user:
+	misc_deregister(&gdev->misc_device_user);
+err_unregister_misc_device:
+	misc_deregister(&gdev->misc_device);
+err_vbg_core_exit:
+	vbg_core_exit(gdev);
+err_disable_pcidev:
+	pci_disable_device(pci);
+
+	return ret;
+}
+
+static void vbg_pci_remove(struct pci_dev *pci)
+{
+	struct vbg_dev *gdev = pci_get_drvdata(pci);
+
+	mutex_lock(&vbg_gdev_mutex);
+	vbg_gdev = NULL;
+	mutex_unlock(&vbg_gdev_mutex);
+
+	device_remove_file(gdev->dev, &dev_attr_host_features);
+	device_remove_file(gdev->dev, &dev_attr_host_version);
+	misc_deregister(&gdev->misc_device_user);
+	misc_deregister(&gdev->misc_device);
+	vbg_core_exit(gdev);
+	pci_disable_device(pci);
+}
+
+struct vbg_dev *vbg_get_gdev(void)
+{
+	mutex_lock(&vbg_gdev_mutex);
+
+	/*
+	 * Note on success we keep the mutex locked until vbg_put_gdev();
+	 * this stops vbg_pci_remove from removing the device from underneath
+	 * vboxsf. vboxsf will only hold a reference for a short while.
+	 */
+	if (vbg_gdev)
+		return vbg_gdev;
+
+	mutex_unlock(&vbg_gdev_mutex);
+	return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(vbg_get_gdev);
+
+void vbg_put_gdev(struct vbg_dev *gdev)
+{
+	WARN_ON(gdev != vbg_gdev);
+	mutex_unlock(&vbg_gdev_mutex);
+}
+EXPORT_SYMBOL(vbg_put_gdev);
+
+/**
+ * Callback for mouse events.
+ *
+ * This is called at the end of the ISR, after leaving the event spinlock, if
+ * VMMDEV_EVENT_MOUSE_POSITION_CHANGED was raised by the host.
+ *
+ * @gdev:		The device extension.
+ */
+void vbg_linux_mouse_event(struct vbg_dev *gdev)
+{
+	int rc;
+
+	/* Fetch the latest pointer position and report it to the input device */
+	gdev->mouse_status_req->mouse_features = 0;
+	gdev->mouse_status_req->pointer_pos_x = 0;
+	gdev->mouse_status_req->pointer_pos_y = 0;
+	rc = vbg_req_perform(gdev, gdev->mouse_status_req);
+	if (rc >= 0) {
+		input_report_abs(gdev->input, ABS_X,
+				 gdev->mouse_status_req->pointer_pos_x);
+		input_report_abs(gdev->input, ABS_Y,
+				 gdev->mouse_status_req->pointer_pos_y);
+		input_sync(gdev->input);
+	}
+}
+
+static const struct pci_device_id vbg_pci_ids[] = {
+	{ .vendor = VBOX_VENDORID, .device = VMMDEV_DEVICEID },
+	{}
+};
+MODULE_DEVICE_TABLE(pci, vbg_pci_ids);
+
+static struct pci_driver vbg_pci_driver = {
+	.name		= DEVICE_NAME,
+	.id_table	= vbg_pci_ids,
+	.probe		= vbg_pci_probe,
+	.remove		= vbg_pci_remove,
+};
+
+module_pci_driver(vbg_pci_driver);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_DESCRIPTION("Oracle VM VirtualBox Guest Additions for Linux Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
new file mode 100644
index 0000000..8daea691
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -0,0 +1,801 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
+ * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include "vboxguest_core.h"
+
+/* Get the pointer to the first parameter of a HGCM call request. */
+#define VMMDEV_HGCM_CALL_PARMS(a) \
+	((struct vmmdev_hgcm_function_parameter *)( \
+		(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))
+
+/* The max parameter buffer size for a user request. */
+#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
+/* The max parameter buffer size for a kernel request. */
+#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)
+
+#define VBG_DEBUG_PORT			0x504
+
+/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
+static DEFINE_SPINLOCK(vbg_log_lock);
+static char vbg_log_buf[128];
+
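+/*
+ * Log macro: messages go both to the hypervisor debug port (so they show
+ * up in the VirtualBox log) and to the kernel log via the given pr_ func.
+ */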
+#define VBG_LOG(name, pr_func) \
+void name(const char *fmt, ...)						\
+{									\
+	unsigned long flags;						\
+	va_list args;							\
+	int i, count;							\
+									\
+	va_start(args, fmt);						\
+	spin_lock_irqsave(&vbg_log_lock, flags);			\
+									\
+	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
+	for (i = 0; i < count; i++)					\
+		outb(vbg_log_buf[i], VBG_DEBUG_PORT);			\
+									\
+	pr_func("%s", vbg_log_buf);					\
+									\
+	spin_unlock_irqrestore(&vbg_log_lock, flags);			\
+	va_end(args);							\
+}									\
+EXPORT_SYMBOL(name)
+
+VBG_LOG(vbg_info, pr_info);
+VBG_LOG(vbg_warn, pr_warn);
+VBG_LOG(vbg_err, pr_err);
+#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+VBG_LOG(vbg_debug, pr_debug);
+#endif
+
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+{
+	struct vmmdev_request_header *req;
+
+	req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
+	if (!req)
+		return NULL;
+
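+	/* Fill with a non-zero pattern so uninitialized fields stand out. */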
+	memset(req, 0xaa, len);
+
+	req->size = len;
+	req->version = VMMDEV_REQUEST_HEADER_VERSION;
+	req->request_type = req_type;
+	req->rc = VERR_GENERAL_FAILURE;
+	req->reserved1 = 0;
+	req->reserved2 = 0;
+
+	return req;
+}
+
+/* Note this function returns a VBox status code, not a negative errno!! */
+int vbg_req_perform(struct vbg_dev *gdev, void *req)
+{
+	unsigned long phys_req = virt_to_phys(req);
+
+	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
+	/*
+	 * The host changes the request as a result of the outl, make sure
+	 * the outl and any reads of the req happen in the correct order.
+	 */
+	mb();
+
+	return ((struct vmmdev_request_header *)req)->rc;
+}
+
+static bool hgcm_req_done(struct vbg_dev *gdev,
+			  struct vmmdev_hgcmreq_header *header)
+{
+	unsigned long flags;
+	bool done;
+
+	spin_lock_irqsave(&gdev->event_spinlock, flags);
+	done = header->flags & VMMDEV_HGCM_REQ_DONE;
+	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+	return done;
+}
+
+int vbg_hgcm_connect(struct vbg_dev *gdev,
+		     struct vmmdev_hgcm_service_location *loc,
+		     u32 *client_id, int *vbox_status)
+{
+	struct vmmdev_hgcm_connect *hgcm_connect = NULL;
+	int rc;
+
+	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
+				     VMMDEVREQ_HGCM_CONNECT);
+	if (!hgcm_connect)
+		return -ENOMEM;
+
+	hgcm_connect->header.flags = 0;
+	memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
+	hgcm_connect->client_id = 0;
+
+	rc = vbg_req_perform(gdev, hgcm_connect);
+
+	if (rc == VINF_HGCM_ASYNC_EXECUTE)
+		wait_event(gdev->hgcm_wq,
+			   hgcm_req_done(gdev, &hgcm_connect->header));
+
+	if (rc >= 0) {
+		*client_id = hgcm_connect->client_id;
+		rc = hgcm_connect->header.result;
+	}
+
+	kfree(hgcm_connect);
+
+	*vbox_status = rc;
+	return 0;
+}
+EXPORT_SYMBOL(vbg_hgcm_connect);
+
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+{
+	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
+	int rc;
+
+	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
+					VMMDEVREQ_HGCM_DISCONNECT);
+	if (!hgcm_disconnect)
+		return -ENOMEM;
+
+	hgcm_disconnect->header.flags = 0;
+	hgcm_disconnect->client_id = client_id;
+
+	rc = vbg_req_perform(gdev, hgcm_disconnect);
+
+	if (rc == VINF_HGCM_ASYNC_EXECUTE)
+		wait_event(gdev->hgcm_wq,
+			   hgcm_req_done(gdev, &hgcm_disconnect->header));
+
+	if (rc >= 0)
+		rc = hgcm_disconnect->header.result;
+
+	kfree(hgcm_disconnect);
+
+	*vbox_status = rc;
+	return 0;
+}
+EXPORT_SYMBOL(vbg_hgcm_disconnect);
+
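+/* Pages spanned by a buffer, accounting for its offset in the first page. */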
+static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
+{
+	u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));
+
+	return size >> PAGE_SHIFT;
+}
+
+static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
+{
+	u32 page_count;
+
+	page_count = hgcm_call_buf_size_in_pages(buf, len);
+	*extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
+}
+
+static int hgcm_call_preprocess_linaddr(
+	const struct vmmdev_hgcm_function_parameter *src_parm,
+	void **bounce_buf_ret, size_t *extra)
+{
+	void *buf, *bounce_buf;
+	bool copy_in;
+	u32 len;
+	int ret;
+
+	buf = (void *)src_parm->u.pointer.u.linear_addr;
+	len = src_parm->u.pointer.size;
+	copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;
+
+	if (len > VBG_MAX_HGCM_USER_PARM)
+		return -E2BIG;
+
+	bounce_buf = kvmalloc(len, GFP_KERNEL);
+	if (!bounce_buf)
+		return -ENOMEM;
+
+	/*
+	 * Store the bounce buffer before copying so the caller can free it
+	 * even when copy_from_user() fails.
+	 */
+	*bounce_buf_ret = bounce_buf;
+
+	if (copy_in) {
+		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
+		if (ret)
+			return -EFAULT;
+	} else {
+		memset(bounce_buf, 0, len);
+	}
+
+	hgcm_call_add_pagelist_size(bounce_buf, len, extra);
+	return 0;
+}
+
+/**
+ * Preprocesses the HGCM call, validate parameters, alloc bounce buffers and
+ * figure out how much extra storage we need for page lists.
+ * Return: 0 or negative errno value.
+ * @src_parm:         Pointer to source function call parameters
+ * @parm_count:       Number of function call parameters.
+ * @bounce_bufs_ret:  Where to return the allocated bounce-buffer array
+ * @extra:            Where to return the extra request space needed for
+ *                    physical page lists.
+ */
+static int hgcm_call_preprocess(
+	const struct vmmdev_hgcm_function_parameter *src_parm,
+	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
+{
+	void *buf, **bounce_bufs = NULL;
+	u32 i, len;
+	int ret;
+
+	for (i = 0; i < parm_count; i++, src_parm++) {
+		switch (src_parm->type) {
+		case VMMDEV_HGCM_PARM_TYPE_32BIT:
+		case VMMDEV_HGCM_PARM_TYPE_64BIT:
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+			if (!bounce_bufs) {
+				bounce_bufs = kcalloc(parm_count,
+						      sizeof(void *),
+						      GFP_KERNEL);
+				if (!bounce_bufs)
+					return -ENOMEM;
+
+				*bounce_bufs_ret = bounce_bufs;
+			}
+
+			ret = hgcm_call_preprocess_linaddr(src_parm,
+							   &bounce_bufs[i],
+							   extra);
+			if (ret)
+				return ret;
+
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
+			buf = (void *)src_parm->u.pointer.u.linear_addr;
+			len = src_parm->u.pointer.size;
+			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
+				return -E2BIG;
+
+			hgcm_call_add_pagelist_size(buf, len, extra);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Translates linear address types to page list direction flags.
+ *
+ * Return: page list flags.
+ * @type:  The type.
+ */
+static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
+	enum vmmdev_hgcm_function_parameter_type type)
+{
+	switch (type) {
+	default:
+		WARN_ON(1);
+		/* Fall through */
+	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
+		return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;
+
+	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
+		return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;
+
+	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
+		return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
+	}
+}
+
+static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
+	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
+	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
+{
+	struct vmmdev_hgcm_pagelist *dst_pg_lst;
+	struct page *page;
+	bool is_vmalloc;
+	u32 i, page_count;
+
+	dst_parm->type = type;
+
+	if (len == 0) {
+		dst_parm->u.pointer.size = 0;
+		dst_parm->u.pointer.u.linear_addr = 0;
+		return;
+	}
+
+	dst_pg_lst = (void *)call + *off_extra;
+	page_count = hgcm_call_buf_size_in_pages(buf, len);
+	is_vmalloc = is_vmalloc_addr(buf);
+
+	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
+	dst_parm->u.page_list.size = len;
+	dst_parm->u.page_list.offset = *off_extra;
+	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
+	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
+	dst_pg_lst->page_count = page_count;
+
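+	/* Translate each (possibly vmalloc'ed) page to its physical address. */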
+	for (i = 0; i < page_count; i++) {
+		if (is_vmalloc)
+			page = vmalloc_to_page(buf);
+		else
+			page = virt_to_page(buf);
+
+		dst_pg_lst->pages[i] = page_to_phys(page);
+		buf += PAGE_SIZE;
+	}
+
+	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
+}
+
+/**
+ * Initializes the call request that we're sending to the host.
+ * @call:            The call to initialize.
+ * @client_id:       The client ID of the caller.
+ * @function:        The function number of the function to call.
+ * @src_parm:        Pointer to source function call parameters.
+ * @parm_count:      Number of function call parameters.
+ * @bounce_bufs:     The bouncebuffer array.
+ */
+static void hgcm_call_init_call(
+	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
+	const struct vmmdev_hgcm_function_parameter *src_parm,
+	u32 parm_count, void **bounce_bufs)
+{
+	struct vmmdev_hgcm_function_parameter *dst_parm =
+		VMMDEV_HGCM_CALL_PARMS(call);
+	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
+	void *buf;
+
+	call->header.flags = 0;
+	call->header.result = VINF_SUCCESS;
+	call->client_id = client_id;
+	call->function = function;
+	call->parm_count = parm_count;
+
+	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
+		switch (src_parm->type) {
+		case VMMDEV_HGCM_PARM_TYPE_32BIT:
+		case VMMDEV_HGCM_PARM_TYPE_64BIT:
+			*dst_parm = *src_parm;
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
+					       src_parm->u.pointer.size,
+					       src_parm->type, &off_extra);
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
+			buf = (void *)src_parm->u.pointer.u.linear_addr;
+			hgcm_call_init_linaddr(call, dst_parm, buf,
+					       src_parm->u.pointer.size,
+					       src_parm->type, &off_extra);
+			break;
+
+		default:
+			WARN_ON(1);
+			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
+		}
+	}
+}
+
+/**
+ * Tries to cancel a pending HGCM call.
+ *
+ * Return: VBox status code
+ */
+static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
+{
+	int rc;
+
+	/*
+	 * We use a pre-allocated request for cancellations, which is
+	 * protected by cancel_req_mutex. This means that all cancellations
+	 * get serialized; this should be fine since they should be rare.
+	 */
+	mutex_lock(&gdev->cancel_req_mutex);
+	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
+	rc = vbg_req_perform(gdev, gdev->cancel_req);
+	mutex_unlock(&gdev->cancel_req_mutex);
+
+	if (rc == VERR_NOT_IMPLEMENTED) {
+		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
+		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;
+
+		rc = vbg_req_perform(gdev, call);
+		if (rc == VERR_INVALID_PARAMETER)
+			rc = VERR_NOT_FOUND;
+	}
+
+	if (rc >= 0)
+		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
+
+	return rc;
+}
+
+/**
+ * Performs the call and completion wait.
+ * Return: 0 or negative errno value.
+ * @gdev:        The VBoxGuest device extension.
+ * @call:        The call to execute.
+ * @timeout_ms:  Timeout in ms.
+ * @leak_it:     Where to return the "leak it" indicator: set to true when
+ *               cancellation fails and the request must not be freed.
+ */
+static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
+			    u32 timeout_ms, bool *leak_it)
+{
+	int rc, cancel_rc, ret;
+	long timeout;
+
+	*leak_it = false;
+
+	rc = vbg_req_perform(gdev, call);
+
+	/*
+	 * If the call failed, then pretend success. Upper layers will
+	 * interpret the result code in the packet.
+	 */
+	if (rc < 0) {
+		call->header.result = rc;
+		return 0;
+	}
+
+	if (rc != VINF_HGCM_ASYNC_EXECUTE)
+		return 0;
+
+	/* Host decided to process the request asynchronously, wait for it */
+	if (timeout_ms == U32_MAX)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = msecs_to_jiffies(timeout_ms);
+
+	timeout = wait_event_interruptible_timeout(
+					gdev->hgcm_wq,
+					hgcm_req_done(gdev, &call->header),
+					timeout);
+
+	/* timeout > 0 means hgcm_req_done has returned true, so success */
+	if (timeout > 0)
+		return 0;
+
+	if (timeout == 0)
+		ret = -ETIMEDOUT;
+	else
+		ret = -EINTR;
+
+	/* Cancel the request */
+	cancel_rc = hgcm_cancel_call(gdev, call);
+	if (cancel_rc >= 0)
+		return ret;
+
+	/*
+	 * Failed to cancel; this should mean that the cancel has lost the
+	 * race with normal completion. Wait while the host completes it.
+	 */
+	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
+		timeout = msecs_to_jiffies(500);
+	else
+		timeout = msecs_to_jiffies(2000);
+
+	timeout = wait_event_timeout(gdev->hgcm_wq,
+				     hgcm_req_done(gdev, &call->header),
+				     timeout);
+
+	if (WARN_ON(timeout == 0)) {
+		/* We really should never get here */
+		vbg_err("%s: Call timedout and cancellation failed, leaking the request\n",
+			__func__);
+		*leak_it = true;
+		return ret;
+	}
+
+	/* The call has completed normally after all */
+	return 0;
+}
+
+/**
+ * Copies the result of the call back to the caller info structure and user
+ * buffers.
+ * Return: 0 or negative errno value.
+ * @call:            HGCM call request.
+ * @dst_parm:        Pointer to function call parameters destination.
+ * @parm_count:      Number of function call parameters.
+ * @bounce_bufs:     The bouncebuffer array.
+ */
+static int hgcm_call_copy_back_result(
+	const struct vmmdev_hgcm_call *call,
+	struct vmmdev_hgcm_function_parameter *dst_parm,
+	u32 parm_count, void **bounce_bufs)
+{
+	const struct vmmdev_hgcm_function_parameter *src_parm =
+		VMMDEV_HGCM_CALL_PARMS(call);
+	void __user *p;
+	int ret;
+	u32 i;
+
+	/* Copy back parameters. */
+	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
+		switch (dst_parm->type) {
+		case VMMDEV_HGCM_PARM_TYPE_32BIT:
+		case VMMDEV_HGCM_PARM_TYPE_64BIT:
+			*dst_parm = *src_parm;
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
+			dst_parm->u.page_list.size = src_parm->u.page_list.size;
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
+			dst_parm->u.pointer.size = src_parm->u.pointer.size;
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+			dst_parm->u.pointer.size = src_parm->u.pointer.size;
+
+			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
+			ret = copy_to_user(p, bounce_bufs[i],
+					   min(src_parm->u.pointer.size,
+					       dst_parm->u.pointer.size));
+			if (ret)
+				return -EFAULT;
+			break;
+
+		default:
+			WARN_ON(1);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
+		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
+		  u32 parm_count, int *vbox_status)
+{
+	struct vmmdev_hgcm_call *call;
+	void **bounce_bufs = NULL;
+	bool leak_it;
+	size_t size;
+	int i, ret;
+
+	size = sizeof(struct vmmdev_hgcm_call) +
+		   parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
+	/*
+	 * Validate and buffer the parameters for the call. This also increases
+	 * size with the amount of extra space needed for page lists.
+	 */
+	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
+	if (ret) {
+		/* Even on error bounce bufs may still have been allocated */
+		goto free_bounce_bufs;
+	}
+
+	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+	if (!call) {
+		ret = -ENOMEM;
+		goto free_bounce_bufs;
+	}
+
+	hgcm_call_init_call(call, client_id, function, parms, parm_count,
+			    bounce_bufs);
+
+	ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
+	if (ret == 0) {
+		*vbox_status = call->header.result;
+		ret = hgcm_call_copy_back_result(call, parms, parm_count,
+						 bounce_bufs);
+	}
+
+	if (!leak_it)
+		kfree(call);
+
+free_bounce_bufs:
+	if (bounce_bufs) {
+		for (i = 0; i < parm_count; i++)
+			kvfree(bounce_bufs[i]);
+		kfree(bounce_bufs);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(vbg_hgcm_call);
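+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a caller passing
+ * a single 32-bit parameter. client_id would come from a prior HGCM connect,
+ * gdev from the driver's device extension, and MY_SVC_FN is a made-up
+ * function number.
+ *
+ *	struct vmmdev_hgcm_function_parameter parm = {
+ *		.type = VMMDEV_HGCM_PARM_TYPE_32BIT,
+ *		.u.value32 = 42,
+ *	};
+ *	int vbox_status, ret;
+ *
+ *	ret = vbg_hgcm_call(gdev, client_id, MY_SVC_FN, U32_MAX,
+ *			    &parm, 1, &vbox_status);
+ *	if (ret == 0 && vbox_status < 0)
+ *		ret = vbg_status_code_to_errno(vbox_status);
+ */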
+
+#ifdef CONFIG_COMPAT
+int vbg_hgcm_call32(
+	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
+	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
+	int *vbox_status)
+{
+	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
+	u32 i, size;
+	int ret = 0;
+
+	/* KISS allocate a temporary request and convert the parameters. */
+	size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
+	parm64 = kzalloc(size, GFP_KERNEL);
+	if (!parm64)
+		return -ENOMEM;
+
+	for (i = 0; i < parm_count; i++) {
+		switch (parm32[i].type) {
+		case VMMDEV_HGCM_PARM_TYPE_32BIT:
+			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+			parm64[i].u.value32 = parm32[i].u.value32;
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_64BIT:
+			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
+			parm64[i].u.value64 = parm32[i].u.value64;
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+			parm64[i].type = parm32[i].type;
+			parm64[i].u.pointer.size = parm32[i].u.pointer.size;
+			parm64[i].u.pointer.u.linear_addr =
+			    parm32[i].u.pointer.u.linear_addr;
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+		if (ret < 0)
+			goto out_free;
+	}
+
+	ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+			    parm64, parm_count, vbox_status);
+	if (ret < 0)
+		goto out_free;
+
+	/* Copy back. */
+	for (i = 0; i < parm_count; i++, parm32++, parm64++) {
+		switch (parm64[i].type) {
+		case VMMDEV_HGCM_PARM_TYPE_32BIT:
+			parm32[i].u.value32 = parm64[i].u.value32;
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_64BIT:
+			parm32[i].u.value64 = parm64[i].u.value64;
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
+			break;
+
+		default:
+			WARN_ON(1);
+			ret = -EINVAL;
+		}
+	}
+
+out_free:
+	kfree(parm64);
+	return ret;
+}
+#endif
+
+static const int vbg_status_code_to_errno_table[] = {
+	[-VERR_ACCESS_DENIED]                            = -EPERM,
+	[-VERR_FILE_NOT_FOUND]                           = -ENOENT,
+	[-VERR_PROCESS_NOT_FOUND]                        = -ESRCH,
+	[-VERR_INTERRUPTED]                              = -EINTR,
+	[-VERR_DEV_IO_ERROR]                             = -EIO,
+	[-VERR_TOO_MUCH_DATA]                            = -E2BIG,
+	[-VERR_BAD_EXE_FORMAT]                           = -ENOEXEC,
+	[-VERR_INVALID_HANDLE]                           = -EBADF,
+	[-VERR_TRY_AGAIN]                                = -EAGAIN,
+	[-VERR_NO_MEMORY]                                = -ENOMEM,
+	[-VERR_INVALID_POINTER]                          = -EFAULT,
+	[-VERR_RESOURCE_BUSY]                            = -EBUSY,
+	[-VERR_ALREADY_EXISTS]                           = -EEXIST,
+	[-VERR_NOT_SAME_DEVICE]                          = -EXDEV,
+	[-VERR_NOT_A_DIRECTORY]                          = -ENOTDIR,
+	[-VERR_PATH_NOT_FOUND]                           = -ENOTDIR,
+	[-VERR_IS_A_DIRECTORY]                           = -EISDIR,
+	[-VERR_INVALID_PARAMETER]                        = -EINVAL,
+	[-VERR_TOO_MANY_OPEN_FILES]                      = -ENFILE,
+	[-VERR_INVALID_FUNCTION]                         = -ENOTTY,
+	[-VERR_SHARING_VIOLATION]                        = -ETXTBSY,
+	[-VERR_FILE_TOO_BIG]                             = -EFBIG,
+	[-VERR_DISK_FULL]                                = -ENOSPC,
+	[-VERR_SEEK_ON_DEVICE]                           = -ESPIPE,
+	[-VERR_WRITE_PROTECT]                            = -EROFS,
+	[-VERR_BROKEN_PIPE]                              = -EPIPE,
+	[-VERR_DEADLOCK]                                 = -EDEADLK,
+	[-VERR_FILENAME_TOO_LONG]                        = -ENAMETOOLONG,
+	[-VERR_FILE_LOCK_FAILED]                         = -ENOLCK,
+	[-VERR_NOT_IMPLEMENTED]                          = -ENOSYS,
+	[-VERR_NOT_SUPPORTED]                            = -ENOSYS,
+	[-VERR_DIR_NOT_EMPTY]                            = -ENOTEMPTY,
+	[-VERR_TOO_MANY_SYMLINKS]                        = -ELOOP,
+	[-VERR_NO_DATA]                                  = -ENODATA,
+	[-VERR_NET_NO_NETWORK]                           = -ENONET,
+	[-VERR_NET_NOT_UNIQUE_NAME]                      = -ENOTUNIQ,
+	[-VERR_NO_TRANSLATION]                           = -EILSEQ,
+	[-VERR_NET_NOT_SOCKET]                           = -ENOTSOCK,
+	[-VERR_NET_DEST_ADDRESS_REQUIRED]                = -EDESTADDRREQ,
+	[-VERR_NET_MSG_SIZE]                             = -EMSGSIZE,
+	[-VERR_NET_PROTOCOL_TYPE]                        = -EPROTOTYPE,
+	[-VERR_NET_PROTOCOL_NOT_AVAILABLE]               = -ENOPROTOOPT,
+	[-VERR_NET_PROTOCOL_NOT_SUPPORTED]               = -EPROTONOSUPPORT,
+	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED]            = -ESOCKTNOSUPPORT,
+	[-VERR_NET_OPERATION_NOT_SUPPORTED]              = -EOPNOTSUPP,
+	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED]        = -EPFNOSUPPORT,
+	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED]         = -EAFNOSUPPORT,
+	[-VERR_NET_ADDRESS_IN_USE]                       = -EADDRINUSE,
+	[-VERR_NET_ADDRESS_NOT_AVAILABLE]                = -EADDRNOTAVAIL,
+	[-VERR_NET_DOWN]                                 = -ENETDOWN,
+	[-VERR_NET_UNREACHABLE]                          = -ENETUNREACH,
+	[-VERR_NET_CONNECTION_RESET]                     = -ENETRESET,
+	[-VERR_NET_CONNECTION_ABORTED]                   = -ECONNABORTED,
+	[-VERR_NET_CONNECTION_RESET_BY_PEER]             = -ECONNRESET,
+	[-VERR_NET_NO_BUFFER_SPACE]                      = -ENOBUFS,
+	[-VERR_NET_ALREADY_CONNECTED]                    = -EISCONN,
+	[-VERR_NET_NOT_CONNECTED]                        = -ENOTCONN,
+	[-VERR_NET_SHUTDOWN]                             = -ESHUTDOWN,
+	[-VERR_NET_TOO_MANY_REFERENCES]                  = -ETOOMANYREFS,
+	[-VERR_TIMEOUT]                                  = -ETIMEDOUT,
+	[-VERR_NET_CONNECTION_REFUSED]                   = -ECONNREFUSED,
+	[-VERR_NET_HOST_DOWN]                            = -EHOSTDOWN,
+	[-VERR_NET_HOST_UNREACHABLE]                     = -EHOSTUNREACH,
+	[-VERR_NET_ALREADY_IN_PROGRESS]                  = -EALREADY,
+	[-VERR_NET_IN_PROGRESS]                          = -EINPROGRESS,
+	[-VERR_MEDIA_NOT_PRESENT]                        = -ENOMEDIUM,
+	[-VERR_MEDIA_NOT_RECOGNIZED]                     = -EMEDIUMTYPE,
+};
+
+int vbg_status_code_to_errno(int rc)
+{
+	if (rc >= 0)
+		return 0;
+
+	rc = -rc;
+	if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
+	    vbg_status_code_to_errno_table[rc] == 0) {
+		vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
+		return -EPROTO;
+	}
+
+	return vbg_status_code_to_errno_table[rc];
+}
+EXPORT_SYMBOL(vbg_status_code_to_errno);
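+
+/*
+ * Conversion sketch (illustrative, not part of this patch): callers usually
+ * convert a negative VBox status code before returning to the ioctl layer,
+ * e.g. VERR_NO_MEMORY maps to -ENOMEM via the table above:
+ *
+ *	if (vbox_status < 0)
+ *		return vbg_status_code_to_errno(vbox_status);
+ */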
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
new file mode 100644
index 0000000..77f0c8f
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * VBox Guest additions version info, this is used by the host to determine
+ * supported guest-addition features in some cases. So this will need to be
+ * synced with vbox upstream's versioning scheme when we implement / port
+ * new features from the upstream out-of-tree vboxguest driver.
+ */
+
+#ifndef __VBOX_VERSION_H__
+#define __VBOX_VERSION_H__
+
+/* Last synced October 4th 2017 */
+#define VBG_VERSION_MAJOR 5
+#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_BUILD 0
+#define VBG_SVN_REV 68940
+#define VBG_VERSION_STRING "5.2.0"
+
+#endif
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
new file mode 100644
index 0000000..5e2ae97
--- /dev/null
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * Virtual Device for Guest <-> VMM/Host communication interface
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#ifndef __VBOX_VMMDEV_H__
+#define __VBOX_VMMDEV_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+#include <linux/vbox_vmmdev_types.h>
+
+/* Port for generic request interface (relative offset). */
+#define VMMDEV_PORT_OFF_REQUEST                             0
+
+/** Layout of VMMDEV RAM region that contains information for guest. */
+struct vmmdev_memory {
+	/** The size of this structure. */
+	u32 size;
+	/** The structure version. (VMMDEV_MEMORY_VERSION) */
+	u32 version;
+
+	union {
+		struct {
+			/** Flag telling that VMMDev has events pending. */
+			u8 have_events;
+			/** Explicit padding, MBZ. */
+			u8 padding[3];
+		} V1_04;
+
+		struct {
+			/** Pending events flags, set by host. */
+			u32 host_events;
+			/** Mask of events the guest wants, set by guest. */
+			u32 guest_event_mask;
+		} V1_03;
+	} V;
+
+	/* struct vbva_memory, not used */
+};
+VMMDEV_ASSERT_SIZE(vmmdev_memory, 8 + 8);
+
+/** Version of vmmdev_memory structure (vmmdev_memory::version). */
+#define VMMDEV_MEMORY_VERSION   (1)
+
+/* Host mouse capabilities has been changed. */
+#define VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED             BIT(0)
+/* HGCM event. */
+#define VMMDEV_EVENT_HGCM                                   BIT(1)
+/* A display change request has been issued. */
+#define VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST                 BIT(2)
+/* Credentials are available for judgement. */
+#define VMMDEV_EVENT_JUDGE_CREDENTIALS                      BIT(3)
+/* The guest has been restored. */
+#define VMMDEV_EVENT_RESTORED                               BIT(4)
+/* Seamless mode state changed. */
+#define VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST           BIT(5)
+/* Memory balloon size changed. */
+#define VMMDEV_EVENT_BALLOON_CHANGE_REQUEST                 BIT(6)
+/* Statistics interval changed. */
+#define VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST     BIT(7)
+/* VRDP status changed. */
+#define VMMDEV_EVENT_VRDP                                   BIT(8)
+/* New mouse position data available. */
+#define VMMDEV_EVENT_MOUSE_POSITION_CHANGED                 BIT(9)
+/* CPU hotplug event occurred. */
+#define VMMDEV_EVENT_CPU_HOTPLUG                            BIT(10)
+/* The mask of valid events, for sanity checking. */
+#define VMMDEV_EVENT_VALID_EVENT_MASK                       0x000007ffU
+
+/*
+ * Additions are allowed to work only if additions_major == vmmdev_current &&
+ * additions_minor <= vmmdev_current. Additions version is reported to host
+ * (VMMDev) by VMMDEVREQ_REPORT_GUEST_INFO.
+ */
+#define VMMDEV_VERSION                      0x00010004
+#define VMMDEV_VERSION_MAJOR                (VMMDEV_VERSION >> 16)
+#define VMMDEV_VERSION_MINOR                (VMMDEV_VERSION & 0xffff)
+
+/* Maximum request packet size. */
+#define VMMDEV_MAX_VMMDEVREQ_SIZE           1048576
+
+/* Version of vmmdev_request_header structure. */
+#define VMMDEV_REQUEST_HEADER_VERSION       0x10001
+
+/** struct vmmdev_request_header - Generic VMMDev request header. */
+struct vmmdev_request_header {
+	/** IN: Size of the structure in bytes (including body). */
+	u32 size;
+	/** IN: Version of the structure.  */
+	u32 version;
+	/** IN: Type of the request. */
+	enum vmmdev_request_type request_type;
+	/** OUT: Return code. */
+	s32 rc;
+	/** Reserved field no.1. MBZ. */
+	u32 reserved1;
+	/** Reserved field no.2. MBZ. */
+	u32 reserved2;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
+
+/**
+ * struct vmmdev_mouse_status - Mouse status request structure.
+ *
+ * Used by VMMDEVREQ_GET_MOUSE_STATUS and VMMDEVREQ_SET_MOUSE_STATUS.
+ */
+struct vmmdev_mouse_status {
+	/** header */
+	struct vmmdev_request_header header;
+	/** Mouse feature mask. See VMMDEV_MOUSE_*. */
+	u32 mouse_features;
+	/** Mouse x position. */
+	s32 pointer_pos_x;
+	/** Mouse y position. */
+	s32 pointer_pos_y;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_mouse_status, 24 + 12);
+
+/* The guest can (== wants to) handle absolute coordinates.  */
+#define VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE                     BIT(0)
+/*
+ * The host can (== wants to) send absolute coordinates.
+ * (Input not captured.)
+ */
+#define VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE                    BIT(1)
+/*
+ * The guest can *NOT* switch to software cursor and therefore depends on the
+ * host cursor.
+ *
+ * When guest additions are installed and the host has promised to display the
+ * cursor itself, the guest installs a hardware mouse driver. Don't ask the
+ * guest to switch to a software cursor then.
+ */
+#define VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR                BIT(2)
+/* The host does NOT provide support for drawing the cursor itself. */
+#define VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER                  BIT(3)
+/* The guest can read VMMDev events to find out about pointer movement */
+#define VMMDEV_MOUSE_NEW_PROTOCOL                           BIT(4)
+/*
+ * If the guest changes the status of the VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR
+ * bit, the host will honour this.
+ */
+#define VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR        BIT(5)
+/*
+ * The host supplies an absolute pointing device.  The Guest Additions may
+ * wish to use this to decide whether to install their own driver.
+ */
+#define VMMDEV_MOUSE_HOST_HAS_ABS_DEV                       BIT(6)
+
+/* The minimum value our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE_MIN 0
+/* The maximum value our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE_MAX 0xFFFF
+
+/**
+ * struct vmmdev_host_version - VirtualBox host version request structure.
+ *
+ * VBG uses this to detect the presence of new features in the interface.
+ */
+struct vmmdev_host_version {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/** Major version. */
+	u16 major;
+	/** Minor version. */
+	u16 minor;
+	/** Build number. */
+	u32 build;
+	/** SVN revision. */
+	u32 revision;
+	/** Feature mask. */
+	u32 features;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_host_version, 24 + 16);
+
+/* Physical page lists are supported by HGCM. */
+#define VMMDEV_HVF_HGCM_PHYS_PAGE_LIST  BIT(0)
+
+/**
+ * struct vmmdev_mask - Structure to set / clear bits in a mask used for
+ * VMMDEVREQ_SET_GUEST_CAPABILITIES and VMMDEVREQ_CTL_GUEST_FILTER_MASK.
+ */
+struct vmmdev_mask {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/** Mask of bits to be set. */
+	u32 or_mask;
+	/** Mask of bits to be cleared. */
+	u32 not_mask;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8);
+
+/* The guest supports seamless display rendering. */
+#define VMMDEV_GUEST_SUPPORTS_SEAMLESS                      BIT(0)
+/* The guest supports mapping guest to host windows. */
+#define VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING     BIT(1)
+/*
+ * The guest graphical additions are active.
+ * Used for fast activation and deactivation of certain graphical operations
+ * (e.g. resizing & seamless). The legacy VMMDEVREQ_REPORT_GUEST_CAPABILITIES
+ * request sets this automatically, but VMMDEVREQ_SET_GUEST_CAPABILITIES does
+ * not.
+ */
+#define VMMDEV_GUEST_SUPPORTS_GRAPHICS                      BIT(2)
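+
+/*
+ * Illustrative note (not part of this patch): the host is expected to apply
+ * the masks from struct vmmdev_mask as "new = (old | or_mask) & ~not_mask",
+ * so e.g. announcing seamless support while dropping graphics support would
+ * look like:
+ *
+ *	req->or_mask = VMMDEV_GUEST_SUPPORTS_SEAMLESS;
+ *	req->not_mask = VMMDEV_GUEST_SUPPORTS_GRAPHICS;
+ */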
+
+/** struct vmmdev_hypervisorinfo - Hypervisor info structure. */
+struct vmmdev_hypervisorinfo {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/**
+	 * Guest virtual address of proposed hypervisor start.
+	 * Not used by VMMDEVREQ_GET_HYPERVISOR_INFO.
+	 */
+	u32 hypervisor_start;
+	/** Hypervisor size in bytes. */
+	u32 hypervisor_size;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hypervisorinfo, 24 + 8);
+
+/** struct vmmdev_events - Pending events structure. */
+struct vmmdev_events {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/** OUT: Pending event mask. */
+	u32 events;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_events, 24 + 4);
+
+#define VMMDEV_OSTYPE_LINUX26		0x53000
+#define VMMDEV_OSTYPE_X64		BIT(8)
+
+/** struct vmmdev_guestinfo - Guest information report. */
+struct vmmdev_guest_info {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/**
+	 * The VMMDev interface version expected by additions.
+	 * *Deprecated*, do not use anymore! Will be removed.
+	 */
+	u32 interface_version;
+	/** Guest OS type. */
+	u32 os_type;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
+
+/** struct vmmdev_guestinfo2 - Guest information report, version 2. */
+struct vmmdev_guest_info2 {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/** Major version. */
+	u16 additions_major;
+	/** Minor version. */
+	u16 additions_minor;
+	/** Build number. */
+	u32 additions_build;
+	/** SVN revision. */
+	u32 additions_revision;
+	/** Feature mask, currently unused. */
+	u32 additions_features;
+	/**
+	 * The intentional meaning of this field was:
+	 * Some additional information, for example 'Beta 1' or something like
+	 * that.
+	 *
+	 * The way it was actually implemented: VBG_VERSION_STRING.
+	 *
+	 * This means the first three members are duplicated in this field (if
+	 * the guest build config is sane). So, the user must check this and
+	 * chop it off before usage. Because of the Main code's blind trust in
+	 * the field's content, there is no way back.
+	 */
+	char name[128];
+};
+VMMDEV_ASSERT_SIZE(vmmdev_guest_info2, 24 + 144);
+
+enum vmmdev_guest_facility_type {
+	VBOXGUEST_FACILITY_TYPE_UNKNOWN          = 0,
+	VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER = 20,
+	/* VBoxGINA / VBoxCredProv / pam_vbox. */
+	VBOXGUEST_FACILITY_TYPE_AUTO_LOGON       = 90,
+	VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE     = 100,
+	/* VBoxTray (Windows), VBoxClient (Linux, Unix). */
+	VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT = 101,
+	VBOXGUEST_FACILITY_TYPE_SEAMLESS         = 1000,
+	VBOXGUEST_FACILITY_TYPE_GRAPHICS         = 1100,
+	VBOXGUEST_FACILITY_TYPE_ALL              = 0x7ffffffe,
+	/* Ensure the enum is a 32 bit data-type */
+	VBOXGUEST_FACILITY_TYPE_SIZEHACK         = 0x7fffffff
+};
+
+enum vmmdev_guest_facility_status {
+	VBOXGUEST_FACILITY_STATUS_INACTIVE    = 0,
+	VBOXGUEST_FACILITY_STATUS_PAUSED      = 1,
+	VBOXGUEST_FACILITY_STATUS_PRE_INIT    = 20,
+	VBOXGUEST_FACILITY_STATUS_INIT        = 30,
+	VBOXGUEST_FACILITY_STATUS_ACTIVE      = 50,
+	VBOXGUEST_FACILITY_STATUS_TERMINATING = 100,
+	VBOXGUEST_FACILITY_STATUS_TERMINATED  = 101,
+	VBOXGUEST_FACILITY_STATUS_FAILED      = 800,
+	VBOXGUEST_FACILITY_STATUS_UNKNOWN     = 999,
+	/* Ensure the enum is a 32 bit data-type */
+	VBOXGUEST_FACILITY_STATUS_SIZEHACK    = 0x7fffffff
+};
+
+/** struct vmmdev_guest_status - Guest Additions status structure. */
+struct vmmdev_guest_status {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/** Facility the status is indicated for. */
+	enum vmmdev_guest_facility_type facility;
+	/** Current guest status. */
+	enum vmmdev_guest_facility_status status;
+	/** Flags, not used at the moment. */
+	u32 flags;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_guest_status, 24 + 12);
+
+#define VMMDEV_MEMORY_BALLOON_CHUNK_SIZE             (1048576)
+#define VMMDEV_MEMORY_BALLOON_CHUNK_PAGES            (1048576 / 4096)
+
+/** struct vmmdev_memballoon_info - Memory-balloon info structure. */
+struct vmmdev_memballoon_info {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/** Balloon size in megabytes. */
+	u32 balloon_chunks;
+	/** Guest ram size in megabytes. */
+	u32 phys_mem_chunks;
+	/**
+	 * Setting this to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST indicates that
+	 * the request is a response to that event.
+	 * (Don't confuse this with VMMDEVREQ_ACKNOWLEDGE_EVENTS.)
+	 */
+	u32 event_ack;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_memballoon_info, 24 + 12);
+
+/** struct vmmdev_memballoon_change - Change the size of the balloon. */
+struct vmmdev_memballoon_change {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/** The number of pages in the array. */
+	u32 pages;
+	/** true = inflate, false = deflate.  */
+	u32 inflate;
+	/** Physical address (u64) of each page. */
+	u64 phys_page[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES];
+};
+
+/** struct vmmdev_write_core_dump - Write Core Dump request data. */
+struct vmmdev_write_core_dump {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/** Flags (reserved, MBZ). */
+	u32 flags;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_write_core_dump, 24 + 4);
+
+/** struct vmmdev_heartbeat - Heart beat check state structure. */
+struct vmmdev_heartbeat {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/** OUT: Guest heartbeat interval in nanosec. */
+	u64 interval_ns;
+	/** Heartbeat check flag. */
+	u8 enabled;
+	/** Explicit padding, MBZ. */
+	u8 padding[3];
+} __packed;
+VMMDEV_ASSERT_SIZE(vmmdev_heartbeat, 24 + 12);
+
+#define VMMDEV_HGCM_REQ_DONE      BIT(0)
+#define VMMDEV_HGCM_REQ_CANCELLED BIT(1)
+
+/** struct vmmdev_hgcmreq_header - vmmdev HGCM requests header. */
+struct vmmdev_hgcmreq_header {
+	/** Request header. */
+	struct vmmdev_request_header header;
+
+	/** HGCM flags. */
+	u32 flags;
+
+	/** Result code. */
+	s32 result;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcmreq_header, 24 + 8);
+
+/** struct vmmdev_hgcm_connect - HGCM connect request structure. */
+struct vmmdev_hgcm_connect {
+	/** HGCM request header. */
+	struct vmmdev_hgcmreq_header header;
+
+	/** IN: Description of service to connect to. */
+	struct vmmdev_hgcm_service_location loc;
+
+	/** OUT: Client identifier assigned by local instance of HGCM. */
+	u32 client_id;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_connect, 32 + 132 + 4);
+
+/** struct vmmdev_hgcm_disconnect - HGCM disconnect request structure. */
+struct vmmdev_hgcm_disconnect {
+	/** HGCM request header. */
+	struct vmmdev_hgcmreq_header header;
+
+	/** IN: Client identifier. */
+	u32 client_id;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_disconnect, 32 + 4);
+
+#define VMMDEV_HGCM_MAX_PARMS 32
+
+/** struct vmmdev_hgcm_call - HGCM call request structure. */
+struct vmmdev_hgcm_call {
+	/* request header */
+	struct vmmdev_hgcmreq_header header;
+
+	/** IN: Client identifier. */
+	u32 client_id;
+	/** IN: Service function number. */
+	u32 function;
+	/** IN: Number of parameters. */
+	u32 parm_count;
+	/** Parameters follow in form: HGCMFunctionParameter32|64 parms[X]; */
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_call, 32 + 12);
+
+/**
+ * struct vmmdev_hgcm_cancel2 - HGCM cancel request structure, version 2.
+ *
+ * After the request, header.rc will be:
+ *
+ * VINF_SUCCESS when cancelled.
+ * VERR_NOT_FOUND if the specified request cannot be found.
+ * VERR_INVALID_PARAMETER if the address is invalid.
+ */
+struct vmmdev_hgcm_cancel2 {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/** The physical address of the request to cancel. */
+	u32 phys_req_to_cancel;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_cancel2, 24 + 4);
+
+#endif
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 8124622..92500f6 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1290,7 +1290,7 @@ struct vme_error_handler *vme_register_error_handler(
 {
 	struct vme_error_handler *handler;
 
-	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+	handler = kmalloc(sizeof(*handler), GFP_ATOMIC);
 	if (!handler)
 		return NULL;
 
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index aa66c87..3694821 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -1,10 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/device.h>
-#include <linux/fpga/fpga-mgr.h>
 
 #ifndef _LINUX_FPGA_BRIDGE_H
 #define _LINUX_FPGA_BRIDGE_H
 
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+
 struct fpga_bridge;
 
 /**
@@ -12,11 +13,13 @@ struct fpga_bridge;
  * @enable_show: returns the FPGA bridge's status
  * @enable_set: set a FPGA bridge as enabled or disabled
  * @fpga_bridge_remove: set FPGA into a specific state during driver remove
+ * @groups: optional attribute groups.
  */
 struct fpga_bridge_ops {
 	int (*enable_show)(struct fpga_bridge *bridge);
 	int (*enable_set)(struct fpga_bridge *bridge, bool enable);
 	void (*fpga_bridge_remove)(struct fpga_bridge *bridge);
+	const struct attribute_group **groups;
 };
 
 /**
@@ -43,6 +46,8 @@ struct fpga_bridge {
 
 struct fpga_bridge *of_fpga_bridge_get(struct device_node *node,
 				       struct fpga_image_info *info);
+struct fpga_bridge *fpga_bridge_get(struct device *dev,
+				    struct fpga_image_info *info);
 void fpga_bridge_put(struct fpga_bridge *bridge);
 int fpga_bridge_enable(struct fpga_bridge *bridge);
 int fpga_bridge_disable(struct fpga_bridge *bridge);
@@ -50,9 +55,12 @@ int fpga_bridge_disable(struct fpga_bridge *bridge);
 int fpga_bridges_enable(struct list_head *bridge_list);
 int fpga_bridges_disable(struct list_head *bridge_list);
 void fpga_bridges_put(struct list_head *bridge_list);
-int fpga_bridge_get_to_list(struct device_node *np,
+int fpga_bridge_get_to_list(struct device *dev,
 			    struct fpga_image_info *info,
 			    struct list_head *bridge_list);
+int of_fpga_bridge_get_to_list(struct device_node *np,
+			       struct fpga_image_info *info,
+			       struct list_head *bridge_list);
 
 int fpga_bridge_register(struct device *dev, const char *name,
 			 const struct fpga_bridge_ops *br_ops, void *priv);
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index bfa14bc..3c6de23 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -1,7 +1,8 @@
 /*
  * FPGA Framework
  *
- *  Copyright (C) 2013-2015 Altera Corporation
+ *  Copyright (C) 2013-2016 Altera Corporation
+ *  Copyright (C) 2017 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -15,12 +16,12 @@
  * You should have received a copy of the GNU General Public License along with
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-
 #ifndef _LINUX_FPGA_MGR_H
 #define _LINUX_FPGA_MGR_H
 
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
 struct fpga_manager;
 struct sg_table;
 
@@ -83,12 +84,26 @@ enum fpga_mgr_states {
  * @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
  * @config_complete_timeout_us: maximum time for FPGA to switch to operating
  *	   status in the write_complete op.
+ * @firmware_name: name of FPGA image firmware file
+ * @sgt: scatter/gather table containing FPGA image
+ * @buf: contiguous buffer containing FPGA image
+ * @count: size of buf
+ * @dev: device that owns this
+ * @overlay: Device Tree overlay
  */
 struct fpga_image_info {
 	u32 flags;
 	u32 enable_timeout_us;
 	u32 disable_timeout_us;
 	u32 config_complete_timeout_us;
+	char *firmware_name;
+	struct sg_table *sgt;
+	const char *buf;
+	size_t count;
+	struct device *dev;
+#ifdef CONFIG_OF
+	struct device_node *overlay;
+#endif
 };
 
 /**
@@ -100,6 +115,7 @@ struct fpga_image_info {
  * @write_sg: write the scatter list of configuration data to the FPGA
  * @write_complete: set FPGA to operating state after writing is done
  * @fpga_remove: optional: Set FPGA into a specific state during driver remove
+ * @groups: optional attribute groups.
  *
  * fpga_manager_ops are the low level functions implemented by a specific
  * fpga manager driver.  The optional ones are tested for NULL before being
@@ -116,6 +132,7 @@ struct fpga_manager_ops {
 	int (*write_complete)(struct fpga_manager *mgr,
 			      struct fpga_image_info *info);
 	void (*fpga_remove)(struct fpga_manager *mgr);
+	const struct attribute_group **groups;
 };
 
 /**
@@ -138,14 +155,14 @@ struct fpga_manager {
 
 #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
 
-int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
-		      const char *buf, size_t count);
-int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
-			 struct sg_table *sgt);
+struct fpga_image_info *fpga_image_info_alloc(struct device *dev);
 
-int fpga_mgr_firmware_load(struct fpga_manager *mgr,
-			   struct fpga_image_info *info,
-			   const char *image_name);
+void fpga_image_info_free(struct fpga_image_info *info);
+
+int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info);
+
+int fpga_mgr_lock(struct fpga_manager *mgr);
+void fpga_mgr_unlock(struct fpga_manager *mgr);
 
 struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
 
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
new file mode 100644
index 0000000..b652031
--- /dev/null
+++ b/include/linux/fpga/fpga-region.h
@@ -0,0 +1,40 @@
+#ifndef _FPGA_REGION_H
+#define _FPGA_REGION_H
+
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-bridge.h>
+
+/**
+ * struct fpga_region - FPGA Region structure
+ * @dev: FPGA Region device
+ * @mutex: enforces exclusive reference to region
+ * @bridge_list: list of FPGA bridges specified in region
+ * @mgr: FPGA manager
+ * @info: FPGA image info
+ * @priv: private data
+ * @get_bridges: optional function to get bridges to a list
+ * @groups: optional attribute groups.
+ */
+struct fpga_region {
+	struct device dev;
+	struct mutex mutex; /* for exclusive reference to region */
+	struct list_head bridge_list;
+	struct fpga_manager *mgr;
+	struct fpga_image_info *info;
+	void *priv;
+	int (*get_bridges)(struct fpga_region *region);
+	const struct attribute_group **groups;
+};
+
+#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
+
+struct fpga_region *fpga_region_class_find(
+	struct device *start, const void *data,
+	int (*match)(struct device *, const void *));
+
+int fpga_region_program_fpga(struct fpga_region *region);
+int fpga_region_register(struct device *dev, struct fpga_region *region);
+int fpga_region_unregister(struct fpga_region *region);
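+
+/*
+ * Usage sketch (illustrative only, not part of this patch) of how the
+ * reworked fpga-mgr API referenced above fits together; "mgr", "dev" and
+ * the firmware name are placeholders and error handling is elided:
+ *
+ *	struct fpga_image_info *info = fpga_image_info_alloc(dev);
+ *
+ *	info->firmware_name = devm_kstrdup(dev, "image.bin", GFP_KERNEL);
+ *	if (fpga_mgr_lock(mgr) == 0) {
+ *		fpga_mgr_load(mgr, info);
+ *		fpga_mgr_unlock(mgr);
+ *	}
+ *	fpga_image_info_free(info);
+ */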
+
+#endif /* _FPGA_REGION_H */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index abb6dc2..48fb2b4 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -229,6 +229,12 @@ struct hda_device_id {
 	unsigned long driver_data;
 };
 
+struct sdw_device_id {
+	__u16 mfg_id;
+	__u16 part_id;
+	kernel_ulong_t driver_data;
+};
+
 /*
  * Struct used for matching a device
  */
@@ -452,6 +458,19 @@ struct spi_device_id {
 	kernel_ulong_t driver_data;	/* Data private to the driver */
 };
 
+/* SLIMbus */
+
+#define SLIMBUS_NAME_SIZE	32
+#define SLIMBUS_MODULE_PREFIX	"slim:"
+
+struct slim_device_id {
+	__u16 manf_id, prod_code;
+	__u16 dev_index, instance;
+
+	/* Data private to the driver */
+	kernel_ulong_t driver_data;
+};
+
 #define SPMI_NAME_SIZE	32
 #define SPMI_MODULE_PREFIX "spmi:"
 
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 15eddc1..b220773 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -24,6 +24,7 @@ struct module;
 struct device;
 struct i2c_client;
 struct irq_domain;
+struct slim_device;
 struct spi_device;
 struct spmi_device;
 struct regmap;
@@ -499,6 +500,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
 				 const struct regmap_config *config,
 				 struct lock_class_key *lock_key,
 				 const char *lock_name);
+struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name);
 struct regmap *__regmap_init_spi(struct spi_device *dev,
 				 const struct regmap_config *config,
 				 struct lock_class_key *lock_key,
@@ -616,6 +621,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
 				i2c, config)
 
 /**
+ * regmap_init_slimbus() - Initialise register map
+ *
+ * @slimbus: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_slimbus(slimbus, config)				\
+	__regmap_lockdep_wrapper(__regmap_init_slimbus, #config,	\
+				slimbus, config)
+
+/**
  * regmap_init_spi() - Initialise register map
  *
  * @dev: Device that will be interacted with
diff --git a/include/linux/siox.h b/include/linux/siox.h
new file mode 100644
index 0000000..d79624e
--- /dev/null
+++ b/include/linux/siox.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/device.h>
+
+#define to_siox_device(_dev)	container_of((_dev), struct siox_device, dev)
+struct siox_device {
+	struct list_head node; /* node in smaster->devices */
+	struct siox_master *smaster;
+	struct device dev;
+
+	const char *type;
+	size_t inbytes;
+	size_t outbytes;
+	u8 statustype;
+
+	u8 status_read_clean;
+	u8 status_written;
+	u8 status_written_lastcycle;
+	bool connected;
+
+	/* statistics */
+	unsigned int watchdog_errors;
+	unsigned int status_errors;
+
+	struct kernfs_node *status_errors_kn;
+	struct kernfs_node *watchdog_kn;
+	struct kernfs_node *watchdog_errors_kn;
+	struct kernfs_node *connected_kn;
+};
+
+bool siox_device_synced(struct siox_device *sdevice);
+bool siox_device_connected(struct siox_device *sdevice);
+
+struct siox_driver {
+	int (*probe)(struct siox_device *sdevice);
+	int (*remove)(struct siox_device *sdevice);
+	void (*shutdown)(struct siox_device *sdevice);
+
+	/*
+	 * buf is big enough to hold sdev->inbytes - 1 bytes, the status byte
+	 * is in the scope of the framework.
+	 */
+	int (*set_data)(struct siox_device *sdevice, u8 status, u8 buf[]);
+	/*
+	 * buf is big enough to hold sdev->outbytes - 1 bytes, the status byte
+	 * is in the scope of the framework
+	 */
+	int (*get_data)(struct siox_device *sdevice, const u8 buf[]);
+
+	struct device_driver driver;
+};
+
+static inline struct siox_driver *to_siox_driver(struct device_driver *driver)
+{
+	if (driver)
+		return container_of(driver, struct siox_driver, driver);
+	else
+		return NULL;
+}
+
+int __siox_driver_register(struct siox_driver *sdriver, struct module *owner);
+
+static inline int siox_driver_register(struct siox_driver *sdriver)
+{
+	return __siox_driver_register(sdriver, THIS_MODULE);
+}
+
+static inline void siox_driver_unregister(struct siox_driver *sdriver)
+{
+	return driver_unregister(&sdriver->driver);
+}
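+
+/*
+ * Minimal driver sketch (illustrative only, not part of this patch): a
+ * hypothetical driver for a "my-siox-dev" device type; the probe body and
+ * data handling are elided.
+ *
+ *	static int my_probe(struct siox_device *sdevice)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static struct siox_driver my_driver = {
+ *		.probe = my_probe,
+ *		.driver = { .name = "my-siox-dev" },
+ *	};
+ *
+ * with siox_driver_register(&my_driver) in module init and
+ * siox_driver_unregister(&my_driver) in module exit.
+ */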
diff --git a/include/linux/slimbus.h b/include/linux/slimbus.h
new file mode 100644
index 0000000..c36cf121
--- /dev/null
+++ b/include/linux/slimbus.h
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#ifndef _LINUX_SLIMBUS_H
+#define _LINUX_SLIMBUS_H
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/mod_devicetable.h>
+
+extern struct bus_type slimbus_bus;
+
+/**
+ * struct slim_eaddr - Enumeration address for a SLIMbus device
+ * @manf_id: Manufacturer Id for the device
+ * @prod_code: Product code
+ * @dev_index: Device index
+ * @instance: Instance value
+ */
+struct slim_eaddr {
+	u16 manf_id;
+	u16 prod_code;
+	u8 dev_index;
+	u8 instance;
+} __packed;
+
+/**
+ * enum slim_device_status - slim device status
+ * @SLIM_DEVICE_STATUS_DOWN: Slim device is absent or not reported yet.
+ * @SLIM_DEVICE_STATUS_UP: Slim device is announced on the bus.
+ * @SLIM_DEVICE_STATUS_RESERVED: Reserved for future use.
+ */
+enum slim_device_status {
+	SLIM_DEVICE_STATUS_DOWN = 0,
+	SLIM_DEVICE_STATUS_UP,
+	SLIM_DEVICE_STATUS_RESERVED,
+};
+
+struct slim_controller;
+
+/**
+ * struct slim_device - Slim device handle.
+ * @dev: Driver model representation of the device.
+ * @e_addr: Enumeration address of this device.
+ * @status: slim device status
+ * @ctrl: slim controller instance.
+ * @laddr: 1-byte Logical address of this device.
+ * @is_laddr_valid: indicates if the laddr is valid or not
+ *
+ * This is the client/device handle returned when a SLIMbus
+ * device is registered with a controller.
+ * A pointer to this structure is used by the client driver as a handle.
+ */
+struct slim_device {
+	struct device		dev;
+	struct slim_eaddr	e_addr;
+	struct slim_controller	*ctrl;
+	enum slim_device_status	status;
+	u8			laddr;
+	bool			is_laddr_valid;
+};
+
+#define to_slim_device(d) container_of(d, struct slim_device, dev)
+
+/**
+ * struct slim_driver - SLIMbus 'generic device' (slave) device driver
+ *				(similar to 'spi_device' on SPI)
+ * @probe: Binds this driver to a SLIMbus device.
+ * @remove: Unbinds this driver from the SLIMbus device.
+ * @shutdown: Standard shutdown callback used during powerdown/halt.
+ * @device_status: This callback is called when
+ *	- The device reports present and gets a laddr assigned
+ *	- The device reports absent, or the bus goes down.
+ * @driver: SLIMbus device drivers should initialize name and owner field of
+ *	    this structure
+ * @id_table: List of SLIMbus devices supported by this driver
+ */
+
+struct slim_driver {
+	int	(*probe)(struct slim_device *sl);
+	void	(*remove)(struct slim_device *sl);
+	void	(*shutdown)(struct slim_device *sl);
+	int	(*device_status)(struct slim_device *sl,
+				 enum slim_device_status s);
+	struct device_driver		driver;
+	const struct slim_device_id	*id_table;
+};
+#define to_slim_driver(d) container_of(d, struct slim_driver, driver)
+
+/**
+ * struct slim_val_inf - Slimbus value or information element
+ * @start_offset: Specifies starting offset in information/value element map
+ * @rbuf: buffer to read the values
+ * @wbuf: buffer to write
+ * @num_bytes: up to 16. This ensures that the message will fit the slicesize
+ *		per SLIMbus spec
+ * @comp: completion for asynchronous operations, valid only if TID is
+ *	  required for transaction, like REQUEST operations.
+ *	  Rest of the transactions are synchronous anyway.
+ */
+struct slim_val_inf {
+	u16			start_offset;
+	u8			num_bytes;
+	u8			*rbuf;
+	const u8		*wbuf;
+	struct	completion	*comp;
+};
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define slim_driver_register(drv) \
+	__slim_driver_register(drv, THIS_MODULE)
+int __slim_driver_register(struct slim_driver *drv, struct module *owner);
+void slim_driver_unregister(struct slim_driver *drv);
+
+/**
+ * module_slim_driver() - Helper macro for registering a SLIMbus driver
+ * @__slim_driver: slim_driver struct
+ *
+ * Helper macro for SLIMbus drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_slim_driver(__slim_driver) \
+	module_driver(__slim_driver, slim_driver_register, \
+			slim_driver_unregister)
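+
+/*
+ * Usage sketch (illustrative, not part of this patch): a hypothetical codec
+ * driver matching on made-up manufacturer/product codes:
+ *
+ *	static const struct slim_device_id my_slim_id[] = {
+ *		{ 0x217, 0x202, 0x0, 0x0 },
+ *		{ }
+ *	};
+ *
+ *	static struct slim_driver my_slim_driver = {
+ *		.probe = my_probe,
+ *		.id_table = my_slim_id,
+ *		.driver = { .name = "my-slim-codec" },
+ *	};
+ *	module_slim_driver(my_slim_driver);
+ */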
+
+static inline void *slim_get_devicedata(const struct slim_device *dev)
+{
+	return dev_get_drvdata(&dev->dev);
+}
+
+static inline void slim_set_devicedata(struct slim_device *dev, void *data)
+{
+	dev_set_drvdata(&dev->dev, data);
+}
+
+struct slim_device *slim_get_device(struct slim_controller *ctrl,
+				    struct slim_eaddr *e_addr);
+int slim_get_logical_addr(struct slim_device *sbdev);
+
+/* Information Element management messages */
+#define SLIM_MSG_MC_REQUEST_INFORMATION          0x20
+#define SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION    0x21
+#define SLIM_MSG_MC_REPLY_INFORMATION            0x24
+#define SLIM_MSG_MC_CLEAR_INFORMATION            0x28
+#define SLIM_MSG_MC_REPORT_INFORMATION           0x29
+
+/* Value Element management messages */
+#define SLIM_MSG_MC_REQUEST_VALUE                0x60
+#define SLIM_MSG_MC_REQUEST_CHANGE_VALUE         0x61
+#define SLIM_MSG_MC_REPLY_VALUE                  0x64
+#define SLIM_MSG_MC_CHANGE_VALUE                 0x68
+
+int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg,
+		  u8 mc);
+int slim_readb(struct slim_device *sdev, u32 addr);
+int slim_writeb(struct slim_device *sdev, u32 addr, u8 value);
+int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val);
+int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val);
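+
+/*
+ * Usage sketch (illustrative, not part of this patch): read-modify-write of
+ * a single value element; MY_REG is a made-up register address.
+ *
+ *	int val = slim_readb(sdev, MY_REG);
+ *
+ *	if (val >= 0)
+ *		slim_writeb(sdev, MY_REG, val | 0x1);
+ */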
+#endif /* _LINUX_SLIMBUS_H */
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
new file mode 100644
index 0000000..e91fdcf
--- /dev/null
+++ b/include/linux/soundwire/sdw.h
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#ifndef __SOUNDWIRE_H
+#define __SOUNDWIRE_H
+
+struct sdw_bus;
+struct sdw_slave;
+
+/* SDW spec defines and enums, as defined by MIPI 1.1. Spec */
+
+/* SDW Broadcast Device Number */
+#define SDW_BROADCAST_DEV_NUM		15
+
+/* SDW Enumeration Device Number */
+#define SDW_ENUM_DEV_NUM		0
+
+/* SDW Group Device Numbers */
+#define SDW_GROUP12_DEV_NUM		12
+#define SDW_GROUP13_DEV_NUM		13
+
+/* SDW Master Device Number, not supported yet */
+#define SDW_MASTER_DEV_NUM		14
+
+#define SDW_NUM_DEV_ID_REGISTERS	6
+
+#define SDW_MAX_DEVICES			11
+
+/**
+ * enum sdw_slave_status - Slave status
+ * @SDW_SLAVE_UNATTACHED: Slave is not attached with the bus.
+ * @SDW_SLAVE_ATTACHED: Slave is attached with bus.
+ * @SDW_SLAVE_ALERT: Some alert condition on the Slave
+ * @SDW_SLAVE_RESERVED: Reserved for future use
+ */
+enum sdw_slave_status {
+	SDW_SLAVE_UNATTACHED = 0,
+	SDW_SLAVE_ATTACHED = 1,
+	SDW_SLAVE_ALERT = 2,
+	SDW_SLAVE_RESERVED = 3,
+};
+
+/**
+ * enum sdw_command_response - Command response as defined by SDW spec
+ * @SDW_CMD_OK: cmd was successful
+ * @SDW_CMD_IGNORED: cmd was ignored
+ * @SDW_CMD_FAIL: cmd was NACKed
+ * @SDW_CMD_TIMEOUT: cmd timedout
+ * @SDW_CMD_FAIL_OTHER: cmd failed due to other reason than above
+ *
+ * NOTE: The enum differs from the actual Spec, where the response is a
+ * combination of ACK/NAK bits
+ *
+ * SDW_CMD_TIMEOUT/FAIL_OTHER is defined for SW use, not in spec
+ */
+enum sdw_command_response {
+	SDW_CMD_OK = 0,
+	SDW_CMD_IGNORED = 1,
+	SDW_CMD_FAIL = 2,
+	SDW_CMD_TIMEOUT = 3,
+	SDW_CMD_FAIL_OTHER = 4,
+};
+
+/*
+ * SDW properties, defined in MIPI DisCo spec v1.0
+ */
+enum sdw_clk_stop_reset_behave {
+	SDW_CLK_STOP_KEEP_STATUS = 1,
+};
+
+/**
+ * enum sdw_p15_behave - Slave Port 15 behaviour when the Master attempts a
+ * read
+ * @SDW_P15_READ_IGNORED: Read is ignored
+ * @SDW_P15_CMD_OK: Command is ok
+ */
+enum sdw_p15_behave {
+	SDW_P15_READ_IGNORED = 0,
+	SDW_P15_CMD_OK = 1,
+};
+
+/**
+ * enum sdw_dpn_type - Data port types
+ * @SDW_DPN_FULL: Full Data Port is supported
+ * @SDW_DPN_SIMPLE: Simplified Data Port as defined in spec.
+ * DPN_SampleCtrl2, DPN_OffsetCtrl2, DPN_HCtrl and DPN_BlockCtrl3
+ * are not implemented.
+ * @SDW_DPN_REDUCED: Reduced Data Port as defined in spec.
+ * DPN_SampleCtrl2, DPN_HCtrl are not implemented.
+ */
+enum sdw_dpn_type {
+	SDW_DPN_FULL = 0,
+	SDW_DPN_SIMPLE = 1,
+	SDW_DPN_REDUCED = 2,
+};
+
+/**
+ * enum sdw_clk_stop_mode - Clock Stop modes
+ * @SDW_CLK_STOP_MODE0: Slave can continue operation seamlessly on clock
+ * restart
+ * @SDW_CLK_STOP_MODE1: Slave may have entered a deeper power-saving mode,
+ * not capable of continuing operation seamlessly when the clock restarts
+ */
+enum sdw_clk_stop_mode {
+	SDW_CLK_STOP_MODE0 = 0,
+	SDW_CLK_STOP_MODE1 = 1,
+};
+
+/**
+ * struct sdw_dp0_prop - DP0 properties
+ * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64
+ * (inclusive)
+ * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64
+ * (inclusive)
+ * @num_words: number of wordlengths supported
+ * @words: wordlengths supported
+ * @flow_controlled: Slave implementation results in an OK_NotReady
+ * response
+ * @simple_ch_prep_sm: If channel prepare sequence is required
+ * @device_interrupts: If implementation-defined interrupts are supported
+ *
+ * The wordlengths are specified by the Spec as max, min AND number of
+ * discrete values; an implementation can define them based on the
+ * wordlengths it supports
+ */
+struct sdw_dp0_prop {
+	u32 max_word;
+	u32 min_word;
+	u32 num_words;
+	u32 *words;
+	bool flow_controlled;
+	bool simple_ch_prep_sm;
+	bool device_interrupts;
+};
+
+/**
+ * struct sdw_dpn_audio_mode - Audio mode properties for DPn
+ * @bus_min_freq: Minimum bus frequency, in Hz
+ * @bus_max_freq: Maximum bus frequency, in Hz
+ * @bus_num_freq: Number of discrete frequencies supported
+ * @bus_freq: Discrete bus frequencies, in Hz
+ * @min_freq: Minimum sampling frequency, in Hz
+ * @max_freq: Maximum sampling frequency, in Hz
+ * @num_freq: Number of discrete sampling frequency supported
+ * @freq: Discrete sampling frequencies, in Hz
+ * @prep_ch_behave: Specifies the dependencies between Channel Prepare
+ * sequence and bus clock configuration
+ * If 0, Channel Prepare can happen at any Bus clock rate
+ * If 1, Channel Prepare sequence shall happen only after Bus clock is
+ * changed to a frequency supported by this mode or compatible modes
+ * described by the next field
+ * @glitchless: Bitmap describing possible glitchless transitions from this
+ * Audio Mode to other Audio Modes
+ */
+struct sdw_dpn_audio_mode {
+	u32 bus_min_freq;
+	u32 bus_max_freq;
+	u32 bus_num_freq;
+	u32 *bus_freq;
+	u32 max_freq;
+	u32 min_freq;
+	u32 num_freq;
+	u32 *freq;
+	u32 prep_ch_behave;
+	u32 glitchless;
+};
+
+/**
+ * struct sdw_dpn_prop - Data Port DPn properties
+ * @num: port number
+ * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64
+ * (inclusive)
+ * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64
+ * (inclusive)
+ * @num_words: Number of discrete supported wordlengths
+ * @words: Discrete supported wordlength
+ * @type: Data port type. Full, Simplified or Reduced
+ * @max_grouping: Maximum number of samples that can be grouped together for
+ * a full data port
+ * @simple_ch_prep_sm: If the port supports simplified channel prepare state
+ * machine
+ * @ch_prep_timeout: Port-specific timeout value, in milliseconds
+ * @device_interrupts: If set, each bit corresponds to support for
+ * implementation-defined interrupts
+ * @max_ch: Maximum channels supported
+ * @min_ch: Minimum channels supported
+ * @num_ch: Number of discrete channels supported
+ * @ch: Discrete channels supported
+ * @num_ch_combinations: Number of channel combinations supported
+ * @ch_combinations: Channel combinations supported
+ * @modes: SDW mode supported
+ * @max_async_buffer: Number of samples that this port can buffer in
+ * asynchronous modes
+ * @block_pack_mode: Type of block port mode supported
+ * @port_encoding: Payload Channel Sample encoding schemes supported
+ * @audio_modes: Audio modes supported
+ */
+struct sdw_dpn_prop {
+	u32 num;
+	u32 max_word;
+	u32 min_word;
+	u32 num_words;
+	u32 *words;
+	enum sdw_dpn_type type;
+	u32 max_grouping;
+	bool simple_ch_prep_sm;
+	u32 ch_prep_timeout;
+	u32 device_interrupts;
+	u32 max_ch;
+	u32 min_ch;
+	u32 num_ch;
+	u32 *ch;
+	u32 num_ch_combinations;
+	u32 *ch_combinations;
+	u32 modes;
+	u32 max_async_buffer;
+	bool block_pack_mode;
+	u32 port_encoding;
+	struct sdw_dpn_audio_mode *audio_modes;
+};
+
+/**
+ * struct sdw_slave_prop - SoundWire Slave properties
+ * @mipi_revision: Spec version of the implementation
+ * @wake_capable: Wake-up events are supported
+ * @test_mode_capable: If test mode is supported
+ * @clk_stop_mode1: Clock-Stop Mode 1 is supported
+ * @simple_clk_stop_capable: Simple clock mode is supported
+ * @clk_stop_timeout: Worst-case latency of the Clock Stop Prepare State
+ * Machine transitions, in milliseconds
+ * @ch_prep_timeout: Worst-case latency of the Channel Prepare State Machine
+ * transitions, in milliseconds
+ * @reset_behave: Slave keeps the status of the SlaveStopClockPrepare
+ * state machine (P=1 SCSP_SM) after exit from clock-stop mode1
+ * @high_PHY_capable: Slave is HighPHY capable
+ * @paging_support: Slave implements paging registers SCP_AddrPage1 and
+ * SCP_AddrPage2
+ * @bank_delay_support: Slave implements bank delay/bridge support registers
+ * SCP_BankDelay and SCP_NextFrame
+ * @p15_behave: Slave behavior when the Master attempts a read to the Port15
+ * alias
+ * @lane_control_support: Slave supports lane control
+ * @master_count: Number of Masters present on this Slave
+ * @source_ports: Bitmap identifying source ports
+ * @sink_ports: Bitmap identifying sink ports
+ * @dp0_prop: Data Port 0 properties
+ * @src_dpn_prop: Source Data Port N properties
+ * @sink_dpn_prop: Sink Data Port N properties
+ */
+struct sdw_slave_prop {
+	u32 mipi_revision;
+	bool wake_capable;
+	bool test_mode_capable;
+	bool clk_stop_mode1;
+	bool simple_clk_stop_capable;
+	u32 clk_stop_timeout;
+	u32 ch_prep_timeout;
+	enum sdw_clk_stop_reset_behave reset_behave;
+	bool high_PHY_capable;
+	bool paging_support;
+	bool bank_delay_support;
+	enum sdw_p15_behave p15_behave;
+	bool lane_control_support;
+	u32 master_count;
+	u32 source_ports;
+	u32 sink_ports;
+	struct sdw_dp0_prop *dp0_prop;
+	struct sdw_dpn_prop *src_dpn_prop;
+	struct sdw_dpn_prop *sink_dpn_prop;
+};
+
+/**
+ * struct sdw_master_prop - Master properties
+ * @revision: MIPI spec version of the implementation
+ * @master_count: Number of masters
+ * @clk_stop_mode: Bitmap for Clock Stop modes supported
+ * @max_freq: Maximum Bus clock frequency, in Hz
+ * @num_clk_gears: Number of clock gears supported
+ * @clk_gears: Clock gears supported
+ * @num_freq: Number of clock frequencies supported, in Hz
+ * @freq: Clock frequencies supported, in Hz
+ * @default_frame_rate: Controller default Frame rate, in Hz
+ * @default_row: Number of rows
+ * @default_col: Number of columns
+ * @dynamic_frame: Dynamic frame supported
+ * @err_threshold: Number of times that software may retry sending a single
+ * command
+ * @dpn_prop: Data Port N properties
+ */
+struct sdw_master_prop {
+	u32 revision;
+	u32 master_count;
+	enum sdw_clk_stop_mode clk_stop_mode;
+	u32 max_freq;
+	u32 num_clk_gears;
+	u32 *clk_gears;
+	u32 num_freq;
+	u32 *freq;
+	u32 default_frame_rate;
+	u32 default_row;
+	u32 default_col;
+	bool dynamic_frame;
+	u32 err_threshold;
+	struct sdw_dpn_prop *dpn_prop;
+};
+
+int sdw_master_read_prop(struct sdw_bus *bus);
+int sdw_slave_read_prop(struct sdw_slave *slave);
+
+/*
+ * SDW Slave Structures and APIs
+ */
+
+/**
+ * struct sdw_slave_id - Slave ID
+ * @mfg_id: MIPI Manufacturer ID
+ * @part_id: Device Part ID
+ * @class_id: MIPI Class ID, currently unused;
+ * a placeholder in the MIPI SoundWire Spec
+ * @unique_id: Device unique ID
+ * @sdw_version: SDW version implemented
+ *
+ * The order of the IDs here does not follow the DisCo spec definitions
+ */
+struct sdw_slave_id {
+	__u16 mfg_id;
+	__u16 part_id;
+	__u8 class_id;
+	__u8 unique_id:4;
+	__u8 sdw_version:4;
+};
+
+/**
+ * struct sdw_slave_intr_status - Slave interrupt status
+ * @control_port: control port status
+ * @port: data port status
+ */
+struct sdw_slave_intr_status {
+	u8 control_port;
+	u8 port[15];
+};
+
+/**
+ * struct sdw_slave_ops - Slave driver callback ops
+ * @read_prop: Read Slave properties
+ * @interrupt_callback: Device interrupt notification (invoked in thread
+ * context)
+ * @update_status: Update Slave status
+ */
+struct sdw_slave_ops {
+	int (*read_prop)(struct sdw_slave *sdw);
+	int (*interrupt_callback)(struct sdw_slave *slave,
+			struct sdw_slave_intr_status *status);
+	int (*update_status)(struct sdw_slave *slave,
+			enum sdw_slave_status status);
+};
+
+/**
+ * struct sdw_slave - SoundWire Slave
+ * @id: MIPI device ID
+ * @dev: Linux device
+ * @status: Status reported by the Slave
+ * @bus: Bus handle
+ * @ops: Slave callback ops
+ * @prop: Slave properties
+ * @node: node for bus list
+ * @port_ready: Port ready completion flag for each Slave port
+ * @dev_num: Device Number assigned by Bus
+ */
+struct sdw_slave {
+	struct sdw_slave_id id;
+	struct device dev;
+	enum sdw_slave_status status;
+	struct sdw_bus *bus;
+	const struct sdw_slave_ops *ops;
+	struct sdw_slave_prop prop;
+	struct list_head node;
+	struct completion *port_ready;
+	u16 dev_num;
+};
+
+#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev)
+
+struct sdw_driver {
+	const char *name;
+
+	int (*probe)(struct sdw_slave *sdw,
+			const struct sdw_device_id *id);
+	int (*remove)(struct sdw_slave *sdw);
+	void (*shutdown)(struct sdw_slave *sdw);
+
+	const struct sdw_device_id *id_table;
+	const struct sdw_slave_ops *ops;
+
+	struct device_driver driver;
+};
+
+#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \
+	{ .mfg_id = (_mfg_id), .part_id = (_part_id), \
+	  .driver_data = (unsigned long)(_drv_data) }
+
+int sdw_handle_slave_status(struct sdw_bus *bus,
+			enum sdw_slave_status status[]);
+
+/*
+ * SDW master structures and APIs
+ */
+
+struct sdw_msg;
+
+/**
+ * struct sdw_defer - SDW deferred message
+ * @length: message length
+ * @complete: message completion
+ * @msg: SDW message
+ */
+struct sdw_defer {
+	int length;
+	struct completion complete;
+	struct sdw_msg *msg;
+};
+
+/**
+ * struct sdw_master_ops - Master driver ops
+ * @read_prop: Read Master properties
+ * @xfer_msg: Transfer message callback
+ * @xfer_msg_defer: Defer version of transfer message callback
+ * @reset_page_addr: Reset the SCP page address registers
+ */
+struct sdw_master_ops {
+	int (*read_prop)(struct sdw_bus *bus);
+
+	enum sdw_command_response (*xfer_msg)
+			(struct sdw_bus *bus, struct sdw_msg *msg);
+	enum sdw_command_response (*xfer_msg_defer)
+			(struct sdw_bus *bus, struct sdw_msg *msg,
+			struct sdw_defer *defer);
+	enum sdw_command_response (*reset_page_addr)
+			(struct sdw_bus *bus, unsigned int dev_num);
+};
+
+/**
+ * struct sdw_bus - SoundWire bus
+ * @dev: Master linux device
+ * @link_id: Link id number, can be 0 to N, unique for each Master
+ * @slaves: list of Slaves on this bus
+ * @assigned: Bitmap for Slave device numbers.
+ * A set bit means the number is in use, a clear bit means it is free.
+ * @bus_lock: bus lock
+ * @msg_lock: message lock
+ * @ops: Master callback ops
+ * @prop: Master properties
+ * @defer_msg: Defer message
+ * @clk_stop_timeout: Clock stop timeout computed
+ */
+struct sdw_bus {
+	struct device *dev;
+	unsigned int link_id;
+	struct list_head slaves;
+	DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
+	struct mutex bus_lock;
+	struct mutex msg_lock;
+	const struct sdw_master_ops *ops;
+	struct sdw_master_prop prop;
+	struct sdw_defer defer_msg;
+	unsigned int clk_stop_timeout;
+};
+
+int sdw_add_bus_master(struct sdw_bus *bus);
+void sdw_delete_bus_master(struct sdw_bus *bus);
+
+/* messaging and data APIs */
+
+int sdw_read(struct sdw_slave *slave, u32 addr);
+int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
+int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+
+#endif /* __SOUNDWIRE_H */
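A minimal sketch of how a Slave driver might use the messaging APIs declared
above; the register offset and bit mask here are hypothetical, not part of
this patch:

	/* Hypothetical helper: read-modify-write an 8-bit Slave register.
	 * sdw_read() returns the register value or a negative errno.
	 */
	static int example_toggle_bit(struct sdw_slave *slave, u32 reg, u8 mask)
	{
		int ret = sdw_read(slave, reg);

		if (ret < 0)
			return ret;

		return sdw_write(slave, reg, (u8)ret ^ mask);
	}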
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
new file mode 100644
index 0000000..4b37528
--- /dev/null
+++ b/include/linux/soundwire/sdw_intel.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#ifndef __SDW_INTEL_H
+#define __SDW_INTEL_H
+
+/**
+ * struct sdw_intel_res - SoundWire Intel resource structure
+ * @mmio_base: mmio base of SoundWire registers
+ * @irq: interrupt number
+ * @handle: ACPI parent handle
+ * @parent: parent device
+ */
+struct sdw_intel_res {
+	void __iomem *mmio_base;
+	int irq;
+	acpi_handle handle;
+	struct device *parent;
+};
+
+void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res);
+void sdw_intel_exit(void *arg);
+
+#endif
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
new file mode 100644
index 0000000..df472b1
--- /dev/null
+++ b/include/linux/soundwire/sdw_registers.h
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#ifndef __SDW_REGISTERS_H
+#define __SDW_REGISTERS_H
+
+/*
+ * Typically both register masks and shifts are defined, but the shift can
+ * be derived from the mask using bit primitives such as ffs(), so we do
+ * that and avoid defining the shifts separately.
+ */
+#define SDW_REG_SHIFT(n)			(ffs(n) - 1)
+
+/*
+ * SDW registers as defined by MIPI 1.1 Spec
+ */
+#define SDW_REGADDR				GENMASK(14, 0)
+#define SDW_SCP_ADDRPAGE2_MASK			GENMASK(22, 15)
+#define SDW_SCP_ADDRPAGE1_MASK			GENMASK(30, 23)
+
+#define SDW_REG_NO_PAGE				0x00008000
+#define SDW_REG_OPTIONAL_PAGE			0x00010000
+#define SDW_REG_MAX				0x80000000
+
+#define SDW_DPN_SIZE				0x100
+#define SDW_BANK1_OFFSET			0x10
+
+/*
+ * DP0 Interrupt register & bits
+ *
+ * The spec treats Status (RO) and Clear (WC) as separate registers, but
+ * they share the same address, so treat them as one register with WC.
+ */
+
+/* INT and STATUS registers share the same address */
+#define SDW_DP0_INT				0x0
+#define SDW_DP0_INTMASK				0x1
+#define SDW_DP0_PORTCTRL			0x2
+#define SDW_DP0_BLOCKCTRL1			0x3
+#define SDW_DP0_PREPARESTATUS			0x4
+#define SDW_DP0_PREPARECTRL			0x5
+
+#define SDW_DP0_INT_TEST_FAIL			BIT(0)
+#define SDW_DP0_INT_PORT_READY			BIT(1)
+#define SDW_DP0_INT_BRA_FAILURE			BIT(2)
+#define SDW_DP0_INT_IMPDEF1			BIT(5)
+#define SDW_DP0_INT_IMPDEF2			BIT(6)
+#define SDW_DP0_INT_IMPDEF3			BIT(7)
+
+#define SDW_DP0_PORTCTRL_DATAMODE		GENMASK(3, 2)
+#define SDW_DP0_PORTCTRL_NXTINVBANK		BIT(4)
+#define SDW_DP0_PORTCTRL_BPT_PAYLD		GENMASK(7, 6)
+
+#define SDW_DP0_CHANNELEN			0x20
+#define SDW_DP0_SAMPLECTRL1			0x22
+#define SDW_DP0_SAMPLECTRL2			0x23
+#define SDW_DP0_OFFSETCTRL1			0x24
+#define SDW_DP0_OFFSETCTRL2			0x25
+#define SDW_DP0_HCTRL				0x26
+#define SDW_DP0_LANECTRL			0x28
+
+/* INT and STATUS registers share the same address */
+#define SDW_SCP_INT1				0x40
+#define SDW_SCP_INTMASK1			0x41
+
+#define SDW_SCP_INT1_PARITY			BIT(0)
+#define SDW_SCP_INT1_BUS_CLASH			BIT(1)
+#define SDW_SCP_INT1_IMPL_DEF			BIT(2)
+#define SDW_SCP_INT1_SCP2_CASCADE		BIT(7)
+#define SDW_SCP_INT1_PORT0_3			GENMASK(6, 3)
+
+#define SDW_SCP_INTSTAT2			0x42
+#define SDW_SCP_INTSTAT2_SCP3_CASCADE		BIT(7)
+#define SDW_SCP_INTSTAT2_PORT4_10		GENMASK(6, 0)
+
+
+#define SDW_SCP_INTSTAT3			0x43
+#define SDW_SCP_INTSTAT3_PORT11_14		GENMASK(3, 0)
+
+/* Number of interrupt status registers */
+#define SDW_NUM_INT_STAT_REGISTERS		3
+
+/* Number of interrupt clear registers */
+#define SDW_NUM_INT_CLEAR_REGISTERS		1
+
+#define SDW_SCP_CTRL				0x44
+#define SDW_SCP_CTRL_CLK_STP_NOW		BIT(1)
+#define SDW_SCP_CTRL_FORCE_RESET		BIT(7)
+
+#define SDW_SCP_STAT				0x44
+#define SDW_SCP_STAT_CLK_STP_NF			BIT(0)
+#define SDW_SCP_STAT_HPHY_NOK			BIT(5)
+#define SDW_SCP_STAT_CURR_BANK			BIT(6)
+
+#define SDW_SCP_SYSTEMCTRL			0x45
+#define SDW_SCP_SYSTEMCTRL_CLK_STP_PREP		BIT(0)
+#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE		BIT(2)
+#define SDW_SCP_SYSTEMCTRL_WAKE_UP_EN		BIT(3)
+#define SDW_SCP_SYSTEMCTRL_HIGH_PHY		BIT(4)
+
+#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE0	0
+#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1	BIT(2)
+
+#define SDW_SCP_DEVNUMBER			0x46
+#define SDW_SCP_HIGH_PHY_CHECK			0x47
+#define SDW_SCP_ADDRPAGE1			0x48
+#define SDW_SCP_ADDRPAGE2			0x49
+#define SDW_SCP_KEEPEREN			0x4A
+#define SDW_SCP_BANKDELAY			0x4B
+#define SDW_SCP_TESTMODE			0x4F
+#define SDW_SCP_DEVID_0				0x50
+#define SDW_SCP_DEVID_1				0x51
+#define SDW_SCP_DEVID_2				0x52
+#define SDW_SCP_DEVID_3				0x53
+#define SDW_SCP_DEVID_4				0x54
+#define SDW_SCP_DEVID_5				0x55
+
+/* Banked Registers */
+#define SDW_SCP_FRAMECTRL_B0			0x60
+#define SDW_SCP_FRAMECTRL_B1			(0x60 + SDW_BANK1_OFFSET)
+#define SDW_SCP_NEXTFRAME_B0			0x61
+#define SDW_SCP_NEXTFRAME_B1			(0x61 + SDW_BANK1_OFFSET)
+
+/* INT and STATUS registers share the same address */
+#define SDW_DPN_INT(n)				(0x0 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_INTMASK(n)			(0x1 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_PORTCTRL(n)			(0x2 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_BLOCKCTRL1(n)			(0x3 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_PREPARESTATUS(n)		(0x4 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_PREPARECTRL(n)			(0x5 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_INT_TEST_FAIL			BIT(0)
+#define SDW_DPN_INT_PORT_READY			BIT(1)
+#define SDW_DPN_INT_IMPDEF1			BIT(5)
+#define SDW_DPN_INT_IMPDEF2			BIT(6)
+#define SDW_DPN_INT_IMPDEF3			BIT(7)
+
+#define SDW_DPN_PORTCTRL_FLOWMODE		GENMASK(1, 0)
+#define SDW_DPN_PORTCTRL_DATAMODE		GENMASK(3, 2)
+#define SDW_DPN_PORTCTRL_NXTINVBANK		BIT(4)
+
+#define SDW_DPN_BLOCKCTRL1_WDLEN		GENMASK(5, 0)
+
+#define SDW_DPN_PREPARECTRL_CH_PREP		GENMASK(7, 0)
+
+#define SDW_DPN_CHANNELEN_B0(n)			(0x20 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_CHANNELEN_B1(n)			(0x30 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_BLOCKCTRL2_B0(n)		(0x21 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_BLOCKCTRL2_B1(n)		(0x31 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_SAMPLECTRL1_B0(n)		(0x22 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_SAMPLECTRL1_B1(n)		(0x32 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_SAMPLECTRL2_B0(n)		(0x23 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_SAMPLECTRL2_B1(n)		(0x33 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_OFFSETCTRL1_B0(n)		(0x24 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_OFFSETCTRL1_B1(n)		(0x34 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_OFFSETCTRL2_B0(n)		(0x25 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_OFFSETCTRL2_B1(n)		(0x35 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_HCTRL_B0(n)			(0x26 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_HCTRL_B1(n)			(0x36 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_BLOCKCTRL3_B0(n)		(0x27 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_BLOCKCTRL3_B1(n)		(0x37 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_LANECTRL_B0(n)			(0x28 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_LANECTRL_B1(n)			(0x38 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_SAMPLECTRL_LOW			GENMASK(7, 0)
+#define SDW_DPN_SAMPLECTRL_HIGH			GENMASK(15, 8)
+
+#define SDW_DPN_HCTRL_HSTART			GENMASK(7, 4)
+#define SDW_DPN_HCTRL_HSTOP			GENMASK(3, 0)
+
+#define SDW_NUM_CASC_PORT_INTSTAT1		4
+#define SDW_CASC_PORT_START_INTSTAT1		0
+#define SDW_CASC_PORT_MASK_INTSTAT1		0x8
+#define SDW_CASC_PORT_REG_OFFSET_INTSTAT1	0x0
+
+#define SDW_NUM_CASC_PORT_INTSTAT2		7
+#define SDW_CASC_PORT_START_INTSTAT2		4
+#define SDW_CASC_PORT_MASK_INTSTAT2		1
+#define SDW_CASC_PORT_REG_OFFSET_INTSTAT2	1
+
+#define SDW_NUM_CASC_PORT_INTSTAT3		4
+#define SDW_CASC_PORT_START_INTSTAT3		11
+#define SDW_CASC_PORT_MASK_INTSTAT3		1
+#define SDW_CASC_PORT_REG_OFFSET_INTSTAT3	2
+
+#endif /* __SDW_REGISTERS_H */
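A short sketch of the mask/shift convention described at the top of this
header; the register value is hypothetical:

	/* Extract the HStart field from a (hypothetical) DPN_HCtrl readout. */
	u8 hctrl = 0x53;
	u8 hstart = (hctrl & SDW_DPN_HCTRL_HSTART) >>
		    SDW_REG_SHIFT(SDW_DPN_HCTRL_HSTART);
	/* SDW_DPN_HCTRL_HSTART is GENMASK(7, 4), so SDW_REG_SHIFT() yields 4
	 * and hstart evaluates to 0x5.
	 */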
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
new file mode 100644
index 0000000..9fd553e
--- /dev/null
+++ b/include/linux/soundwire/sdw_type.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-17 Intel Corporation.
+
+#ifndef __SOUNDWIRE_TYPES_H
+#define __SOUNDWIRE_TYPES_H
+
+extern struct bus_type sdw_bus_type;
+
+#define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver)
+
+#define sdw_register_driver(drv) \
+	__sdw_register_driver(drv, THIS_MODULE)
+
+int __sdw_register_driver(struct sdw_driver *drv, struct module *);
+void sdw_unregister_driver(struct sdw_driver *drv);
+
+int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);
+
+#endif /* __SOUNDWIRE_TYPES_H */
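A minimal registration sketch built on the helpers above; the driver name,
IDs and callbacks are placeholders, not part of this patch:

	static const struct sdw_device_id example_id_table[] = {
		SDW_SLAVE_ENTRY(0x025d, 0x700, 0),	/* hypothetical IDs */
		{},
	};

	static struct sdw_driver example_sdw_driver = {
		.name = "example-codec",
		.probe = example_probe,		/* placeholder callbacks */
		.remove = example_remove,
		.id_table = example_id_table,
		.ops = &example_slave_ops,
	};

	static int __init example_init(void)
	{
		/* expands to __sdw_register_driver(&example_sdw_driver, THIS_MODULE) */
		return sdw_register_driver(&example_sdw_driver);
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		sdw_unregister_driver(&example_sdw_driver);
	}
	module_exit(example_exit);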
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
new file mode 100644
index 0000000..c71def6
--- /dev/null
+++ b/include/linux/vbox_utils.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/* Copyright (C) 2006-2016 Oracle Corporation */
+
+#ifndef __VBOX_UTILS_H__
+#define __VBOX_UTILS_H__
+
+#include <linux/printk.h>
+#include <linux/vbox_vmmdev_types.h>
+
+struct vbg_dev;
+
+/**
+ * vboxguest logging functions; these log both to the backdoor and call
+ * the equivalent kernel pr_foo() function.
+ */
+__printf(1, 2) void vbg_info(const char *fmt, ...);
+__printf(1, 2) void vbg_warn(const char *fmt, ...);
+__printf(1, 2) void vbg_err(const char *fmt, ...);
+
+/* Only use backdoor logging for non-dynamic debug builds */
+#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+__printf(1, 2) void vbg_debug(const char *fmt, ...);
+#else
+#define vbg_debug pr_debug
+#endif
+
+/**
+ * Allocate memory for a generic request and initialize the request header.
+ *
+ * Return: the allocated memory
+ * @len:		Size of memory block required for the request.
+ * @req_type:		The generic request type.
+ */
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+
+/**
+ * Perform a generic request.
+ *
+ * Return: VBox status code
+ * @gdev:		The Guest extension device.
+ * @req:		Pointer to the request structure.
+ */
+int vbg_req_perform(struct vbg_dev *gdev, void *req);
+
+int vbg_hgcm_connect(struct vbg_dev *gdev,
+		     struct vmmdev_hgcm_service_location *loc,
+		     u32 *client_id, int *vbox_status);
+
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
+
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
+		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
+		  u32 parm_count, int *vbox_status);
+
+int vbg_hgcm_call32(
+	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
+	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
+	int *vbox_status);
+
+/**
+ * Convert a VirtualBox status code to a standard Linux kernel return value.
+ * Return: 0 or negative errno value.
+ * @rc:			VirtualBox status code to convert.
+ */
+int vbg_status_code_to_errno(int rc);
+
+/**
+ * Helper for the vboxsf driver to get a reference to the guest device.
+ * Return: a pointer to the gdev; or an ERR_PTR value on error.
+ */
+struct vbg_dev *vbg_get_gdev(void);
+
+/**
+ * Helper for the vboxsf driver to put a guest device reference.
+ * @gdev:		Reference returned by vbg_get_gdev to put.
+ */
+void vbg_put_gdev(struct vbg_dev *gdev);
+
+#endif
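To illustrate the HGCM helpers, a hedged sketch of connecting to a host
service and mapping the two-level status back to an errno; the service name
is illustrative:

	static int example_connect(struct vbg_dev *gdev, u32 *client_id)
	{
		struct vmmdev_hgcm_service_location loc = {
			.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING,
		};
		int vbox_status, ret;

		strscpy(loc.u.localhost.service_name, "VBoxSharedFolders",
			sizeof(loc.u.localhost.service_name));

		/* ret covers transport errors, vbox_status the host's verdict */
		ret = vbg_hgcm_connect(gdev, &loc, client_id, &vbox_status);
		if (ret < 0)
			return ret;

		return vbg_status_code_to_errno(vbox_status);
	}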
diff --git a/include/trace/events/siox.h b/include/trace/events/siox.h
new file mode 100644
index 0000000..68a43fc
--- /dev/null
+++ b/include/trace/events/siox.h
@@ -0,0 +1,66 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM siox
+
+#if !defined(_TRACE_SIOX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SIOX_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(siox_set_data,
+	    TP_PROTO(const struct siox_master *smaster,
+		     const struct siox_device *sdevice,
+		     unsigned int devno, size_t bufoffset),
+	    TP_ARGS(smaster, sdevice, devno, bufoffset),
+	    TP_STRUCT__entry(
+			     __field(int, busno)
+			     __field(unsigned int, devno)
+			     __field(size_t, inbytes)
+			     __dynamic_array(u8, buf, sdevice->inbytes)
+			    ),
+	    TP_fast_assign(
+			   __entry->busno = smaster->busno;
+			   __entry->devno = devno;
+			   __entry->inbytes = sdevice->inbytes;
+			   memcpy(__get_dynamic_array(buf),
+				  smaster->buf + bufoffset, sdevice->inbytes);
+			  ),
+	    TP_printk("siox-%d-%u [%*phD]",
+		      __entry->busno,
+		      __entry->devno,
+		      (int)__entry->inbytes, __get_dynamic_array(buf)
+		     )
+);
+
+TRACE_EVENT(siox_get_data,
+	    TP_PROTO(const struct siox_master *smaster,
+		     const struct siox_device *sdevice,
+		     unsigned int devno, u8 status_clean,
+		     size_t bufoffset),
+	    TP_ARGS(smaster, sdevice, devno, status_clean, bufoffset),
+	    TP_STRUCT__entry(
+			     __field(int, busno)
+			     __field(unsigned int, devno)
+			     __field(u8, status_clean)
+			     __field(size_t, outbytes)
+			     __dynamic_array(u8, buf, sdevice->outbytes)
+			    ),
+	    TP_fast_assign(
+			   __entry->busno = smaster->busno;
+			   __entry->devno = devno;
+			   __entry->status_clean = status_clean;
+			   __entry->outbytes = sdevice->outbytes;
+			   memcpy(__get_dynamic_array(buf),
+				  smaster->buf + bufoffset, sdevice->outbytes);
+			  ),
+	    TP_printk("siox-%d-%u (%02hhx) [%*phD]",
+		      __entry->busno,
+		      __entry->devno,
+		      __entry->status_clean,
+		      (int)__entry->outbytes, __get_dynamic_array(buf)
+		     )
+);
+
+#endif /* if !defined(_TRACE_SIOX_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
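TRACE_EVENT() generates a trace_<name>() helper for each event; a hedged
sketch of the call sites the definitions above imply (the surrounding
bus-master code is assumed, not shown in this patch):

	/* after writing sdevice->inbytes bytes at smaster->buf + bufoffset */
	trace_siox_set_data(smaster, sdevice, devno, bufoffset);

	/* after reading sdevice->outbytes bytes back from the device */
	trace_siox_get_data(smaster, sdevice, devno, status_clean, bufoffset);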
diff --git a/include/uapi/linux/lp.h b/include/uapi/linux/lp.h
index dafcfe4..8589a27 100644
--- a/include/uapi/linux/lp.h
+++ b/include/uapi/linux/lp.h
@@ -8,6 +8,8 @@
 #ifndef _UAPI_LINUX_LP_H
 #define _UAPI_LINUX_LP_H
 
+#include <linux/types.h>
+#include <linux/ioctl.h>
 
 /*
  * Per POSIX guidelines, this module reserves the LP and lp prefixes
@@ -88,7 +90,15 @@
 #define LPGETSTATS  0x060d  /* get statistics (struct lp_stats) */
 #endif
 #define LPGETFLAGS  0x060e  /* get status flags */
-#define LPSETTIMEOUT 0x060f /* set parport timeout */
+#define LPSETTIMEOUT_OLD 0x060f /* set parport timeout */
+#define LPSETTIMEOUT_NEW \
+	_IOW(0x6, 0xf, __s64[2]) /* set parport timeout */
+#if __BITS_PER_LONG == 64
+#define LPSETTIMEOUT LPSETTIMEOUT_OLD
+#else
+#define LPSETTIMEOUT (sizeof(time_t) > sizeof(__kernel_long_t) ? \
+	LPSETTIMEOUT_NEW : LPSETTIMEOUT_OLD)
+#endif
 
 /* timeout for printk'ing a timeout, in jiffies (100ths of a second).
    This is also used for re-checking error conditions if LP_ABORT is
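From userspace the new ioctl takes a two-element signed 64-bit array; a
hedged sketch, assuming the array holds seconds followed by microseconds
(device path and values are illustrative):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/lp.h>

	int fd = open("/dev/lp0", O_WRONLY);
	__s64 timeout[2] = { 5, 0 };	/* 5 s, 0 us (assumed layout) */

	if (fd >= 0)
		ioctl(fd, LPSETTIMEOUT_NEW, timeout);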
diff --git a/include/uapi/linux/vbox_err.h b/include/uapi/linux/vbox_err.h
new file mode 100644
index 0000000..7eae536
--- /dev/null
+++ b/include/uapi/linux/vbox_err.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright (C) 2017 Oracle Corporation */
+
+#ifndef __UAPI_VBOX_ERR_H__
+#define __UAPI_VBOX_ERR_H__
+
+#define VINF_SUCCESS                        0
+#define VERR_GENERAL_FAILURE                (-1)
+#define VERR_INVALID_PARAMETER              (-2)
+#define VERR_INVALID_MAGIC                  (-3)
+#define VERR_INVALID_HANDLE                 (-4)
+#define VERR_LOCK_FAILED                    (-5)
+#define VERR_INVALID_POINTER                (-6)
+#define VERR_IDT_FAILED                     (-7)
+#define VERR_NO_MEMORY                      (-8)
+#define VERR_ALREADY_LOADED                 (-9)
+#define VERR_PERMISSION_DENIED              (-10)
+#define VERR_VERSION_MISMATCH               (-11)
+#define VERR_NOT_IMPLEMENTED                (-12)
+#define VERR_INVALID_FLAGS                  (-13)
+
+#define VERR_NOT_EQUAL                      (-18)
+#define VERR_NOT_SYMLINK                    (-19)
+#define VERR_NO_TMP_MEMORY                  (-20)
+#define VERR_INVALID_FMODE                  (-21)
+#define VERR_WRONG_ORDER                    (-22)
+#define VERR_NO_TLS_FOR_SELF                (-23)
+#define VERR_FAILED_TO_SET_SELF_TLS         (-24)
+#define VERR_NO_CONT_MEMORY                 (-26)
+#define VERR_NO_PAGE_MEMORY                 (-27)
+#define VERR_THREAD_IS_DEAD                 (-29)
+#define VERR_THREAD_NOT_WAITABLE            (-30)
+#define VERR_PAGE_TABLE_NOT_PRESENT         (-31)
+#define VERR_INVALID_CONTEXT                (-32)
+#define VERR_TIMER_BUSY                     (-33)
+#define VERR_ADDRESS_CONFLICT               (-34)
+#define VERR_UNRESOLVED_ERROR               (-35)
+#define VERR_INVALID_FUNCTION               (-36)
+#define VERR_NOT_SUPPORTED                  (-37)
+#define VERR_ACCESS_DENIED                  (-38)
+#define VERR_INTERRUPTED                    (-39)
+#define VERR_TIMEOUT                        (-40)
+#define VERR_BUFFER_OVERFLOW                (-41)
+#define VERR_TOO_MUCH_DATA                  (-42)
+#define VERR_MAX_THRDS_REACHED              (-43)
+#define VERR_MAX_PROCS_REACHED              (-44)
+#define VERR_SIGNAL_REFUSED                 (-45)
+#define VERR_SIGNAL_PENDING                 (-46)
+#define VERR_SIGNAL_INVALID                 (-47)
+#define VERR_STATE_CHANGED                  (-48)
+#define VERR_INVALID_UUID_FORMAT            (-49)
+#define VERR_PROCESS_NOT_FOUND              (-50)
+#define VERR_PROCESS_RUNNING                (-51)
+#define VERR_TRY_AGAIN                      (-52)
+#define VERR_PARSE_ERROR                    (-53)
+#define VERR_OUT_OF_RANGE                   (-54)
+#define VERR_NUMBER_TOO_BIG                 (-55)
+#define VERR_NO_DIGITS                      (-56)
+#define VERR_NEGATIVE_UNSIGNED              (-57)
+#define VERR_NO_TRANSLATION                 (-58)
+
+#define VERR_NOT_FOUND                      (-78)
+#define VERR_INVALID_STATE                  (-79)
+#define VERR_OUT_OF_RESOURCES               (-80)
+
+#define VERR_FILE_NOT_FOUND                 (-102)
+#define VERR_PATH_NOT_FOUND                 (-103)
+#define VERR_INVALID_NAME                   (-104)
+#define VERR_ALREADY_EXISTS                 (-105)
+#define VERR_TOO_MANY_OPEN_FILES            (-106)
+#define VERR_SEEK                           (-107)
+#define VERR_NEGATIVE_SEEK                  (-108)
+#define VERR_SEEK_ON_DEVICE                 (-109)
+#define VERR_EOF                            (-110)
+#define VERR_READ_ERROR                     (-111)
+#define VERR_WRITE_ERROR                    (-112)
+#define VERR_WRITE_PROTECT                  (-113)
+#define VERR_SHARING_VIOLATION              (-114)
+#define VERR_FILE_LOCK_FAILED               (-115)
+#define VERR_FILE_LOCK_VIOLATION            (-116)
+#define VERR_CANT_CREATE                    (-117)
+#define VERR_CANT_DELETE_DIRECTORY          (-118)
+#define VERR_NOT_SAME_DEVICE                (-119)
+#define VERR_FILENAME_TOO_LONG              (-120)
+#define VERR_MEDIA_NOT_PRESENT              (-121)
+#define VERR_MEDIA_NOT_RECOGNIZED           (-122)
+#define VERR_FILE_NOT_LOCKED                (-123)
+#define VERR_FILE_LOCK_LOST                 (-124)
+#define VERR_DIR_NOT_EMPTY                  (-125)
+#define VERR_NOT_A_DIRECTORY                (-126)
+#define VERR_IS_A_DIRECTORY                 (-127)
+#define VERR_FILE_TOO_BIG                   (-128)
+
+#define VERR_NET_IO_ERROR                       (-400)
+#define VERR_NET_OUT_OF_RESOURCES               (-401)
+#define VERR_NET_HOST_NOT_FOUND                 (-402)
+#define VERR_NET_PATH_NOT_FOUND                 (-403)
+#define VERR_NET_PRINT_ERROR                    (-404)
+#define VERR_NET_NO_NETWORK                     (-405)
+#define VERR_NET_NOT_UNIQUE_NAME                (-406)
+
+#define VERR_NET_IN_PROGRESS                    (-436)
+#define VERR_NET_ALREADY_IN_PROGRESS            (-437)
+#define VERR_NET_NOT_SOCKET                     (-438)
+#define VERR_NET_DEST_ADDRESS_REQUIRED          (-439)
+#define VERR_NET_MSG_SIZE                       (-440)
+#define VERR_NET_PROTOCOL_TYPE                  (-441)
+#define VERR_NET_PROTOCOL_NOT_AVAILABLE         (-442)
+#define VERR_NET_PROTOCOL_NOT_SUPPORTED         (-443)
+#define VERR_NET_SOCKET_TYPE_NOT_SUPPORTED      (-444)
+#define VERR_NET_OPERATION_NOT_SUPPORTED        (-445)
+#define VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED  (-446)
+#define VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED   (-447)
+#define VERR_NET_ADDRESS_IN_USE                 (-448)
+#define VERR_NET_ADDRESS_NOT_AVAILABLE          (-449)
+#define VERR_NET_DOWN                           (-450)
+#define VERR_NET_UNREACHABLE                    (-451)
+#define VERR_NET_CONNECTION_RESET               (-452)
+#define VERR_NET_CONNECTION_ABORTED             (-453)
+#define VERR_NET_CONNECTION_RESET_BY_PEER       (-454)
+#define VERR_NET_NO_BUFFER_SPACE                (-455)
+#define VERR_NET_ALREADY_CONNECTED              (-456)
+#define VERR_NET_NOT_CONNECTED                  (-457)
+#define VERR_NET_SHUTDOWN                       (-458)
+#define VERR_NET_TOO_MANY_REFERENCES            (-459)
+#define VERR_NET_CONNECTION_TIMED_OUT           (-460)
+#define VERR_NET_CONNECTION_REFUSED             (-461)
+#define VERR_NET_HOST_DOWN                      (-464)
+#define VERR_NET_HOST_UNREACHABLE               (-465)
+#define VERR_NET_PROTOCOL_ERROR                 (-466)
+#define VERR_NET_INCOMPLETE_TX_PACKET           (-467)
+
+/* misc. unsorted codes */
+#define VERR_RESOURCE_BUSY                      (-138)
+#define VERR_DISK_FULL                          (-152)
+#define VERR_TOO_MANY_SYMLINKS                  (-156)
+#define VERR_NO_MORE_FILES                      (-201)
+#define VERR_INTERNAL_ERROR                     (-225)
+#define VERR_INTERNAL_ERROR_2                   (-226)
+#define VERR_INTERNAL_ERROR_3                   (-227)
+#define VERR_INTERNAL_ERROR_4                   (-228)
+#define VERR_DEV_IO_ERROR                       (-250)
+#define VERR_IO_BAD_LENGTH                      (-255)
+#define VERR_BROKEN_PIPE                        (-301)
+#define VERR_NO_DATA                            (-304)
+#define VERR_SEM_DESTROYED                      (-363)
+#define VERR_DEADLOCK                           (-365)
+#define VERR_BAD_EXE_FORMAT                     (-608)
+#define VINF_HGCM_ASYNC_EXECUTE                 (2903)
+
+#endif
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
new file mode 100644
index 0000000..0e68024
--- /dev/null
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * Virtual Device for Guest <-> VMM/Host communication, type definitions
+ * which are also used for the vboxguest ioctl interface / by vboxsf
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#ifndef __UAPI_VBOX_VMMDEV_TYPES_H__
+#define __UAPI_VBOX_VMMDEV_TYPES_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/types.h>
+
+/*
+ * We cannot use the kernel's compiletime_assert here because it may only be
+ * used inside a function. Instead, use a typedef to a char array of negative size.
+ */
+#define VMMDEV_ASSERT_SIZE(type, size) \
+	typedef char type ## _asrt_size[1 - 2*!!(sizeof(struct type) != (size))]
+
+/** enum vmmdev_request_type - VMMDev request types. */
+enum vmmdev_request_type {
+	VMMDEVREQ_INVALID_REQUEST              =  0,
+	VMMDEVREQ_GET_MOUSE_STATUS             =  1,
+	VMMDEVREQ_SET_MOUSE_STATUS             =  2,
+	VMMDEVREQ_SET_POINTER_SHAPE            =  3,
+	VMMDEVREQ_GET_HOST_VERSION             =  4,
+	VMMDEVREQ_IDLE                         =  5,
+	VMMDEVREQ_GET_HOST_TIME                = 10,
+	VMMDEVREQ_GET_HYPERVISOR_INFO          = 20,
+	VMMDEVREQ_SET_HYPERVISOR_INFO          = 21,
+	VMMDEVREQ_REGISTER_PATCH_MEMORY        = 22, /* since version 3.0.6 */
+	VMMDEVREQ_DEREGISTER_PATCH_MEMORY      = 23, /* since version 3.0.6 */
+	VMMDEVREQ_SET_POWER_STATUS             = 30,
+	VMMDEVREQ_ACKNOWLEDGE_EVENTS           = 41,
+	VMMDEVREQ_CTL_GUEST_FILTER_MASK        = 42,
+	VMMDEVREQ_REPORT_GUEST_INFO            = 50,
+	VMMDEVREQ_REPORT_GUEST_INFO2           = 58, /* since version 3.2.0 */
+	VMMDEVREQ_REPORT_GUEST_STATUS          = 59, /* since version 3.2.8 */
+	VMMDEVREQ_REPORT_GUEST_USER_STATE      = 74, /* since version 4.3 */
+	/* Retrieve a display resize request sent by the host, deprecated. */
+	VMMDEVREQ_GET_DISPLAY_CHANGE_REQ       = 51,
+	VMMDEVREQ_VIDEMODE_SUPPORTED           = 52,
+	VMMDEVREQ_GET_HEIGHT_REDUCTION         = 53,
+	/**
+	 * @VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
+	 * Retrieve a display resize request sent by the host.
+	 *
+	 * Queries a display resize request sent from the host.  If the
+	 * event_ack member is set to true and there is an unqueried request
+	 * available for one of the virtual displays, then that request will
+	 * be returned.  If several displays have unqueried requests, the
+	 * lowest-numbered display is chosen first.  Only the most recent
+	 * unseen request for each display is remembered.
+	 * If event_ack is set to false, the last host request queried with
+	 * event_ack set is resent, or failing that, the most recent one
+	 * received from the host.  If no host request was ever received,
+	 * all zeros are returned.
+	 */
+	VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2      = 54,
+	VMMDEVREQ_REPORT_GUEST_CAPABILITIES    = 55,
+	VMMDEVREQ_SET_GUEST_CAPABILITIES       = 56,
+	VMMDEVREQ_VIDEMODE_SUPPORTED2          = 57, /* since version 3.2.0 */
+	VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX     = 80, /* since version 4.2.4 */
+	VMMDEVREQ_HGCM_CONNECT                 = 60,
+	VMMDEVREQ_HGCM_DISCONNECT              = 61,
+	VMMDEVREQ_HGCM_CALL32                  = 62,
+	VMMDEVREQ_HGCM_CALL64                  = 63,
+	VMMDEVREQ_HGCM_CANCEL                  = 64,
+	VMMDEVREQ_HGCM_CANCEL2                 = 65,
+	VMMDEVREQ_VIDEO_ACCEL_ENABLE           = 70,
+	VMMDEVREQ_VIDEO_ACCEL_FLUSH            = 71,
+	VMMDEVREQ_VIDEO_SET_VISIBLE_REGION     = 72,
+	VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ      = 73,
+	VMMDEVREQ_QUERY_CREDENTIALS            = 100,
+	VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT = 101,
+	VMMDEVREQ_REPORT_GUEST_STATS           = 110,
+	VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ    = 111,
+	VMMDEVREQ_GET_STATISTICS_CHANGE_REQ    = 112,
+	VMMDEVREQ_CHANGE_MEMBALLOON            = 113,
+	VMMDEVREQ_GET_VRDPCHANGE_REQ           = 150,
+	VMMDEVREQ_LOG_STRING                   = 200,
+	VMMDEVREQ_GET_CPU_HOTPLUG_REQ          = 210,
+	VMMDEVREQ_SET_CPU_HOTPLUG_STATUS       = 211,
+	VMMDEVREQ_REGISTER_SHARED_MODULE       = 212,
+	VMMDEVREQ_UNREGISTER_SHARED_MODULE     = 213,
+	VMMDEVREQ_CHECK_SHARED_MODULES         = 214,
+	VMMDEVREQ_GET_PAGE_SHARING_STATUS      = 215,
+	VMMDEVREQ_DEBUG_IS_PAGE_SHARED         = 216,
+	VMMDEVREQ_GET_SESSION_ID               = 217, /* since version 3.2.8 */
+	VMMDEVREQ_WRITE_COREDUMP               = 218,
+	VMMDEVREQ_GUEST_HEARTBEAT              = 219,
+	VMMDEVREQ_HEARTBEAT_CONFIGURE          = 220,
+	/* Ensure the enum is a 32 bit data-type */
+	VMMDEVREQ_SIZEHACK                     = 0x7fffffff
+};
+
+#if __BITS_PER_LONG == 64
+#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL64
+#else
+#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
+#endif
+
+/** HGCM service location types. */
+enum vmmdev_hgcm_service_location_type {
+	VMMDEV_HGCM_LOC_INVALID    = 0,
+	VMMDEV_HGCM_LOC_LOCALHOST  = 1,
+	VMMDEV_HGCM_LOC_LOCALHOST_EXISTING = 2,
+	/* Ensure the enum is a 32 bit data-type */
+	VMMDEV_HGCM_LOC_SIZEHACK   = 0x7fffffff
+};
+
+/** HGCM host service location. */
+struct vmmdev_hgcm_service_location_localhost {
+	/** Service name */
+	char service_name[128];
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_service_location_localhost, 128);
+
+/** HGCM service location. */
+struct vmmdev_hgcm_service_location {
+	/** Type of the location. */
+	enum vmmdev_hgcm_service_location_type type;
+
+	union {
+		struct vmmdev_hgcm_service_location_localhost localhost;
+	} u;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_service_location, 128 + 4);
+
+/** HGCM function parameter type. */
+enum vmmdev_hgcm_function_parameter_type {
+	VMMDEV_HGCM_PARM_TYPE_INVALID            = 0,
+	VMMDEV_HGCM_PARM_TYPE_32BIT              = 1,
+	VMMDEV_HGCM_PARM_TYPE_64BIT              = 2,
+	/** Deprecated: doesn't work, use PAGELIST. */
+	VMMDEV_HGCM_PARM_TYPE_PHYSADDR           = 3,
+	/** In and Out, user-memory */
+	VMMDEV_HGCM_PARM_TYPE_LINADDR            = 4,
+	/** In, user-memory  (read;  host<-guest) */
+	VMMDEV_HGCM_PARM_TYPE_LINADDR_IN         = 5,
+	/** Out, user-memory (write; host->guest) */
+	VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT        = 6,
+	/** In and Out, kernel-memory */
+	VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL     = 7,
+	/** In, kernel-memory  (read;  host<-guest) */
+	VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN  = 8,
+	/** Out, kernel-memory (write; host->guest) */
+	VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT = 9,
+	/** Physical addresses of locked pages for a buffer. */
+	VMMDEV_HGCM_PARM_TYPE_PAGELIST           = 10,
+	/* Ensure the enum is a 32 bit data-type */
+	VMMDEV_HGCM_PARM_TYPE_SIZEHACK           = 0x7fffffff
+};
+
+/** HGCM function parameter, 32-bit client. */
+struct vmmdev_hgcm_function_parameter32 {
+	enum vmmdev_hgcm_function_parameter_type type;
+	union {
+		__u32 value32;
+		__u64 value64;
+		struct {
+			__u32 size;
+			union {
+				__u32 phys_addr;
+				__u32 linear_addr;
+			} u;
+		} pointer;
+		struct {
+			/** Size of the buffer described by the page list. */
+			__u32 size;
+			/** Relative to the request header. */
+			__u32 offset;
+		} page_list;
+	} u;
+} __packed;
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_function_parameter32, 4 + 8);
+
+/** HGCM function parameter, 64-bit client. */
+struct vmmdev_hgcm_function_parameter64 {
+	enum vmmdev_hgcm_function_parameter_type type;
+	union {
+		__u32 value32;
+		__u64 value64;
+		struct {
+			__u32 size;
+			union {
+				__u64 phys_addr;
+				__u64 linear_addr;
+			} u;
+		} __packed pointer;
+		struct {
+			/** Size of the buffer described by the page list. */
+			__u32 size;
+			/** Relative to the request header. */
+			__u32 offset;
+		} page_list;
+	} __packed u;
+} __packed;
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_function_parameter64, 4 + 12);
+
+#if __BITS_PER_LONG == 64
+#define vmmdev_hgcm_function_parameter vmmdev_hgcm_function_parameter64
+#else
+#define vmmdev_hgcm_function_parameter vmmdev_hgcm_function_parameter32
+#endif
+
+#define VMMDEV_HGCM_F_PARM_DIRECTION_NONE      0x00000000U
+#define VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST   0x00000001U
+#define VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST 0x00000002U
+#define VMMDEV_HGCM_F_PARM_DIRECTION_BOTH      0x00000003U
+
+/**
+ * struct vmmdev_hgcm_pagelist - VMMDEV_HGCM_PARM_TYPE_PAGELIST parameters
+ * point to this structure to actually describe the buffer.
+ */
+struct vmmdev_hgcm_pagelist {
+	__u32 flags;             /** VMMDEV_HGCM_F_PARM_*. */
+	__u16 offset_first_page; /** Data offset in the first page. */
+	__u16 page_count;        /** Number of pages. */
+	__u64 pages[1];          /** Page addresses. */
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_pagelist, 4 + 2 + 2 + 8);
+
+#endif
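For reference, the negative-array-size trick used by VMMDEV_ASSERT_SIZE()
above turns a size mismatch into a compile error; a sketch of the expansion:

	/*
	 * VMMDEV_ASSERT_SIZE(vbg_ioctl_hdr, 24) expands to:
	 *
	 *   typedef char vbg_ioctl_hdr_asrt_size[
	 *           1 - 2*!!(sizeof(struct vbg_ioctl_hdr) != 24)];
	 *
	 * sizeof() == 24  ->  char [1]   compiles
	 * sizeof() != 24  ->  char [-1]  fails to compile
	 */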
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
new file mode 100644
index 0000000..612f0c7
--- /dev/null
+++ b/include/uapi/linux/vboxguest.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * VBoxGuest - VirtualBox Guest Additions Driver Interface.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#ifndef __UAPI_VBOXGUEST_H__
+#define __UAPI_VBOXGUEST_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/ioctl.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_vmmdev_types.h>
+
+/* Version of vbg_ioctl_hdr structure. */
+#define VBG_IOCTL_HDR_VERSION		0x10001
+/* Default request type.  Use this for non-VMMDev requests. */
+#define VBG_IOCTL_HDR_TYPE_DEFAULT		0
+
+/**
+ * Common ioctl header.
+ *
+ * This is a mirror of vmmdev_request_header to prevent duplicating data and
+ * needing to verify things multiple times.
+ */
+struct vbg_ioctl_hdr {
+	/** IN: The request input size, and output size if size_out is zero. */
+	__u32 size_in;
+	/** IN: Structure version (VBG_IOCTL_HDR_VERSION) */
+	__u32 version;
+	/** IN: The VMMDev request type or VBG_IOCTL_HDR_TYPE_DEFAULT. */
+	__u32 type;
+	/**
+	 * OUT: The VBox status code of the operation, out direction only.
+	 * This is a VINF_ or VERR_ value as defined in vbox_err.h.
+	 */
+	__s32 rc;
+	/** IN: Output size. Set to zero to use size_in as output size. */
+	__u32 size_out;
+	/** Reserved, MBZ. */
+	__u32 reserved;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_hdr, 24);
+
+
+/*
+ * The VBoxGuest I/O control version.
+ *
+ * As usual, the high word contains the major version, and a change to it
+ * signifies an incompatible change.
+ *
+ * The lower word is the minor version number, it is increased when new
+ * functions are added or existing changed in a backwards compatible manner.
+ */
+#define VBG_IOC_VERSION		0x00010000u
+
+/**
+ * VBG_IOCTL_DRIVER_VERSION_INFO data structure
+ *
+ * Note VBG_IOCTL_DRIVER_VERSION_INFO may switch the session to a backwards
+ * compatible interface version if req_version indicates older client code.
+ */
+struct vbg_ioctl_driver_version_info {
+	/** The header. */
+	struct vbg_ioctl_hdr hdr;
+	union {
+		struct {
+			/** Requested interface version (VBG_IOC_VERSION). */
+			__u32 req_version;
+			/**
+			 * Minimum interface version number (typically the
+			 * major version part of VBG_IOC_VERSION).
+			 */
+			__u32 min_version;
+			/** Reserved, MBZ. */
+			__u32 reserved1;
+			/** Reserved, MBZ. */
+			__u32 reserved2;
+		} in;
+		struct {
+			/** Version for this session (typ. VBG_IOC_VERSION). */
+			__u32 session_version;
+			/** Version of the IDC interface (VBG_IOC_VERSION). */
+			__u32 driver_version;
+			/** The SVN revision of the driver, or 0. */
+			__u32 driver_revision;
+			/** Reserved \#1 (zero until defined). */
+			__u32 reserved1;
+			/** Reserved \#2 (zero until defined). */
+			__u32 reserved2;
+		} out;
+	} u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20);
+
+#define VBG_IOCTL_DRIVER_VERSION_INFO \
+	_IOWR('V', 0, struct vbg_ioctl_driver_version_info)
+
+
+/* IOCTL to perform a VMM Device request less than 1KB in size. */
+#define VBG_IOCTL_VMMDEV_REQUEST(s)	_IOC(_IOC_READ | _IOC_WRITE, 'V', 2, s)
+
+
+/* IOCTL to perform a VMM Device request larger than 1KB. */
+#define VBG_IOCTL_VMMDEV_REQUEST_BIG	_IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
+
+
+/** VBG_IOCTL_HGCM_CONNECT data structure. */
+struct vbg_ioctl_hgcm_connect {
+	struct vbg_ioctl_hdr hdr;
+	union {
+		struct {
+			struct vmmdev_hgcm_service_location loc;
+		} in;
+		struct {
+			__u32 client_id;
+		} out;
+	} u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_connect, 24 + 132);
+
+#define VBG_IOCTL_HGCM_CONNECT \
+	_IOWR('V', 4, struct vbg_ioctl_hgcm_connect)
+
+
+/** VBG_IOCTL_HGCM_DISCONNECT data structure. */
+struct vbg_ioctl_hgcm_disconnect {
+	struct vbg_ioctl_hdr hdr;
+	union {
+		struct {
+			__u32 client_id;
+		} in;
+	} u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_disconnect, 24 + 4);
+
+#define VBG_IOCTL_HGCM_DISCONNECT \
+	_IOWR('V', 5, struct vbg_ioctl_hgcm_disconnect)
+
+
+/** VBG_IOCTL_HGCM_CALL data structure. */
+struct vbg_ioctl_hgcm_call {
+	/** The header. */
+	struct vbg_ioctl_hdr hdr;
+	/** Input: The id of the caller. */
+	__u32 client_id;
+	/** Input: Function number. */
+	__u32 function;
+	/**
+	 * Input: How long to wait (milliseconds) for completion before
+	 * cancelling the call. Set to -1 to wait indefinitely.
+	 */
+	__u32 timeout_ms;
+	/** Interruptible flag, ignored for userspace calls. */
+	__u8 interruptible;
+	/** Explicit padding, MBZ. */
+	__u8 reserved;
+	/**
+	 * Input: How many parameters follow this structure.
+	 *
+	 * The parameters are either HGCMFunctionParameter64 or 32,
+	 * depending on whether we're receiving a 64-bit or 32-bit request.
+	 *
+	 * The current maximum is 61 parameters (given a 1KB max request size,
+	 * and a 64-bit parameter size of 16 bytes).
+	 */
+	__u16 parm_count;
+	/*
+	 * Parameters follow in form:
+	 * struct hgcm_function_parameter<32|64> parms[parm_count]
+	 */
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_call, 24 + 16);
+
+#define VBG_IOCTL_HGCM_CALL_32(s)	_IOC(_IOC_READ | _IOC_WRITE, 'V', 6, s)
+#define VBG_IOCTL_HGCM_CALL_64(s)	_IOC(_IOC_READ | _IOC_WRITE, 'V', 7, s)
+#if __BITS_PER_LONG == 64
+#define VBG_IOCTL_HGCM_CALL(s)		VBG_IOCTL_HGCM_CALL_64(s)
+#else
+#define VBG_IOCTL_HGCM_CALL(s)		VBG_IOCTL_HGCM_CALL_32(s)
+#endif
+
+
+/** VBG_IOCTL_LOG data structure. */
+struct vbg_ioctl_log {
+	/** The header. */
+	struct vbg_ioctl_hdr hdr;
+	union {
+		struct {
+			/**
+			 * The log message; this may be zero-terminated. If it
+			 * is not, the length is determined from the input
+			 * size.
+			 */
+			char msg[1];
+		} in;
+	} u;
+};
+
+#define VBG_IOCTL_LOG(s)		_IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
+
+
+/** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */
+struct vbg_ioctl_wait_for_events {
+	/** The header. */
+	struct vbg_ioctl_hdr hdr;
+	union {
+		struct {
+			/** Timeout in milliseconds. */
+			__u32 timeout_ms;
+			/** Events to wait for. */
+			__u32 events;
+		} in;
+		struct {
+			/** Events that occurred. */
+			__u32 events;
+		} out;
+	} u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_wait_for_events, 24 + 8);
+
+#define VBG_IOCTL_WAIT_FOR_EVENTS \
+	_IOWR('V', 10, struct vbg_ioctl_wait_for_events)
+
+
+/*
+ * IOCTL to VBoxGuest to interrupt (cancel) any pending
+ * VBG_IOCTL_WAIT_FOR_EVENTS and return.
+ *
+ * Handled inside the vboxguest driver and not seen by the host at all.
+ * After calling this, VBG_IOCTL_WAIT_FOR_EVENTS should no longer be called in
+ * the same session. Any VBOXGUEST_IOCTL_WAITEVENT calls in the same session
+ * done after calling this will directly exit with -EINTR.
+ */
+#define VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS \
+	_IOWR('V', 11, struct vbg_ioctl_hdr)
+
+
+/** VBG_IOCTL_CHANGE_FILTER_MASK data structure. */
+struct vbg_ioctl_change_filter {
+	/** The header. */
+	struct vbg_ioctl_hdr hdr;
+	union {
+		struct {
+			/** Flags to set. */
+			__u32 or_mask;
+			/** Flags to remove. */
+			__u32 not_mask;
+		} in;
+	} u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_change_filter, 24 + 8);
+
+/* IOCTL to VBoxGuest to control the event filter mask. */
+#define VBG_IOCTL_CHANGE_FILTER_MASK \
+	_IOWR('V', 12, struct vbg_ioctl_change_filter)
+
+
+/** VBG_IOCTL_CHANGE_GUEST_CAPABILITIES data structure. */
+struct vbg_ioctl_set_guest_caps {
+	/** The header. */
+	struct vbg_ioctl_hdr hdr;
+	union {
+		struct {
+			/** Capabilities to set (VMMDEV_GUEST_SUPPORTS_XXX). */
+			__u32 or_mask;
+			/** Capabilities to drop (VMMDEV_GUEST_SUPPORTS_XXX). */
+			__u32 not_mask;
+		} in;
+		struct {
+			/** Capabilities held by the session after the call. */
+			__u32 session_caps;
+			/** Capabilities for all the sessions after the call. */
+			__u32 global_caps;
+		} out;
+	} u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_set_guest_caps, 24 + 8);
+
+#define VBG_IOCTL_CHANGE_GUEST_CAPABILITIES \
+	_IOWR('V', 14, struct vbg_ioctl_set_guest_caps)
+
+
+/** VBG_IOCTL_CHECK_BALLOON data structure. */
+struct vbg_ioctl_check_balloon {
+	/** The header. */
+	struct vbg_ioctl_hdr hdr;
+	union {
+		struct {
+			/** The size of the balloon in chunks of 1MB. */
+			__u32 balloon_chunks;
+			/**
+			 * false = handled in R0, no further action required.
+			 *  true = allocate balloon memory in R3.
+			 */
+			__u8 handle_in_r3;
+			/** Explicit padding, MBZ. */
+			__u8 padding[3];
+		} out;
+	} u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_check_balloon, 24 + 8);
+
+/*
+ * IOCTL to check memory ballooning.
+ *
+ * The guest kernel module will ask the host for the current size of the
+ * balloon and adjust the size. Or it will set handle_in_r3 = true and R3 is
+ * responsible for allocating memory and calling VBG_IOCTL_CHANGE_BALLOON.
+ */
+#define VBG_IOCTL_CHECK_BALLOON \
+	_IOWR('V', 17, struct vbg_ioctl_check_balloon)
+
+
+/** VBG_IOCTL_WRITE_CORE_DUMP data structure. */
+struct vbg_ioctl_write_coredump {
+	struct vbg_ioctl_hdr hdr;
+	union {
+		struct {
+			__u32 flags; /** Flags (reserved, MBZ). */
+		} in;
+	} u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_write_coredump, 24 + 4);
+
+#define VBG_IOCTL_WRITE_CORE_DUMP \
+	_IOWR('V', 19, struct vbg_ioctl_write_coredump)
+
+#endif
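A hedged userspace sketch of the version handshake using the header
conventions above; the device path is illustrative:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/vboxguest.h>

	struct vbg_ioctl_driver_version_info info = { 0 };
	int fd = open("/dev/vboxguest", O_RDWR);	/* illustrative path */

	info.hdr.size_in = sizeof(info);
	info.hdr.version = VBG_IOCTL_HDR_VERSION;
	info.hdr.type = VBG_IOCTL_HDR_TYPE_DEFAULT;
	info.u.in.req_version = VBG_IOC_VERSION;
	/* minimum acceptable version: the major part of VBG_IOC_VERSION */
	info.u.in.min_version = VBG_IOC_VERSION & 0xffff0000u;

	if (fd >= 0 && ioctl(fd, VBG_IOCTL_DRIVER_VERSION_INFO, &info) == 0 &&
	    info.hdr.rc >= 0)	/* hdr.rc is a VINF_/VERR_ code */
		printf("session version %#x\n", info.u.out.session_version);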
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index 9826b9a6..9fad6af 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -203,6 +203,10 @@ int main(void)
 	DEVID_FIELD(hda_device_id, rev_id);
 	DEVID_FIELD(hda_device_id, api_version);
 
+	DEVID(sdw_device_id);
+	DEVID_FIELD(sdw_device_id, mfg_id);
+	DEVID_FIELD(sdw_device_id, part_id);
+
 	DEVID(fsl_mc_device_id);
 	DEVID_FIELD(fsl_mc_device_id, vendor);
 	DEVID_FIELD(fsl_mc_device_id, obj_type);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 6ef6e63..b9beeaa 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1289,6 +1289,21 @@ static int do_hda_entry(const char *filename, void *symval, char *alias)
 }
 ADD_TO_DEVTABLE("hdaudio", hda_device_id, do_hda_entry);
 
+/* Looks like: sdw:mNpN */
+static int do_sdw_entry(const char *filename, void *symval, char *alias)
+{
+	DEF_FIELD(symval, sdw_device_id, mfg_id);
+	DEF_FIELD(symval, sdw_device_id, part_id);
+
+	strcpy(alias, "sdw:");
+	ADD(alias, "m", mfg_id != 0, mfg_id);
+	ADD(alias, "p", part_id != 0, part_id);
+
+	add_wildcard(alias);
+	return 1;
+}
+ADD_TO_DEVTABLE("sdw", sdw_device_id, do_sdw_entry);
+
 /* Looks like: fsl-mc:vNdN */
 static int do_fsl_mc_entry(const char *filename, void *symval,
 			   char *alias)
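Regarding the sdw:mNpN format added above: given file2alias's zero-padded
hex formatting for 16-bit fields, an entry with mfg_id 0x025d and part_id
0x0700 would presumably yield the alias:

	sdw:m025Dp0700*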
diff --git a/security/Kconfig b/security/Kconfig
index a623d13..5ea8914 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -153,6 +153,7 @@
 	bool "Harden memory copies between kernel and userspace"
 	depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
 	select BUG
+	imply STRICT_DEVMEM
 	help
 	  This option checks for obviously wrong memory regions when
 	  copying memory to/from the kernel (via copy_to_user() and
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index 3150381..1139d71 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -7,9 +7,30 @@
 
 CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
 
-all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+sbindir ?= /usr/sbin
+libexecdir ?= /usr/libexec
+sharedstatedir ?= /var/lib
+
+ALL_PROGRAMS := hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+
+ALL_SCRIPTS := hv_get_dhcp_info.sh hv_get_dns_info.sh hv_set_ifconfig.sh
+
+all: $(ALL_PROGRAMS)
+
 %: %.c
 	$(CC) $(CFLAGS) -o $@ $^
 
 clean:
 	$(RM) hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+
+install: all
+	install -d -m 755 $(DESTDIR)$(sbindir); \
+	install -d -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd; \
+	install -d -m 755 $(DESTDIR)$(sharedstatedir); \
+	for program in $(ALL_PROGRAMS); do \
+		install $$program -m 755 $(DESTDIR)$(sbindir);	\
+	done; \
+	install -m 755 lsvmbus $(DESTDIR)$(sbindir); \
+	for script in $(ALL_SCRIPTS); do \
+		install $$script -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd/$${script%.sh}; \
+	done