-rw-r--r--  Documentation/DocBook/drm.tmpl | 39
-rw-r--r--  Documentation/devicetree/bindings/iommu/rockchip,iommu.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/video/rockchip-drm.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/video/rockchip-vop.txt | 58
-rw-r--r--  MAINTAINERS | 38
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/s390/kernel/nmi.c | 8
-rw-r--r--  arch/x86/boot/compressed/Makefile | 2
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 2
-rw-r--r--  block/bio-integrity.c | 13
-rw-r--r--  drivers/acpi/video.c | 3
-rw-r--r--  drivers/ata/ahci.c | 4
-rw-r--r--  drivers/ata/sata_fsl.c | 2
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 168
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 25
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 187
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 134
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 1
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 92
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 130
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 107
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 74
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 8
-rw-r--r--  drivers/gpu/drm/rockchip/Kconfig | 17
-rw-r--r--  drivers/gpu/drm/rockchip/Makefile | 8
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 551
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 68
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 201
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.h | 28
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 210
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h | 21
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 294
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 54
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 1455
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 201
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 39
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-cadence.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 10
-rw-r--r--  drivers/input/evdev.c | 2
-rw-r--r--  drivers/iommu/Kconfig | 12
-rw-r--r--  drivers/iommu/Makefile | 1
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 1038
-rw-r--r--  drivers/media/i2c/smiapp/smiapp-core.c | 2
-rw-r--r--  drivers/media/pci/cx23885/cx23885-core.c | 6
-rw-r--r--  drivers/media/pci/solo6x10/solo6x10-core.c | 10
-rw-r--r--  drivers/media/rc/ir-rc6-decoder.c | 2
-rw-r--r--  drivers/media/usb/s2255/s2255drv.c | 2
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 8
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 96
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 18
-rw-r--r--  drivers/net/xen-netfront.c | 5
-rw-r--r--  drivers/of/fdt.c | 2
-rw-r--r--  drivers/pci/host/pci-tegra.c | 28
-rw-r--r--  drivers/watchdog/s3c2410_wdt.c | 2
-rw-r--r--  fs/fat/namei_vfat.c | 20
-rw-r--r--  fs/jbd2/journal.c | 5
-rw-r--r--  include/drm/drm_crtc.h | 38
-rw-r--r--  include/drm/drm_displayid.h | 76
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 4
-rw-r--r--  include/drm/drm_edid.h | 2
-rw-r--r--  include/drm/drm_fb_helper.h | 6
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h | 9
-rw-r--r--  include/uapi/linux/Kbuild | 2
-rw-r--r--  ipc/sem.c | 15
-rw-r--r--  kernel/sched/core.c | 8
-rw-r--r--  lib/genalloc.c | 1
-rw-r--r--  lib/show_mem.c | 2
-rw-r--r--  mm/frontswap.c | 4
-rw-r--r--  mm/memory.c | 26
-rw-r--r--  mm/mmap.c | 10
-rw-r--r--  mm/rmap.c | 6
-rw-r--r--  mm/slab.c | 2
-rw-r--r--  mm/vmpressure.c | 8
-rw-r--r--  net/core/rtnetlink.c | 1
-rw-r--r--  security/keys/internal.h | 1
-rw-r--r--  security/keys/keyctl.c | 56
-rw-r--r--  security/keys/keyring.c | 10
-rw-r--r--  security/keys/request_key.c | 2
-rw-r--r--  security/keys/request_key_auth.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 2
128 files changed, 5635 insertions, 596 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 56e2a9b65c68..4b592ffbafee 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -1947,10 +1947,16 @@ void intel_crt_init(struct drm_device *dev)
     and then retrieves a list of modes by calling the connector
     <methodname>get_modes</methodname> helper operation.
   </para>
+  <para>
+    If the helper operation returns no mode, and if the connector status
+    is connector_status_connected, standard VESA DMT modes up to
+    1024x768 are automatically added to the modes list by a call to
+    <function>drm_add_modes_noedid</function>.
+  </para>
   <para>
-    The function filters out modes larger than
+    The function then filters out modes larger than
     <parameter>max_width</parameter> and <parameter>max_height</parameter>
-    if specified. It then calls the optional connector
+    if specified. It finally calls the optional connector
     <methodname>mode_valid</methodname> helper operation for each mode in
     the probed list to check whether the mode is valid for the connector.
   </para>
@@ -2090,12 +2096,20 @@ void intel_crt_init(struct drm_device *dev)
   <synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis>
   <para>
     Fill the connector's <structfield>probed_modes</structfield> list
-    by parsing EDID data with <function>drm_add_edid_modes</function> or
-    calling <function>drm_mode_probed_add</function> directly for every
+    by parsing EDID data with <function>drm_add_edid_modes</function>,
+    adding standard VESA DMT modes with <function>drm_add_modes_noedid</function>,
+    or calling <function>drm_mode_probed_add</function> directly for every
     supported mode and return the number of modes it has detected. This
     operation is mandatory.
   </para>
   <para>
+    Note that the caller function will automatically add standard VESA
+    DMT modes up to 1024x768 if the <methodname>get_modes</methodname>
+    helper operation returns no mode and if the connector status is
+    connector_status_connected. There is no need to call
+    <function>drm_add_edid_modes</function> manually in that case.
+  </para>
+  <para>
     When adding modes manually the driver creates each mode with a call to
     <function>drm_mode_create</function> and must fill the following fields.
     <itemizedlist>
@@ -2292,7 +2306,7 @@ void intel_crt_init(struct drm_device *dev)
     <function>drm_helper_probe_single_connector_modes</function>.
   </para>
   <para>
-    When parsing EDID data, <function>drm_add_edid_modes</function> fill the
+    When parsing EDID data, <function>drm_add_edid_modes</function> fills the
     connector <structfield>display_info</structfield>
     <structfield>width_mm</structfield> and
     <structfield>height_mm</structfield> fields. When creating modes
@@ -2412,6 +2426,10 @@ void intel_crt_init(struct drm_device *dev)
 !Edrivers/gpu/drm/drm_plane_helper.c
 !Pdrivers/gpu/drm/drm_plane_helper.c overview
   </sect2>
+  <sect2>
+    <title>Tile group</title>
+!Pdrivers/gpu/drm/drm_crtc.c Tile group
+  </sect2>
 </sect1>

 <!-- Internals: kms properties -->
@@ -2546,8 +2564,8 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >Description/Restrictions</td>
 	</tr>
 	<tr>
-	<td rowspan="23" valign="top" >DRM</td>
-	<td rowspan="3" valign="top" >Generic</td>
+	<td rowspan="25" valign="top" >DRM</td>
+	<td rowspan="4" valign="top" >Generic</td>
 	<td valign="top" >“EDID”</td>
 	<td valign="top" >BLOB | IMMUTABLE</td>
 	<td valign="top" >0</td>
@@ -2569,6 +2587,13 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >Contains topology path to a connector.</td>
 	</tr>
 	<tr>
+	<td valign="top" >“TILE”</td>
+	<td valign="top" >BLOB | IMMUTABLE</td>
+	<td valign="top" >0</td>
+	<td valign="top" >Connector</td>
+	<td valign="top" >Contains tiling information for a connector.</td>
+	</tr>
+	<tr>
 	<td rowspan="1" valign="top" >Plane</td>
 	<td valign="top" >“type”</td>
 	<td valign="top" >ENUM | IMMUTABLE</td>
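
[Editor's note: for illustration, a minimal sketch of a get_modes helper operation that relies on the fallback described in the documentation above. The foo_connector wrapper and its ddc field are hypothetical; drm_get_edid, drm_mode_connector_update_edid_property, drm_add_edid_modes and drm_add_modes_noedid are the helpers named in the text.]

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct foo_connector *foo = to_foo_connector(connector); /* hypothetical */
	struct edid *edid;
	int count = 0;

	edid = drm_get_edid(connector, foo->ddc);	/* may return NULL */
	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	/*
	 * Returning 0 is fine here: when the connector status is
	 * connector_status_connected, the probe helper falls back to
	 * drm_add_modes_noedid() with VESA DMT modes up to 1024x768.
	 */
	return count;
}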
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
new file mode 100644
index 000000000000..9a55ac3735e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
@@ -0,0 +1,26 @@
+Rockchip IOMMU
+==============
+
+A Rockchip DRM iommu translates io virtual addresses to physical addresses for
+its master device. Each slave device is bound to a single master device, and
+shares its clocks, power domain and irq.
+
+Required properties:
+- compatible : Should be "rockchip,iommu"
+- reg : Address space for the configuration registers
+- interrupts : Interrupt specifier for the IOMMU instance
+- interrupt-names : Interrupt name for the IOMMU instance
+- #iommu-cells : Should be <0>. This indicates the iommu is a
+  "single-master" device, and needs no additional information
+  to associate with its master device. See:
+  Documentation/devicetree/bindings/iommu/iommu.txt
+
+Example:
+
+	vopl_mmu: iommu@ff940300 {
+		compatible = "rockchip,iommu";
+		reg = <0xff940300 0x100>;
+		interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "vopl_mmu";
+		#iommu-cells = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/video/rockchip-drm.txt b/Documentation/devicetree/bindings/video/rockchip-drm.txt
new file mode 100644
index 000000000000..7fff582495a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/rockchip-drm.txt
@@ -0,0 +1,19 @@
+Rockchip DRM master device
+================================
+
+The Rockchip DRM master device is a virtual device needed to list all
+vop devices or other display interface nodes that comprise the
+graphics subsystem.
+
+Required properties:
+- compatible: Should be "rockchip,display-subsystem"
+- ports: Should contain a list of phandles pointing to display interface port
+  of vop devices. vop definitions as defined in
+  Documentation/devicetree/bindings/video/rockchip-vop.txt
+
+example:
+
+display-subsystem {
+	compatible = "rockchip,display-subsystem";
+	ports = <&vopl_out>, <&vopb_out>;
+};
diff --git a/Documentation/devicetree/bindings/video/rockchip-vop.txt b/Documentation/devicetree/bindings/video/rockchip-vop.txt
new file mode 100644
index 000000000000..d15351f2313d
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/rockchip-vop.txt
@@ -0,0 +1,58 @@
+device-tree bindings for rockchip soc display controller (vop)
+
+VOP (Visual Output Processor) is the Display Controller for the Rockchip
+series of SoCs which transfers the image data from a video memory
+buffer to an external LCD interface.
+
+Required properties:
+- compatible: value should be one of the following
+	"rockchip,rk3288-vop";
+
+- interrupts: should contain a list of all VOP IP block interrupts in the
+	order: VSYNC, LCD_SYSTEM. The interrupt specifier
+	format depends on the interrupt controller used.
+
+- clocks: must include clock specifiers corresponding to entries in the
+	clock-names property.
+
+- clock-names: Must contain
+	aclk_vop: for ddr buffer transfer.
+	hclk_vop: for ahb bus to R/W the phy regs.
+	dclk_vop: pixel clock.
+
+- resets: Must contain an entry for each entry in reset-names.
+  See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+  - axi
+  - ahb
+  - dclk
+
+- iommus: required a iommu node
+
+- port: A port node with endpoint definitions as defined in
+  Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+SoC specific DT entry:
+	vopb: vopb@ff930000 {
+		compatible = "rockchip,rk3288-vop";
+		reg = <0xff930000 0x19c>;
+		interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
+		clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+		resets = <&cru SRST_LCDC1_AXI>, <&cru SRST_LCDC1_AHB>, <&cru SRST_LCDC1_DCLK>;
+		reset-names = "axi", "ahb", "dclk";
+		iommus = <&vopb_mmu>;
+		vopb_out: port {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			vopb_out_edp: endpoint@0 {
+				reg = <0>;
+				remote-endpoint=<&edp_in_vopb>;
+			};
+			vopb_out_hdmi: endpoint@1 {
+				reg = <1>;
+				remote-endpoint=<&hdmi_in_vopb>;
+			};
+		};
+	};
diff --git a/MAINTAINERS b/MAINTAINERS
index 55d3e9b93338..296c02d39c29 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1838,7 +1838,7 @@ F:	include/net/ax25.h
 F:	net/ax25/

 AZ6007 DVB DRIVER
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-media@vger.kernel.org
 W:	http://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
@@ -2208,7 +2208,7 @@ F:	Documentation/filesystems/btrfs.txt
 F:	fs/btrfs/

 BTTV VIDEO4LINUX DRIVER
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-media@vger.kernel.org
 W:	http://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
@@ -2729,7 +2729,7 @@ F:	drivers/media/common/cx2341x*
 F:	include/media/cx2341x*

 CX88 VIDEO4LINUX DRIVER
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-media@vger.kernel.org
 W:	http://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
@@ -3419,7 +3419,7 @@ F:	fs/ecryptfs/
 EDAC-CORE
 M:	Doug Thompson <dougthompson@xmission.com>
 M:	Borislav Petkov <bp@alien8.de>
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Supported
@@ -3468,7 +3468,7 @@ S:	Maintained
 F:	drivers/edac/e7xxx_edac.c

 EDAC-GHES
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
@@ -3496,21 +3496,21 @@ S:	Maintained
 F:	drivers/edac/i5000_edac.c

 EDAC-I5400
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i5400_edac.c

 EDAC-I7300
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i7300_edac.c

 EDAC-I7CORE
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
@@ -3553,7 +3553,7 @@ S:	Maintained
 F:	drivers/edac/r82600_edac.c

 EDAC-SBRIDGE
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
@@ -3613,7 +3613,7 @@ S:	Maintained
 F:	drivers/net/ethernet/ibm/ehea/

 EM28XX VIDEO4LINUX DRIVER
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-media@vger.kernel.org
 W:	http://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
@@ -5979,7 +5979,7 @@ S:	Maintained
 F:	drivers/media/radio/radio-maxiradio*

 MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 P:	LinuxTV.org Project
 L:	linux-media@vger.kernel.org
 W:	http://linuxtv.org
@@ -8030,7 +8030,7 @@ S:	Odd Fixes
 F:	drivers/media/i2c/saa6588*

 SAA7134 VIDEO4LINUX DRIVER
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-media@vger.kernel.org
 W:	http://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
@@ -8488,7 +8488,7 @@ S:	Maintained
 F:	drivers/media/radio/si4713/radio-usb-si4713.c

 SIANO DVB DRIVER
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-media@vger.kernel.org
 W:	http://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
@@ -8699,7 +8699,9 @@ S:	Maintained
 F:	drivers/leds/leds-net48xx.c

 SOFTLOGIC 6x10 MPEG CODEC
-M:	Ismael Luceno <ismael.luceno@corp.bluecherry.net>
+M:	Bluecherry Maintainers <maintainers@bluecherrydvr.com>
+M:	Andrey Utkin <andrey.utkin@corp.bluecherry.net>
+M:	Andrey Utkin <andrey.krieger.utkin@gmail.com>
 L:	linux-media@vger.kernel.org
 S:	Supported
 F:	drivers/media/pci/solo6x10/
@@ -9173,7 +9175,7 @@ S:	Maintained
 F:	drivers/media/i2c/tda9840*

 TEA5761 TUNER DRIVER
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-media@vger.kernel.org
 W:	http://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
@@ -9181,7 +9183,7 @@ S:	Odd fixes
 F:	drivers/media/tuners/tea5761.*

 TEA5767 TUNER DRIVER
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-media@vger.kernel.org
 W:	http://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
@@ -9493,7 +9495,7 @@ F:	include/linux/shmem_fs.h
 F:	mm/shmem.c

 TM6000 VIDEO4LINUX DRIVER
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-media@vger.kernel.org
 W:	http://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
@@ -10314,7 +10316,7 @@ S:	Maintained
 F:	arch/x86/kernel/cpu/mcheck/*

 XC2028/3028 TUNER DRIVER
-M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:	linux-media@vger.kernel.org
 W:	http://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
diff --git a/Makefile b/Makefile
index ce70361f766e..fd80c6e9bc23 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Diseased Newt

 # *DOCUMENTATION*
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index dd1c24ceda50..3f51cf4e8f02 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -54,12 +54,8 @@ void s390_handle_mcck(void)
 	 */
 	local_irq_save(flags);
 	local_mcck_disable();
-	/*
-	 * Ummm... Does this make sense at all? Copying the percpu struct
-	 * and then zapping it one statement later?
-	 */
-	memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
-	memset(&mcck, 0, sizeof(struct mcck_struct));
+	mcck = *this_cpu_ptr(&cpu_mcck);
+	memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
 	clear_cpu_flag(CIF_MCCK_PENDING);
 	local_mcck_enable();
 	local_irq_restore(flags);
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index be1e07d4b596..45abc363dd3e 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -76,7 +76,7 @@ suffix-$(CONFIG_KERNEL_XZ) := xz
 suffix-$(CONFIG_KERNEL_LZO) := lzo
 suffix-$(CONFIG_KERNEL_LZ4) := lz4

-RUN_SIZE = $(shell objdump -h vmlinux | \
+RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
	     perl $(srctree)/arch/x86/tools/calc_run_size.pl)
 quiet_cmd_mkpiggy = MKPIGGY $@
       cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 2ce9051174e6..08fe6e8a726e 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -465,6 +465,7 @@ static void mc_bp_resume(void)

 	if (uci->valid && uci->mc)
 		microcode_ops->apply_microcode(cpu);
+#ifdef CONFIG_X86_64
 	else if (!uci->mc)
 		/*
 		 * We might resume and not have applied late microcode but still
@@ -473,6 +474,7 @@ static void mc_bp_resume(void)
 		 * applying patches early on the APs.
 		 */
 		load_ucode_ap();
+#endif
 }

 static struct syscore_ops mc_syscore_ops = {
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 0984232e429f..5cbd5d9ea61d 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -216,9 +216,10 @@ static int bio_integrity_process(struct bio *bio,
 {
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 	struct blk_integrity_iter iter;
-	struct bio_vec *bv;
+	struct bvec_iter bviter;
+	struct bio_vec bv;
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	unsigned int i, ret = 0;
+	unsigned int ret = 0;
 	void *prot_buf = page_address(bip->bip_vec->bv_page) +
 		bip->bip_vec->bv_offset;

@@ -227,11 +228,11 @@ static int bio_integrity_process(struct bio *bio,
 	iter.seed = bip_get_seed(bip);
 	iter.prot_buf = prot_buf;

-	bio_for_each_segment_all(bv, bio, i) {
-		void *kaddr = kmap_atomic(bv->bv_page);
+	bio_for_each_segment(bv, bio, bviter) {
+		void *kaddr = kmap_atomic(bv.bv_page);

-		iter.data_buf = kaddr + bv->bv_offset;
-		iter.data_size = bv->bv_len;
+		iter.data_buf = kaddr + bv.bv_offset;
+		iter.data_size = bv.bv_len;

 		ret = proc_fn(&iter);
 		if (ret) {
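
[Editor's note: the hunk above switches from bio_for_each_segment_all(), which walks the bio's own bvec array through struct bio_vec pointers and is only valid for the bio's owner, to bio_for_each_segment(), which hands out struct bio_vec copies driven by a struct bvec_iter and so respects bio->bi_iter. A hedged sketch of the new iteration pattern, assuming 3.18-era block APIs; the helper itself is hypothetical.]

static unsigned int foo_bio_bytes(struct bio *bio)
{
	struct bio_vec bv;	/* a copy, not a pointer, hence bv.bv_len */
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;

	return bytes;
}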
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 807a88a0f394..9d75ead2a1f9 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1164,7 +1164,8 @@ static bool acpi_video_device_in_dod(struct acpi_video_device *device)
 		return true;

 	for (i = 0; i < video->attached_count; i++) {
-		if (video->attached_array[i].bind_info == device)
+		if ((video->attached_array[i].value.int_val & 0xfff) ==
+		    (device->device_id & 0xfff))
 			return true;
 	}

diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index e45f83789809..49f1e6890587 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -321,6 +321,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
 	{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
 	{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+	{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
+	{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
 	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
 	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
 	{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
@@ -492,6 +495,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	 * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
 	 */
 	{ PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
+	{ PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },

 	/* Enmotus */
 	{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 07bc7e4dbd04..65071591b143 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1488,7 +1488,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
 	host_priv->csr_base = csr_base;

 	irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-	if (irq < 0) {
+	if (!irq) {
 		dev_err(&ofdev->dev, "invalid irq from platform\n");
 		goto error_exit_with_cleanup;
 	}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 24c2d7caedd5..c3413b6adb17 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -167,6 +167,8 @@ config DRM_SAVAGE

 source "drivers/gpu/drm/exynos/Kconfig"

+source "drivers/gpu/drm/rockchip/Kconfig"
+
 source "drivers/gpu/drm/vmwgfx/Kconfig"

 source "drivers/gpu/drm/gma500/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 47d89869c5df..66e40398b3d3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA)	+=via/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 102cd36799b1..4f7b275f2f7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -102,15 +102,26 @@ struct device *kfd_chardev(void)
 static int kfd_open(struct inode *inode, struct file *filep)
 {
 	struct kfd_process *process;
+	bool is_32bit_user_mode;

 	if (iminor(inode) != 0)
 		return -ENODEV;

+	is_32bit_user_mode = is_compat_task();
+
+	if (is_32bit_user_mode == true) {
+		dev_warn(kfd_device,
+			"Process %d (32-bit) failed to open /dev/kfd\n"
+			"32-bit processes are not supported by amdkfd\n",
+			current->pid);
+		return -EPERM;
+	}
+
 	process = kfd_create_process(current);
 	if (IS_ERR(process))
 		return PTR_ERR(process);

-	process->is_32bit_user_mode = is_compat_task();
+	process->is_32bit_user_mode = is_32bit_user_mode;

 	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
 		process->pasid, process->is_32bit_user_mode);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 9abac48de499..935071410724 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -221,8 +221,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
 			queue_size_dwords;

 	if (packet_size_in_dwords >= queue_size_dwords ||
-			packet_size_in_dwords >= available_size)
+			packet_size_in_dwords >= available_size) {
+		/*
+		 * make sure calling functions know
+		 * acquire_packet_buffer() failed
+		 */
+		*buffer_ptr = NULL;
 		return -ENOMEM;
+	}

 	if (wptr + packet_size_in_dwords >= queue_size_dwords) {
 		while (wptr > 0) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 2458ab7c0c6e..71699ad97d74 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -32,8 +32,7 @@ int kfd_pasid_init(void)
 {
 	pasid_limit = max_num_of_processes;

-	pasid_bitmap = kzalloc(DIV_ROUND_UP(pasid_limit, BITS_PER_BYTE),
-			GFP_KERNEL);
+	pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
 	if (!pasid_bitmap)
 		return -ENOMEM;

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b4f49ac13334..b85eb0b830b4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -196,7 +196,7 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
 	mmdrop(p->mm);

 	work = (struct kfd_process_release_work *)
-		kmalloc(sizeof(struct kfd_process_release_work), GFP_KERNEL);
+		kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);

 	if (work) {
 		INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index de79283eaea7..5213da499d39 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -725,6 +725,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
 	WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
 	if (crtc->state && crtc->funcs->atomic_destroy_state)
 		crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+	memset(crtc, 0, sizeof(*crtc));
 }
 EXPORT_SYMBOL(drm_crtc_cleanup);

@@ -908,6 +910,11 @@ void drm_connector_cleanup(struct drm_connector *connector)
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode, *t;

+	if (connector->tile_group) {
+		drm_mode_put_tile_group(dev, connector->tile_group);
+		connector->tile_group = NULL;
+	}
+
 	list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
 		drm_mode_remove(connector, mode);

@@ -927,6 +934,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
 	if (connector->state && connector->funcs->atomic_destroy_state)
 		connector->funcs->atomic_destroy_state(connector,
 						       connector->state);
+
+	memset(connector, 0, sizeof(*connector));
 }
 EXPORT_SYMBOL(drm_connector_cleanup);

@@ -1068,6 +1077,8 @@ void drm_bridge_cleanup(struct drm_bridge *bridge)
 	list_del(&bridge->head);
 	dev->mode_config.num_bridge--;
 	drm_modeset_unlock_all(dev);
+
+	memset(bridge, 0, sizeof(*bridge));
 }
 EXPORT_SYMBOL(drm_bridge_cleanup);

@@ -1134,10 +1145,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
 	drm_modeset_lock_all(dev);
 	drm_mode_object_put(dev, &encoder->base);
 	kfree(encoder->name);
-	encoder->name = NULL;
 	list_del(&encoder->head);
 	dev->mode_config.num_encoder--;
 	drm_modeset_unlock_all(dev);
+
+	memset(encoder, 0, sizeof(*encoder));
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);

@@ -1257,6 +1269,8 @@ void drm_plane_cleanup(struct drm_plane *plane)
 	WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
 	if (plane->state && plane->funcs->atomic_destroy_state)
 		plane->funcs->atomic_destroy_state(plane, plane->state);
+
+	memset(plane, 0, sizeof(*plane));
 }
 EXPORT_SYMBOL(drm_plane_cleanup);

@@ -1339,6 +1353,11 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
 				   "PATH", 0);
 	dev->mode_config.path_property = dev_path;

+	dev->mode_config.tile_property = drm_property_create(dev,
+					DRM_MODE_PROP_BLOB |
+					DRM_MODE_PROP_IMMUTABLE,
+					"TILE", 0);
+
 	return 0;
 }

@@ -3444,7 +3463,7 @@ void drm_fb_release(struct drm_file *priv)

 	/*
 	 * When the file gets released that means no one else can access the fb
-	 * list any more, so no need to grab fpriv->fbs_lock. And we need to to
+	 * list any more, so no need to grab fpriv->fbs_lock. And we need to
 	 * avoid upsetting lockdep since the universal cursor code adds a
 	 * framebuffer while holding mutex locks.
 	 *
@@ -4083,6 +4102,52 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
 EXPORT_SYMBOL(drm_mode_connector_set_path_property);

 /**
+ * drm_mode_connector_set_tile_property - set tile property on connector
+ * @connector: connector to set property on.
+ *
+ * This looks up the tile information for a connector, and creates a
+ * property for userspace to parse if it exists. The property is of
+ * the form of 8 integers using ':' as a separator.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	int ret, size;
+	char tile[256];
+
+	if (connector->tile_blob_ptr)
+		drm_property_destroy_blob(dev, connector->tile_blob_ptr);
+
+	if (!connector->has_tile) {
+		connector->tile_blob_ptr = NULL;
+		ret = drm_object_property_set_value(&connector->base,
+						dev->mode_config.tile_property, 0);
+		return ret;
+	}
+
+	snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
+		 connector->tile_group->id, connector->tile_is_single_monitor,
+		 connector->num_h_tile, connector->num_v_tile,
+		 connector->tile_h_loc, connector->tile_v_loc,
+		 connector->tile_h_size, connector->tile_v_size);
+	size = strlen(tile) + 1;
+
+	connector->tile_blob_ptr = drm_property_create_blob(connector->dev,
+							    size, tile);
+	if (!connector->tile_blob_ptr)
+		return -EINVAL;
+
+	ret = drm_object_property_set_value(&connector->base,
+					    dev->mode_config.tile_property,
+					    connector->tile_blob_ptr->base.id);
+	return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
+
+/**
  * drm_mode_connector_update_edid_property - update the edid property of a connector
  * @connector: drm connector
  * @edid: new value of the edid property
@@ -5152,6 +5217,7 @@ void drm_mode_config_init(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
 	INIT_LIST_HEAD(&dev->mode_config.plane_list);
 	idr_init(&dev->mode_config.crtc_idr);
+	idr_init(&dev->mode_config.tile_idr);

 	drm_modeset_lock_all(dev);
 	drm_mode_create_standard_connector_properties(dev);
@@ -5239,6 +5305,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 		crtc->funcs->destroy(crtc);
 	}

+	idr_destroy(&dev->mode_config.tile_idr);
 	idr_destroy(&dev->mode_config.crtc_idr);
 	drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
 }
@@ -5261,3 +5328,100 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
 					   supported_rotations);
 }
 EXPORT_SYMBOL(drm_mode_create_rotation_property);
+
+/**
+ * DOC: Tile group
+ *
+ * Tile groups are used to represent tiled monitors with a unique
+ * integer identifier. Tiled monitors using DisplayID v1.3 have
+ * a unique 8-byte handle, we store this in a tile group, so we
+ * have a common identifier for all tiles in a monitor group.
+ */
+static void drm_tile_group_free(struct kref *kref)
+{
+	struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
+	struct drm_device *dev = tg->dev;
+	mutex_lock(&dev->mode_config.idr_mutex);
+	idr_remove(&dev->mode_config.tile_idr, tg->id);
+	mutex_unlock(&dev->mode_config.idr_mutex);
+	kfree(tg);
+}
+
+/**
+ * drm_mode_put_tile_group - drop a reference to a tile group.
+ * @dev: DRM device
+ * @tg: tile group to drop reference to.
+ *
+ * drop reference to tile group and free if 0.
+ */
+void drm_mode_put_tile_group(struct drm_device *dev,
+			     struct drm_tile_group *tg)
+{
+	kref_put(&tg->refcount, drm_tile_group_free);
+}
+
+/**
+ * drm_mode_get_tile_group - get a reference to an existing tile group
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Use the unique bytes to get a reference to an existing tile group.
+ *
+ * RETURNS:
+ * tile group or NULL if not found.
+ */
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+					       char topology[8])
+{
+	struct drm_tile_group *tg;
+	int id;
+	mutex_lock(&dev->mode_config.idr_mutex);
+	idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
+		if (!memcmp(tg->group_data, topology, 8)) {
+			if (!kref_get_unless_zero(&tg->refcount))
+				tg = NULL;
+			mutex_unlock(&dev->mode_config.idr_mutex);
+			return tg;
+		}
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+	return NULL;
+}
+
+/**
+ * drm_mode_create_tile_group - create a tile group from a displayid description
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Create a tile group for the unique monitor, and get a unique
+ * identifier for the tile group.
+ *
+ * RETURNS:
+ * new tile group or error.
+ */
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+						  char topology[8])
+{
+	struct drm_tile_group *tg;
+	int ret;
+
+	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+	if (!tg)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&tg->refcount);
+	memcpy(tg->group_data, topology, 8);
+	tg->dev = dev;
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
+	if (ret >= 0) {
+		tg->id = ret;
+	} else {
+		kfree(tg);
+		tg = ERR_PTR(ret);
+	}
+
+	mutex_unlock(&dev->mode_config.idr_mutex);
+	return tg;
+}
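
[Editor's note: to show how the pieces above fit together, a hedged sketch of a caller that binds a connector to a tile group. The topology bytes would come from a DisplayID tiled-display block (parsing not shown), and the function name is hypothetical; the tile group API and connector fields are the ones introduced in this diff.]

static int foo_connector_assign_tile_group(struct drm_connector *connector,
					   char topology[8])
{
	struct drm_device *dev = connector->dev;
	struct drm_tile_group *tg;

	/* reuse an existing group for this monitor, or create one */
	tg = drm_mode_get_tile_group(dev, topology);
	if (!tg) {
		tg = drm_mode_create_tile_group(dev, topology);
		if (IS_ERR(tg))
			return PTR_ERR(tg);
	}

	connector->has_tile = true;
	connector->tile_group = tg;	/* dropped in drm_connector_cleanup() */

	/* publish the "TILE" blob of 8 ':'-separated integers */
	return drm_mode_connector_set_tile_property(connector);
}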
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 959e2074b0d4..79968e39c8d0 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -186,10 +186,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,

 	/*
 	 * The specification doesn't give any recommendation on how often to
-	 * retry native transactions, so retry 7 times like for I2C-over-AUX
-	 * transactions.
+	 * retry native transactions. We used to retry 7 times like for
+	 * aux i2c transactions but real world devices this wasn't
+	 * sufficient, bump to 32 which makes Dell 4k monitors happier.
 	 */
-	for (retry = 0; retry < 7; retry++) {
+	for (retry = 0; retry < 32; retry++) {

 		mutex_lock(&aux->hw_mutex);
 		err = aux->transfer(aux, &msg);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 5682d7e9f1ec..9a5b68717ec8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -839,6 +839,8 @@ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)

 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
 {
+	struct drm_dp_mst_branch *mstb;
+
 	switch (old_pdt) {
 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
 	case DP_PEER_DEVICE_SST_SINK:
@@ -846,8 +848,9 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
 		drm_dp_mst_unregister_i2c_bus(&port->aux);
 		break;
 	case DP_PEER_DEVICE_MST_BRANCHING:
-		drm_dp_put_mst_branch_device(port->mstb);
+		mstb = port->mstb;
 		port->mstb = NULL;
+		drm_dp_put_mst_branch_device(mstb);
 		break;
 	}
 }
@@ -858,6 +861,8 @@ static void drm_dp_destroy_port(struct kref *kref)
 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
 	if (!port->input) {
 		port->vcpi.num_slots = 0;
+
+		kfree(port->cached_edid);
 		if (port->connector)
 			(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
 		drm_dp_port_teardown_pdt(port, port->pdt);
@@ -1097,6 +1102,10 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 		char proppath[255];
 		build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
 		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+
+		if (port->port_num >= 8) {
+			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+		}
 	}

 	/* put reference to this port */
@@ -2167,7 +2176,8 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference
 */
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
+						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
 {
 	enum drm_connector_status status = connector_status_disconnected;

@@ -2186,6 +2196,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr

 	case DP_PEER_DEVICE_SST_SINK:
 		status = connector_status_connected;
+		/* for logical ports - cache the EDID */
+		if (port->port_num >= 8 && !port->cached_edid) {
+			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
+		}
 		break;
 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
 		if (port->ldps)
@@ -2217,7 +2231,12 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
 	if (!port)
 		return NULL;

-	edid = drm_get_edid(connector, &port->aux.ddc);
+	if (port->cached_edid)
+		edid = drm_edid_duplicate(port->cached_edid);
+	else
+		edid = drm_get_edid(connector, &port->aux.ddc);
+
+	drm_mode_connector_set_tile_property(connector);
 	drm_dp_put_port(port);
 	return edid;
 }
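
[Editor's note: with the detect-port signature change above, an MST connector's ->detect() hook now passes its drm_connector through so the helper can cache the EDID of logical ports (port_num >= 8) while the port is still validated. A sketch under the assumption of a driver-private wrapper; foo_mst_connector and its fields are hypothetical.]

static enum drm_connector_status
foo_mst_connector_detect(struct drm_connector *connector, bool force)
{
	struct foo_mst_connector *c = to_foo_mst_connector(connector);

	/* the helper validates c->port before dereferencing it */
	return drm_dp_mst_detect_port(connector, c->mgr, c->port);
}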
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a7b5a71856a7..53bc7a628909 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <drm/drmP.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_displayid.h>

 #define version_greater(edid, maj, min) \
 	(((edid)->version > (maj)) || \
@@ -1014,6 +1015,27 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
 MODULE_PARM_DESC(edid_fixup,
 		 "Minimum number of valid EDID header bytes (0-8, default 6)");

+static void drm_get_displayid(struct drm_connector *connector,
+			      struct edid *edid);
+
+static int drm_edid_block_checksum(const u8 *raw_edid)
+{
+	int i;
+	u8 csum = 0;
+	for (i = 0; i < EDID_LENGTH; i++)
+		csum += raw_edid[i];
+
+	return csum;
+}
+
+static bool drm_edid_is_zero(const u8 *in_edid, int length)
+{
+	if (memchr_inv(in_edid, 0, length))
+		return false;
+
+	return true;
+}
+
 /**
  * drm_edid_block_valid - Sanity check the EDID block (base or extension)
  * @raw_edid: pointer to raw EDID block
@@ -1027,8 +1049,7 @@ MODULE_PARM_DESC(edid_fixup,
  */
 bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
 {
-	int i;
-	u8 csum = 0;
+	u8 csum;
 	struct edid *edid = (struct edid *)raw_edid;

 	if (WARN_ON(!raw_edid))
@@ -1048,8 +1069,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
 		}
 	}

-	for (i = 0; i < EDID_LENGTH; i++)
-		csum += raw_edid[i];
+	csum = drm_edid_block_checksum(raw_edid);
 	if (csum) {
 		if (print_bad_edid) {
 			DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
@@ -1080,9 +1100,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)

 bad:
 	if (print_bad_edid) {
-		printk(KERN_ERR "Raw EDID:\n");
-		print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+		if (drm_edid_is_zero(raw_edid, EDID_LENGTH)) {
+			printk(KERN_ERR "EDID block is all zeroes\n");
+		} else {
+			printk(KERN_ERR "Raw EDID:\n");
+			print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
 			       raw_edid, EDID_LENGTH, false);
+		}
 	}
 	return false;
 }
@@ -1115,7 +1139,7 @@ EXPORT_SYMBOL(drm_edid_is_valid);
 #define DDC_SEGMENT_ADDR 0x30
 /**
  * drm_do_probe_ddc_edid() - get EDID information via I2C
- * @adapter: I2C device adaptor
+ * @data: I2C device adapter
  * @buf: EDID data buffer to be filled
  * @block: 128 byte EDID block to start fetching from
  * @len: EDID data buffer length to fetch
@@ -1176,14 +1200,6 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
 	return ret == xfers ? 0 : -1;
 }

-static bool drm_edid_is_zero(u8 *in_edid, int length)
-{
-	if (memchr_inv(in_edid, 0, length))
-		return false;
-
-	return true;
-}
-
 /**
  * drm_do_get_edid - get EDID data using a custom EDID block read function
  * @connector: connector we're probing
@@ -1308,10 +1324,15 @@ EXPORT_SYMBOL(drm_probe_ddc);
 struct edid *drm_get_edid(struct drm_connector *connector,
 			  struct i2c_adapter *adapter)
 {
+	struct edid *edid;
+
 	if (!drm_probe_ddc(adapter))
 		return NULL;

-	return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
+	edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
+	if (edid)
+		drm_get_displayid(connector, edid);
+	return edid;
 }
 EXPORT_SYMBOL(drm_get_edid);

@@ -2406,7 +2427,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 /*
  * Search EDID for CEA extension block.
  */
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
 {
 	u8 *edid_ext = NULL;
 	int i;
@@ -2418,7 +2439,7 @@ static u8 *drm_find_cea_extension(struct edid *edid)
 	/* Find CEA extension */
 	for (i = 0; i < edid->extensions; i++) {
 		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
-		if (edid_ext[0] == CEA_EXT)
+		if (edid_ext[0] == ext_id)
 			break;
 	}

@@ -2428,6 +2449,16 @@ static u8 *drm_find_cea_extension(struct edid *edid)
 	return edid_ext;
 }

+static u8 *drm_find_cea_extension(struct edid *edid)
+{
+	return drm_find_edid_extension(edid, CEA_EXT);
+}
+
+static u8 *drm_find_displayid_extension(struct edid *edid)
+{
+	return drm_find_edid_extension(edid, DISPLAYID_EXT);
+}
+
 /*
  * Calculate the alternate clock for the CEA mode
  * (60Hz vs. 59.94Hz etc.)
@@ -3888,3 +3919,123 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
 	return 0;
 }
 EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
+
+static int drm_parse_display_id(struct drm_connector *connector,
+				u8 *displayid, int length,
+				bool is_edid_extension)
+{
+	/* if this is an EDID extension the first byte will be 0x70 */
+	int idx = 0;
+	struct displayid_hdr *base;
+	struct displayid_block *block;
+	u8 csum = 0;
+	int i;
+
+	if (is_edid_extension)
+		idx = 1;
+
+	base = (struct displayid_hdr *)&displayid[idx];
3938
3939 DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
3940 base->rev, base->bytes, base->prod_id, base->ext_count);
3941
3942 if (base->bytes + 5 > length - idx)
3943 return -EINVAL;
3944
3945 for (i = idx; i <= base->bytes + 5; i++) {
3946 csum += displayid[i];
3947 }
3948 if (csum) {
3949 DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
3950 return -EINVAL;
3951 }
3952
3953 block = (struct displayid_block *)&displayid[idx + 4];
3954 DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
3955 block->tag, block->rev, block->num_bytes);
3956
3957 switch (block->tag) {
3958 case DATA_BLOCK_TILED_DISPLAY: {
3959 struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
3960
3961 u16 w, h;
3962 u8 tile_v_loc, tile_h_loc;
3963 u8 num_v_tile, num_h_tile;
3964 struct drm_tile_group *tg;
3965
3966 w = tile->tile_size[0] | tile->tile_size[1] << 8;
3967 h = tile->tile_size[2] | tile->tile_size[3] << 8;
3968
3969 num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
3970 num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
3971 tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
3972 tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
3973
3974 connector->has_tile = true;
3975 if (tile->tile_cap & 0x80)
3976 connector->tile_is_single_monitor = true;
3977
3978 connector->num_h_tile = num_h_tile + 1;
3979 connector->num_v_tile = num_v_tile + 1;
3980 connector->tile_h_loc = tile_h_loc;
3981 connector->tile_v_loc = tile_v_loc;
3982 connector->tile_h_size = w + 1;
3983 connector->tile_v_size = h + 1;
3984
3985 DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
3986 DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
3987 DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
3988 num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
3989 DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
3990
3991 tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
3992 if (!tg) {
3993 tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
3994 }
3995 if (!tg)
3996 return -ENOMEM;
3997
3998 if (connector->tile_group != tg) {
3999 /* if we haven't got a pointer,
4000 take the reference, drop ref to old tile group */
4001 if (connector->tile_group) {
4002 drm_mode_put_tile_group(connector->dev, connector->tile_group);
4003 }
4004 connector->tile_group = tg;
4005 } else
4006 /* if same tile group, then release the ref we just took. */
4007 drm_mode_put_tile_group(connector->dev, tg);
4008 }
4009 break;
4010 default:
4011 printk("unknown displayid tag %d\n", block->tag);
4012 break;
4013 }
4014 return 0;
4015}
4016
4017static void drm_get_displayid(struct drm_connector *connector,
4018 struct edid *edid)
4019{
4020 void *displayid = NULL;
4021 int ret;
4022 connector->has_tile = false;
4023 displayid = drm_find_displayid_extension(edid);
4024 if (!displayid) {
4025 /* drop reference to any tile group we had */
4026 goto out_drop_ref;
4027 }
4028
4029 ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
4030 if (ret < 0)
4031 goto out_drop_ref;
4032 if (!connector->has_tile)
4033 goto out_drop_ref;
4034 return;
4035out_drop_ref:
4036 if (connector->tile_group) {
4037 drm_mode_put_tile_group(connector->dev, connector->tile_group);
4038 connector->tile_group = NULL;
4039 }
4040 return;
4041}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 09d47e9ba026..52ce26d6b4fb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -347,9 +347,18 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
347{ 347{
348 struct drm_device *dev = fb_helper->dev; 348 struct drm_device *dev = fb_helper->dev;
349 bool ret; 349 bool ret;
350 bool do_delayed = false;
351
350 drm_modeset_lock_all(dev); 352 drm_modeset_lock_all(dev);
351 ret = restore_fbdev_mode(fb_helper); 353 ret = restore_fbdev_mode(fb_helper);
354
355 do_delayed = fb_helper->delayed_hotplug;
356 if (do_delayed)
357 fb_helper->delayed_hotplug = false;
352 drm_modeset_unlock_all(dev); 358 drm_modeset_unlock_all(dev);
359
360 if (do_delayed)
361 drm_fb_helper_hotplug_event(fb_helper);
353 return ret; 362 return ret;
354} 363}
355EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); 364EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
@@ -888,10 +897,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
888 897
889 drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); 898 drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
890 899
891 if (fb_helper->delayed_hotplug) {
892 fb_helper->delayed_hotplug = false;
893 drm_fb_helper_hotplug_event(fb_helper);
894 }
895 return 0; 900 return 0;
896} 901}
897EXPORT_SYMBOL(drm_fb_helper_set_par); 902EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -995,19 +1000,21 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
995 crtc_count = 0; 1000 crtc_count = 0;
996 for (i = 0; i < fb_helper->crtc_count; i++) { 1001 for (i = 0; i < fb_helper->crtc_count; i++) {
997 struct drm_display_mode *desired_mode; 1002 struct drm_display_mode *desired_mode;
1003 int x, y;
998 desired_mode = fb_helper->crtc_info[i].desired_mode; 1004 desired_mode = fb_helper->crtc_info[i].desired_mode;
999 1005 x = fb_helper->crtc_info[i].x;
1006 y = fb_helper->crtc_info[i].y;
1000 if (desired_mode) { 1007 if (desired_mode) {
1001 if (gamma_size == 0) 1008 if (gamma_size == 0)
1002 gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; 1009 gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
1003 if (desired_mode->hdisplay < sizes.fb_width) 1010 if (desired_mode->hdisplay + x < sizes.fb_width)
1004 sizes.fb_width = desired_mode->hdisplay; 1011 sizes.fb_width = desired_mode->hdisplay + x;
1005 if (desired_mode->vdisplay < sizes.fb_height) 1012 if (desired_mode->vdisplay + y < sizes.fb_height)
1006 sizes.fb_height = desired_mode->vdisplay; 1013 sizes.fb_height = desired_mode->vdisplay + y;
1007 if (desired_mode->hdisplay > sizes.surface_width) 1014 if (desired_mode->hdisplay + x > sizes.surface_width)
1008 sizes.surface_width = desired_mode->hdisplay; 1015 sizes.surface_width = desired_mode->hdisplay + x;
1009 if (desired_mode->vdisplay > sizes.surface_height) 1016 if (desired_mode->vdisplay + y > sizes.surface_height)
1010 sizes.surface_height = desired_mode->vdisplay; 1017 sizes.surface_height = desired_mode->vdisplay + y;
1011 crtc_count++; 1018 crtc_count++;
1012 } 1019 }
1013 } 1020 }
@@ -1307,6 +1314,7 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
1307 1314
1308static bool drm_target_cloned(struct drm_fb_helper *fb_helper, 1315static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
1309 struct drm_display_mode **modes, 1316 struct drm_display_mode **modes,
1317 struct drm_fb_offset *offsets,
1310 bool *enabled, int width, int height) 1318 bool *enabled, int width, int height)
1311{ 1319{
1312 int count, i, j; 1320 int count, i, j;
@@ -1378,27 +1386,88 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
1378 return false; 1386 return false;
1379} 1387}
1380 1388
1389static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper,
1390 struct drm_display_mode **modes,
1391 struct drm_fb_offset *offsets,
1392 int idx,
1393 int h_idx, int v_idx)
1394{
1395 struct drm_fb_helper_connector *fb_helper_conn;
1396 int i;
1397 int hoffset = 0, voffset = 0;
1398
1399 for (i = 0; i < fb_helper->connector_count; i++) {
1400 fb_helper_conn = fb_helper->connector_info[i];
1401 if (!fb_helper_conn->connector->has_tile)
1402 continue;
1403
1404 if (!modes[i] && (h_idx || v_idx)) {
1405 DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
1406 fb_helper_conn->connector->base.id);
1407 continue;
1408 }
1409 if (fb_helper_conn->connector->tile_h_loc < h_idx)
1410 hoffset += modes[i]->hdisplay;
1411
1412 if (fb_helper_conn->connector->tile_v_loc < v_idx)
1413 voffset += modes[i]->vdisplay;
1414 }
1415 offsets[idx].x = hoffset;
1416 offsets[idx].y = voffset;
1417 DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
1418 return 0;
1419}
1420
1381static bool drm_target_preferred(struct drm_fb_helper *fb_helper, 1421static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
1382 struct drm_display_mode **modes, 1422 struct drm_display_mode **modes,
1423 struct drm_fb_offset *offsets,
1383 bool *enabled, int width, int height) 1424 bool *enabled, int width, int height)
1384{ 1425{
1385 struct drm_fb_helper_connector *fb_helper_conn; 1426 struct drm_fb_helper_connector *fb_helper_conn;
1386 int i; 1427 int i;
1387 1428 uint64_t conn_configured = 0, mask;
1429 int tile_pass = 0;
1430 mask = (1 << fb_helper->connector_count) - 1;
1431retry:
1388 for (i = 0; i < fb_helper->connector_count; i++) { 1432 for (i = 0; i < fb_helper->connector_count; i++) {
1389 fb_helper_conn = fb_helper->connector_info[i]; 1433 fb_helper_conn = fb_helper->connector_info[i];
1390 1434
1391 if (enabled[i] == false) 1435 if (conn_configured & (1 << i))
1392 continue; 1436 continue;
1393 1437
1438 if (enabled[i] == false) {
1439 conn_configured |= (1 << i);
1440 continue;
1441 }
1442
1443 /* first pass over all the untiled connectors */
1444 if (tile_pass == 0 && fb_helper_conn->connector->has_tile)
1445 continue;
1446
1447 if (tile_pass == 1) {
1448 if (fb_helper_conn->connector->tile_h_loc != 0 &&
1449 fb_helper_conn->connector->tile_v_loc != 0)
1450 continue;
1451
1452 } else {
1453 if (fb_helper_conn->connector->tile_h_loc != tile_pass -1 &&
1454 fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
1455 /* if this tile_pass doesn't cover any of the tiles - keep going */
1456 continue;
1457
1458 /* find the tile offsets for this pass - need
1459 to find all tiles left and above */
1460 drm_get_tile_offsets(fb_helper, modes, offsets,
1461 i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
1462 }
1394 DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", 1463 DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
1395 fb_helper_conn->connector->base.id); 1464 fb_helper_conn->connector->base.id);
1396 1465
1397 /* got for command line mode first */ 1466 /* got for command line mode first */
1398 modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); 1467 modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
1399 if (!modes[i]) { 1468 if (!modes[i]) {
1400 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", 1469 DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
1401 fb_helper_conn->connector->base.id); 1470 fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0);
1402 modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height); 1471 modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
1403 } 1472 }
1404 /* No preferred modes, pick one off the list */ 1473 /* No preferred modes, pick one off the list */
@@ -1408,6 +1477,12 @@ static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
1408 } 1477 }
1409 DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name : 1478 DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
1410 "none"); 1479 "none");
1480 conn_configured |= (1 << i);
1481 }
1482
1483 if ((conn_configured & mask) != mask) {
1484 tile_pass++;
1485 goto retry;
1411 } 1486 }
1412 return true; 1487 return true;
1413} 1488}
@@ -1497,6 +1572,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1497 struct drm_device *dev = fb_helper->dev; 1572 struct drm_device *dev = fb_helper->dev;
1498 struct drm_fb_helper_crtc **crtcs; 1573 struct drm_fb_helper_crtc **crtcs;
1499 struct drm_display_mode **modes; 1574 struct drm_display_mode **modes;
1575 struct drm_fb_offset *offsets;
1500 struct drm_mode_set *modeset; 1576 struct drm_mode_set *modeset;
1501 bool *enabled; 1577 bool *enabled;
1502 int width, height; 1578 int width, height;
@@ -1511,9 +1587,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1511 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); 1587 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
1512 modes = kcalloc(dev->mode_config.num_connector, 1588 modes = kcalloc(dev->mode_config.num_connector,
1513 sizeof(struct drm_display_mode *), GFP_KERNEL); 1589 sizeof(struct drm_display_mode *), GFP_KERNEL);
1590 offsets = kcalloc(dev->mode_config.num_connector,
1591 sizeof(struct drm_fb_offset), GFP_KERNEL);
1514 enabled = kcalloc(dev->mode_config.num_connector, 1592 enabled = kcalloc(dev->mode_config.num_connector,
1515 sizeof(bool), GFP_KERNEL); 1593 sizeof(bool), GFP_KERNEL);
1516 if (!crtcs || !modes || !enabled) { 1594 if (!crtcs || !modes || !enabled || !offsets) {
1517 DRM_ERROR("Memory allocation failed\n"); 1595 DRM_ERROR("Memory allocation failed\n");
1518 goto out; 1596 goto out;
1519 } 1597 }
@@ -1523,14 +1601,16 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1523 1601
1524 if (!(fb_helper->funcs->initial_config && 1602 if (!(fb_helper->funcs->initial_config &&
1525 fb_helper->funcs->initial_config(fb_helper, crtcs, modes, 1603 fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
1604 offsets,
1526 enabled, width, height))) { 1605 enabled, width, height))) {
1527 memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0])); 1606 memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
1528 memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0])); 1607 memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
1608 memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
1529 1609
1530 if (!drm_target_cloned(fb_helper, 1610 if (!drm_target_cloned(fb_helper, modes, offsets,
1531 modes, enabled, width, height) && 1611 enabled, width, height) &&
1532 !drm_target_preferred(fb_helper, 1612 !drm_target_preferred(fb_helper, modes, offsets,
1533 modes, enabled, width, height)) 1613 enabled, width, height))
1534 DRM_ERROR("Unable to find initial modes\n"); 1614 DRM_ERROR("Unable to find initial modes\n");
1535 1615
1536 DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", 1616 DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
@@ -1550,18 +1630,23 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1550 for (i = 0; i < fb_helper->connector_count; i++) { 1630 for (i = 0; i < fb_helper->connector_count; i++) {
1551 struct drm_display_mode *mode = modes[i]; 1631 struct drm_display_mode *mode = modes[i];
1552 struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; 1632 struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
1633 struct drm_fb_offset *offset = &offsets[i];
1553 modeset = &fb_crtc->mode_set; 1634 modeset = &fb_crtc->mode_set;
1554 1635
1555 if (mode && fb_crtc) { 1636 if (mode && fb_crtc) {
1556 DRM_DEBUG_KMS("desired mode %s set on crtc %d\n", 1637 DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
1557 mode->name, fb_crtc->mode_set.crtc->base.id); 1638 mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
1558 fb_crtc->desired_mode = mode; 1639 fb_crtc->desired_mode = mode;
1640 fb_crtc->x = offset->x;
1641 fb_crtc->y = offset->y;
1559 if (modeset->mode) 1642 if (modeset->mode)
1560 drm_mode_destroy(dev, modeset->mode); 1643 drm_mode_destroy(dev, modeset->mode);
1561 modeset->mode = drm_mode_duplicate(dev, 1644 modeset->mode = drm_mode_duplicate(dev,
1562 fb_crtc->desired_mode); 1645 fb_crtc->desired_mode);
1563 modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; 1646 modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
1564 modeset->fb = fb_helper->fb; 1647 modeset->fb = fb_helper->fb;
1648 modeset->x = offset->x;
1649 modeset->y = offset->y;
1565 } 1650 }
1566 } 1651 }
1567 1652
@@ -1578,6 +1663,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1578out: 1663out:
1579 kfree(crtcs); 1664 kfree(crtcs);
1580 kfree(modes); 1665 kfree(modes);
1666 kfree(offsets);
1581 kfree(enabled); 1667 kfree(enabled);
1582} 1668}
1583 1669
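Three related changes run through the drm_fb_helper.c hunks: the delayed-hotplug flag is now consumed outside the modeset locks (drm_fb_helper_hotplug_event() retakes them itself), the shared framebuffer is sized against mode-plus-offset rather than the bare mode, and drm_target_preferred() walks connectors in tile-aware passes. The sizing rule is easy to get wrong, so here is a small sketch of it under assumed stand-in types (struct head is not a DRM structure): a head scanning out at (x, y) needs the allocation to reach x + hdisplay by y + vdisplay.

#include <limits.h>

struct head { int hdisplay, vdisplay, x, y, enabled; };

static void fb_bounds(const struct head *heads, int n,
		      int *fb_w, int *fb_h, int *surf_w, int *surf_h)
{
	int i;

	*fb_w = *fb_h = INT_MAX;	/* area common to every head */
	*surf_w = *surf_h = 0;		/* allocation must cover all heads */
	for (i = 0; i < n; i++) {
		int right, bottom;

		if (!heads[i].enabled)
			continue;
		right  = heads[i].hdisplay + heads[i].x;
		bottom = heads[i].vdisplay + heads[i].y;
		if (right  < *fb_w)   *fb_w = right;
		if (bottom < *fb_h)   *fb_h = bottom;
		if (right  > *surf_w) *surf_w = right;
		if (bottom > *surf_h) *surf_h = bottom;
	}
	/* the no-enabled-heads case (fb_w still INT_MAX) is left to the
	 * caller, as the kernel code does with its own sentinel */
}

For a pair of 1920x2160 tiles at x offsets 0 and 1920, this yields a 3840x2160 surface instead of the 1920x2160 the pre-patch code would have allocated.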
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 91e1105f2800..0b9514b6cd64 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -527,6 +527,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
527 if (copy_to_user(buffer + total, 527 if (copy_to_user(buffer + total,
528 e->event, e->event->length)) { 528 e->event, e->event->length)) {
529 total = -EFAULT; 529 total = -EFAULT;
530 e->destroy(e);
530 break; 531 break;
531 } 532 }
532 533
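The one-line drm_fops.c fix closes an event leak: drm_read() unlinks an event from the queue before copying it out, so a faulting copy_to_user() left the event orphaned. A sketch of the corrected shape, with hypothetical types standing in for the DRM event structures and a copy callback standing in for copy_to_user():

struct ev {
	struct ev *next;
	void (*destroy)(struct ev *);
};

/* copy() returns nonzero if the copy to the user buffer faulted. */
static long drain(struct ev **queue, int (*copy)(const struct ev *))
{
	long n = 0;

	while (*queue) {
		struct ev *e = *queue;

		*queue = e->next;		/* e is now off the queue... */
		if (copy(e)) {
			e->destroy(e);		/* ...so a failed copy must free
						   it, or nobody ever will */
			return n ? n : -1;	/* -1 stands in for -EFAULT */
		}
		e->destroy(e);
		n++;
	}
	return n;
}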
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0e47df4ef24e..f5a5f18efa5b 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -166,7 +166,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
166 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 166 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
167 167
168 /* 168 /*
169 * If the vblank interrupt was already disbled update the count 169 * If the vblank interrupt was already disabled update the count
170 * and timestamp to maintain the appearance that the counter 170 * and timestamp to maintain the appearance that the counter
171 * has been ticking all along until this time. This makes the 171 * has been ticking all along until this time. This makes the
172 * count account for the entire time between drm_vblank_on() and 172 * count account for the entire time between drm_vblank_on() and
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 03d0b0cb8e05..fb3e3d429191 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4565,7 +4565,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
4565 ironlake_fdi_disable(crtc); 4565 ironlake_fdi_disable(crtc);
4566 4566
4567 ironlake_disable_pch_transcoder(dev_priv, pipe); 4567 ironlake_disable_pch_transcoder(dev_priv, pipe);
4568 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4569 4568
4570 if (HAS_PCH_CPT(dev)) { 4569 if (HAS_PCH_CPT(dev)) {
4571 /* disable TRANS_DP_CTL */ 4570 /* disable TRANS_DP_CTL */
@@ -4636,8 +4635,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4636 4635
4637 if (intel_crtc->config.has_pch_encoder) { 4636 if (intel_crtc->config.has_pch_encoder) {
4638 lpt_disable_pch_transcoder(dev_priv); 4637 lpt_disable_pch_transcoder(dev_priv);
4639 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4640 true);
4641 intel_ddi_fdi_disable(crtc); 4638 intel_ddi_fdi_disable(crtc);
4642 } 4639 }
4643 4640
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index bfe359506377..7f8c6a66680a 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -283,7 +283,7 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
283 struct intel_connector *intel_connector = to_intel_connector(connector); 283 struct intel_connector *intel_connector = to_intel_connector(connector);
284 struct intel_dp *intel_dp = intel_connector->mst_port; 284 struct intel_dp *intel_dp = intel_connector->mst_port;
285 285
286 return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port); 286 return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
287} 287}
288 288
289static int 289static int
@@ -414,6 +414,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
414 intel_dp_add_properties(intel_dp, connector); 414 intel_dp_add_properties(intel_dp, connector);
415 415
416 drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); 416 drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
417 drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
418
417 drm_mode_connector_set_path_property(connector, pathprop); 419 drm_mode_connector_set_path_property(connector, pathprop);
418 drm_reinit_primary_mode_group(dev); 420 drm_reinit_primary_mode_group(dev);
419 mutex_lock(&dev->mode_config.mutex); 421 mutex_lock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f2183b554cbc..850cf7d6578c 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -324,6 +324,7 @@ intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
324static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, 324static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
325 struct drm_fb_helper_crtc **crtcs, 325 struct drm_fb_helper_crtc **crtcs,
326 struct drm_display_mode **modes, 326 struct drm_display_mode **modes,
327 struct drm_fb_offset *offsets,
327 bool *enabled, int width, int height) 328 bool *enabled, int width, int height)
328{ 329{
329 struct drm_device *dev = fb_helper->dev; 330 struct drm_device *dev = fb_helper->dev;
@@ -332,6 +333,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
332 bool fallback = true; 333 bool fallback = true;
333 int num_connectors_enabled = 0; 334 int num_connectors_enabled = 0;
334 int num_connectors_detected = 0; 335 int num_connectors_detected = 0;
336 uint64_t conn_configured = 0, mask;
337 int pass = 0;
335 338
336 save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), 339 save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
337 GFP_KERNEL); 340 GFP_KERNEL);
@@ -339,7 +342,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
339 return false; 342 return false;
340 343
341 memcpy(save_enabled, enabled, dev->mode_config.num_connector); 344 memcpy(save_enabled, enabled, dev->mode_config.num_connector);
342 345 mask = (1 << fb_helper->connector_count) - 1;
346retry:
343 for (i = 0; i < fb_helper->connector_count; i++) { 347 for (i = 0; i < fb_helper->connector_count; i++) {
344 struct drm_fb_helper_connector *fb_conn; 348 struct drm_fb_helper_connector *fb_conn;
345 struct drm_connector *connector; 349 struct drm_connector *connector;
@@ -349,12 +353,19 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
349 fb_conn = fb_helper->connector_info[i]; 353 fb_conn = fb_helper->connector_info[i];
350 connector = fb_conn->connector; 354 connector = fb_conn->connector;
351 355
356 if (conn_configured & (1 << i))
357 continue;
358
359 if (pass == 0 && !connector->has_tile)
360 continue;
361
352 if (connector->status == connector_status_connected) 362 if (connector->status == connector_status_connected)
353 num_connectors_detected++; 363 num_connectors_detected++;
354 364
355 if (!enabled[i]) { 365 if (!enabled[i]) {
356 DRM_DEBUG_KMS("connector %s not enabled, skipping\n", 366 DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
357 connector->name); 367 connector->name);
368 conn_configured |= (1 << i);
358 continue; 369 continue;
359 } 370 }
360 371
@@ -373,6 +384,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
373 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", 384 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
374 connector->name); 385 connector->name);
375 enabled[i] = false; 386 enabled[i] = false;
387 conn_configured |= (1 << i);
376 continue; 388 continue;
377 } 389 }
378 390
@@ -400,8 +412,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
400 412
401 /* try for preferred next */ 413 /* try for preferred next */
402 if (!modes[i]) { 414 if (!modes[i]) {
403 DRM_DEBUG_KMS("looking for preferred mode on connector %s\n", 415 DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
404 connector->name); 416 connector->name, connector->has_tile);
405 modes[i] = drm_has_preferred_mode(fb_conn, width, 417 modes[i] = drm_has_preferred_mode(fb_conn, width,
406 height); 418 height);
407 } 419 }
@@ -444,6 +456,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
444 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); 456 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
445 457
446 fallback = false; 458 fallback = false;
459 conn_configured |= (1 << i);
460 }
461
462 if ((conn_configured & mask) != mask) {
463 pass++;
464 goto retry;
447 } 465 }
448 466
449 /* 467 /*
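The intel_fbdev.c hunks introduce the same pass-and-retry scheme as drm_target_preferred() above: a bitmask records which connectors have been handled, pass 0 visits only tiled connectors so a tile group claims its CRTCs first, and the loop retries until the mask is full. A condensed sketch of the control flow, with illustrative types (struct conn and pick_mode are stand-ins, not i915 API):

#include <stdbool.h>
#include <stdint.h>

struct conn { bool has_tile, enabled; };

static void configure_all(struct conn *c, int count,
			  void (*pick_mode)(struct conn *))
{
	uint64_t done = 0;
	uint64_t mask = (1ULL << count) - 1;	/* assumes count < 64 */
	int pass = 0;

retry:
	for (int i = 0; i < count; i++) {
		if (done & (1ULL << i))
			continue;
		/* pass 0: tiled connectors only; later passes sweep the rest */
		if (pass == 0 && !c[i].has_tile)
			continue;
		if (c[i].enabled)
			pick_mode(&c[i]);
		done |= 1ULL << i;
	}
	if ((done & mask) != mask) {
		pass++;
		goto retry;	/* terminates: pass >= 1 skips nothing */
	}
}

The termination argument matters: every retry must guarantee progress, which here comes from later passes accepting any unconfigured connector.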
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c03d457a5150..14654d628ca4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -899,6 +899,17 @@ void intel_lvds_init(struct drm_device *dev)
899 int pipe; 899 int pipe;
900 u8 pin; 900 u8 pin;
901 901
902 /*
903 * Unlock registers and just leave them unlocked. Do this before
904 * checking quirk lists to avoid bogus WARNINGs.
905 */
906 if (HAS_PCH_SPLIT(dev)) {
907 I915_WRITE(PCH_PP_CONTROL,
908 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
909 } else {
910 I915_WRITE(PP_CONTROL,
911 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
912 }
902 if (!intel_lvds_supported(dev)) 913 if (!intel_lvds_supported(dev))
903 return; 914 return;
904 915
@@ -1097,17 +1108,6 @@ out:
1097 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) & 1108 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
1098 LVDS_A3_POWER_MASK; 1109 LVDS_A3_POWER_MASK;
1099 1110
1100 /*
1101 * Unlock registers and just
1102 * leave them unlocked
1103 */
1104 if (HAS_PCH_SPLIT(dev)) {
1105 I915_WRITE(PCH_PP_CONTROL,
1106 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
1107 } else {
1108 I915_WRITE(PP_CONTROL,
1109 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
1110 }
1111 lvds_connector->lid_notifier.notifier_call = intel_lid_notify; 1111 lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
1112 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { 1112 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
1113 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1113 DRM_DEBUG_KMS("lid notifier registration failed\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index cd05677ad4b7..72a40f95d048 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -218,7 +218,6 @@ nvc0_identify(struct nouveau_device *device)
218 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 218 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
219 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 219 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
220 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 220 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
221 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
222 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass; 221 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
223 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; 222 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
224 break; 223 break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index 5ae6a43893b5..1931057f9962 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
551 } 551 }
552 552
553 if (status & 0x40000000) { 553 if (status & 0x40000000) {
554 nouveau_fifo_uevent(&priv->base);
555 nv_wr32(priv, 0x002100, 0x40000000); 554 nv_wr32(priv, 0x002100, 0x40000000);
555 nouveau_fifo_uevent(&priv->base);
556 status &= ~0x40000000; 556 status &= ~0x40000000;
557 } 557 }
558 } 558 }
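This hunk, together with the nvc0.c and nve0.c hunks below, is one fix applied three times: acknowledge the interrupt status before running the handler. With the old handler-then-ack order, an event raised while the handler ran was wiped out by the late ack and silently lost. In miniature, with a plain variable modeling the write-to-clear status register (the real code uses nv_wr32() on MMIO):

#include <stdint.h>

static volatile uint32_t intr_status;	/* models the 0x002100 status word */

static void service(uint32_t bit, void (*handler)(void))
{
	if (intr_status & bit) {
		intr_status &= ~bit;	/* ack first */
		handler();		/* an event raised during handling sets
					   the bit again and is seen next pass */
	}
}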
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 1fe1f8fbda0c..074d434c3077 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
740 u32 inte = nv_rd32(priv, 0x002628); 740 u32 inte = nv_rd32(priv, 0x002628);
741 u32 unkn; 741 u32 unkn;
742 742
743 nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
744
743 for (unkn = 0; unkn < 8; unkn++) { 745 for (unkn = 0; unkn < 8; unkn++) {
744 u32 ints = (intr >> (unkn * 0x04)) & inte; 746 u32 ints = (intr >> (unkn * 0x04)) & inte;
745 if (ints & 0x1) { 747 if (ints & 0x1) {
@@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
751 nv_mask(priv, 0x002628, ints, 0); 753 nv_mask(priv, 0x002628, ints, 0);
752 } 754 }
753 } 755 }
754
755 nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
756} 756}
757 757
758static void 758static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index fc9ef663f25a..6a8db7c80bd1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -982,8 +982,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
982 } 982 }
983 983
984 if (stat & 0x80000000) { 984 if (stat & 0x80000000) {
985 nve0_fifo_intr_engine(priv);
986 nv_wr32(priv, 0x002100, 0x80000000); 985 nv_wr32(priv, 0x002100, 0x80000000);
986 nve0_fifo_intr_engine(priv);
987 stat &= ~0x80000000; 987 stat &= ~0x80000000;
988 } 988 }
989 989
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index afb93bb72f97..65910e3aed0c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -664,7 +664,6 @@ nouveau_pmops_suspend(struct device *dev)
664 664
665 pci_save_state(pdev); 665 pci_save_state(pdev);
666 pci_disable_device(pdev); 666 pci_disable_device(pdev);
667 pci_ignore_hotplug(pdev);
668 pci_set_power_state(pdev, PCI_D3hot); 667 pci_set_power_state(pdev, PCI_D3hot);
669 return 0; 668 return 0;
670} 669}
@@ -732,6 +731,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
732 ret = nouveau_do_suspend(drm_dev, true); 731 ret = nouveau_do_suspend(drm_dev, true);
733 pci_save_state(pdev); 732 pci_save_state(pdev);
734 pci_disable_device(pdev); 733 pci_disable_device(pdev);
734 pci_ignore_hotplug(pdev);
735 pci_set_power_state(pdev, PCI_D3cold); 735 pci_set_power_state(pdev, PCI_D3cold);
736 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; 736 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
737 return ret; 737 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 515cd9aebb99..f32a434724e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence)
52 return container_of(fence->base.lock, struct nouveau_fence_chan, lock); 52 return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
53} 53}
54 54
55static void 55static int
56nouveau_fence_signal(struct nouveau_fence *fence) 56nouveau_fence_signal(struct nouveau_fence *fence)
57{ 57{
58 int drop = 0;
59
58 fence_signal_locked(&fence->base); 60 fence_signal_locked(&fence->base);
59 list_del(&fence->head); 61 list_del(&fence->head);
62 rcu_assign_pointer(fence->channel, NULL);
60 63
61 if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) { 64 if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
62 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 65 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
63 66
64 if (!--fctx->notify_ref) 67 if (!--fctx->notify_ref)
65 nvif_notify_put(&fctx->notify); 68 drop = 1;
66 } 69 }
67 70
68 fence_put(&fence->base); 71 fence_put(&fence->base);
72 return drop;
69} 73}
70 74
71static struct nouveau_fence * 75static struct nouveau_fence *
@@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
88{ 92{
89 struct nouveau_fence *fence; 93 struct nouveau_fence *fence;
90 94
91 nvif_notify_fini(&fctx->notify);
92
93 spin_lock_irq(&fctx->lock); 95 spin_lock_irq(&fctx->lock);
94 while (!list_empty(&fctx->pending)) { 96 while (!list_empty(&fctx->pending)) {
95 fence = list_entry(fctx->pending.next, typeof(*fence), head); 97 fence = list_entry(fctx->pending.next, typeof(*fence), head);
96 98
97 nouveau_fence_signal(fence); 99 if (nouveau_fence_signal(fence))
98 fence->channel = NULL; 100 nvif_notify_put(&fctx->notify);
99 } 101 }
100 spin_unlock_irq(&fctx->lock); 102 spin_unlock_irq(&fctx->lock);
103
104 nvif_notify_fini(&fctx->notify);
105 fctx->dead = 1;
106
107 /*
108 * Ensure that all accesses to fence->channel complete before freeing
109 * the channel.
110 */
111 synchronize_rcu();
101} 112}
102 113
103static void 114static void
@@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
112 kref_put(&fctx->fence_ref, nouveau_fence_context_put); 123 kref_put(&fctx->fence_ref, nouveau_fence_context_put);
113} 124}
114 125
115static void 126static int
116nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx) 127nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
117{ 128{
118 struct nouveau_fence *fence; 129 struct nouveau_fence *fence;
119 130 int drop = 0;
120 u32 seq = fctx->read(chan); 131 u32 seq = fctx->read(chan);
121 132
122 while (!list_empty(&fctx->pending)) { 133 while (!list_empty(&fctx->pending)) {
123 fence = list_entry(fctx->pending.next, typeof(*fence), head); 134 fence = list_entry(fctx->pending.next, typeof(*fence), head);
124 135
125 if ((int)(seq - fence->base.seqno) < 0) 136 if ((int)(seq - fence->base.seqno) < 0)
126 return; 137 break;
127 138
128 nouveau_fence_signal(fence); 139 drop |= nouveau_fence_signal(fence);
129 } 140 }
141
142 return drop;
130} 143}
131 144
132static int 145static int
@@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
135 struct nouveau_fence_chan *fctx = 148 struct nouveau_fence_chan *fctx =
136 container_of(notify, typeof(*fctx), notify); 149 container_of(notify, typeof(*fctx), notify);
137 unsigned long flags; 150 unsigned long flags;
151 int ret = NVIF_NOTIFY_KEEP;
138 152
139 spin_lock_irqsave(&fctx->lock, flags); 153 spin_lock_irqsave(&fctx->lock, flags);
140 if (!list_empty(&fctx->pending)) { 154 if (!list_empty(&fctx->pending)) {
141 struct nouveau_fence *fence; 155 struct nouveau_fence *fence;
156 struct nouveau_channel *chan;
142 157
143 fence = list_entry(fctx->pending.next, typeof(*fence), head); 158 fence = list_entry(fctx->pending.next, typeof(*fence), head);
144 nouveau_fence_update(fence->channel, fctx); 159 chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
160 if (nouveau_fence_update(chan, fctx))
161 ret = NVIF_NOTIFY_DROP;
145 } 162 }
146 spin_unlock_irqrestore(&fctx->lock, flags); 163 spin_unlock_irqrestore(&fctx->lock, flags);
147 164
148 /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */ 165 return ret;
149 return NVIF_NOTIFY_KEEP;
150} 166}
151 167
152void 168void
@@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
262 if (!ret) { 278 if (!ret) {
263 fence_get(&fence->base); 279 fence_get(&fence->base);
264 spin_lock_irq(&fctx->lock); 280 spin_lock_irq(&fctx->lock);
265 nouveau_fence_update(chan, fctx); 281
282 if (nouveau_fence_update(chan, fctx))
283 nvif_notify_put(&fctx->notify);
284
266 list_add_tail(&fence->head, &fctx->pending); 285 list_add_tail(&fence->head, &fctx->pending);
267 spin_unlock_irq(&fctx->lock); 286 spin_unlock_irq(&fctx->lock);
268 } 287 }
@@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence)
276 if (fence->base.ops == &nouveau_fence_ops_legacy || 295 if (fence->base.ops == &nouveau_fence_ops_legacy ||
277 fence->base.ops == &nouveau_fence_ops_uevent) { 296 fence->base.ops == &nouveau_fence_ops_uevent) {
278 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 297 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
298 struct nouveau_channel *chan;
279 unsigned long flags; 299 unsigned long flags;
280 300
281 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) 301 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
282 return true; 302 return true;
283 303
284 spin_lock_irqsave(&fctx->lock, flags); 304 spin_lock_irqsave(&fctx->lock, flags);
285 nouveau_fence_update(fence->channel, fctx); 305 chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
306 if (chan && nouveau_fence_update(chan, fctx))
307 nvif_notify_put(&fctx->notify);
286 spin_unlock_irqrestore(&fctx->lock, flags); 308 spin_unlock_irqrestore(&fctx->lock, flags);
287 } 309 }
288 return fence_is_signaled(&fence->base); 310 return fence_is_signaled(&fence->base);
@@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
387 409
388 if (fence && (!exclusive || !fobj || !fobj->shared_count)) { 410 if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
389 struct nouveau_channel *prev = NULL; 411 struct nouveau_channel *prev = NULL;
412 bool must_wait = true;
390 413
391 f = nouveau_local_fence(fence, chan->drm); 414 f = nouveau_local_fence(fence, chan->drm);
392 if (f) 415 if (f) {
393 prev = f->channel; 416 rcu_read_lock();
417 prev = rcu_dereference(f->channel);
418 if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
419 must_wait = false;
420 rcu_read_unlock();
421 }
394 422
395 if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan)))) 423 if (must_wait)
396 ret = fence_wait(fence, intr); 424 ret = fence_wait(fence, intr);
397 425
398 return ret; 426 return ret;
@@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
403 431
404 for (i = 0; i < fobj->shared_count && !ret; ++i) { 432 for (i = 0; i < fobj->shared_count && !ret; ++i) {
405 struct nouveau_channel *prev = NULL; 433 struct nouveau_channel *prev = NULL;
434 bool must_wait = true;
406 435
407 fence = rcu_dereference_protected(fobj->shared[i], 436 fence = rcu_dereference_protected(fobj->shared[i],
408 reservation_object_held(resv)); 437 reservation_object_held(resv));
409 438
410 f = nouveau_local_fence(fence, chan->drm); 439 f = nouveau_local_fence(fence, chan->drm);
411 if (f) 440 if (f) {
412 prev = f->channel; 441 rcu_read_lock();
442 prev = rcu_dereference(f->channel);
443 if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
444 must_wait = false;
445 rcu_read_unlock();
446 }
413 447
414 if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan)))) 448 if (must_wait)
415 ret = fence_wait(fence, intr); 449 ret = fence_wait(fence, intr);
416
417 if (ret)
418 break;
419 } 450 }
420 451
421 return ret; 452 return ret;
@@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
463 struct nouveau_fence *fence = from_fence(f); 494 struct nouveau_fence *fence = from_fence(f);
464 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 495 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
465 496
466 return fence->channel ? fctx->name : "dead channel"; 497 return !fctx->dead ? fctx->name : "dead channel";
467} 498}
468 499
469/* 500/*
@@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f)
476{ 507{
477 struct nouveau_fence *fence = from_fence(f); 508 struct nouveau_fence *fence = from_fence(f);
478 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 509 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
479 struct nouveau_channel *chan = fence->channel; 510 struct nouveau_channel *chan;
511 bool ret = false;
512
513 rcu_read_lock();
514 chan = rcu_dereference(fence->channel);
515 if (chan)
516 ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
517 rcu_read_unlock();
480 518
481 return (int)(fctx->read(chan) - fence->base.seqno) >= 0; 519 return ret;
482} 520}
483 521
484static bool nouveau_fence_no_signaling(struct fence *f) 522static bool nouveau_fence_no_signaling(struct fence *f)
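The nouveau_fence.c rework has two strands: nouveau_fence_signal() and nouveau_fence_update() now report whether the notify refcount should drop, so callers invoke nvif_notify_put() themselves, and fence->channel becomes an RCU-protected pointer that is unpublished before the channel is freed. Below is a user-space analogy of the RCU pattern using C11 atomics in place of rcu_assign_pointer()/rcu_dereference(); it is an analogy only (a real user-space port would use liburcu, and the grace-period wait is elided).

#include <stdatomic.h>
#include <stddef.h>

struct channel { int id; };
struct fence { _Atomic(struct channel *) channel; };

/* reader: load once, check, never cache across the check */
static int fence_channel_id(struct fence *f)
{
	struct channel *chan = atomic_load_explicit(&f->channel,
						    memory_order_acquire);
	return chan ? chan->id : -1;	/* -1 plays the "dead channel" role */
}

/* teardown: unpublish, then wait out in-flight readers before freeing;
 * the kernel side is the NULL assignment plus synchronize_rcu() above */
static void fence_kill_channel(struct fence *f)
{
	atomic_store_explicit(&f->channel, NULL, memory_order_release);
	/* grace period would go here */
}

The new fctx->dead flag serves the timeline-name lookup the same way: the name must stay answerable after the channel pointer is gone.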
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 943b0b17b1fc..96e461c6f68f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -14,7 +14,7 @@ struct nouveau_fence {
14 14
15 bool sysmem; 15 bool sysmem;
16 16
17 struct nouveau_channel *channel; 17 struct nouveau_channel __rcu *channel;
18 unsigned long timeout; 18 unsigned long timeout;
19}; 19};
20 20
@@ -47,7 +47,7 @@ struct nouveau_fence_chan {
47 char name[32]; 47 char name[32];
48 48
49 struct nvif_notify notify; 49 struct nvif_notify notify;
50 int notify_ref; 50 int notify_ref, dead;
51}; 51};
52 52
53struct nouveau_fence_priv { 53struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 446e71ca36cb..d9b25684ac98 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -264,7 +264,8 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
264 if (list_is_singular(&release->bos)) 264 if (list_is_singular(&release->bos))
265 return 0; 265 return 0;
266 266
267 ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr); 267 ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
268 !no_intr, NULL);
268 if (ret) 269 if (ret)
269 return ret; 270 return ret;
270 271
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 30d242b25078..d59ec491dbb9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -2039,6 +2039,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
2039 atombios_crtc_set_base(crtc, x, y, old_fb); 2039 atombios_crtc_set_base(crtc, x, y, old_fb);
2040 atombios_overscan_setup(crtc, mode, adjusted_mode); 2040 atombios_overscan_setup(crtc, mode, adjusted_mode);
2041 atombios_scaler_setup(crtc); 2041 atombios_scaler_setup(crtc);
2042 radeon_cursor_reset(crtc);
2042 /* update the hw version fpr dpm */ 2043 /* update the hw version fpr dpm */
2043 radeon_crtc->hw_mode = *adjusted_mode; 2044 radeon_crtc->hw_mode = *adjusted_mode;
2044 2045
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 3f898d020ae6..f373a81ba3d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -937,7 +937,7 @@ static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
937 tmp |= TMIN(0); 937 tmp |= TMIN(0);
938 WREG32_SMC(CG_FDO_CTRL2, tmp); 938 WREG32_SMC(CG_FDO_CTRL2, tmp);
939 939
940 tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; 940 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
941 tmp |= FDO_PWM_MODE(mode); 941 tmp |= FDO_PWM_MODE(mode);
942 WREG32_SMC(CG_FDO_CTRL2, tmp); 942 WREG32_SMC(CG_FDO_CTRL2, tmp);
943} 943}
@@ -1162,7 +1162,7 @@ static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
1162 tmp |= TARGET_PERIOD(tach_period); 1162 tmp |= TARGET_PERIOD(tach_period);
1163 WREG32_SMC(CG_TACH_CTRL, tmp); 1163 WREG32_SMC(CG_TACH_CTRL, tmp);
1164 1164
1165 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); 1165 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
1166 1166
1167 return 0; 1167 return 0;
1168} 1168}
@@ -1178,7 +1178,7 @@ static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
1178 tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode); 1178 tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
1179 WREG32_SMC(CG_FDO_CTRL2, tmp); 1179 WREG32_SMC(CG_FDO_CTRL2, tmp);
1180 1180
1181 tmp = RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK; 1181 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
1182 tmp |= TMIN(pi->t_min); 1182 tmp |= TMIN(pi->t_min);
1183 WREG32_SMC(CG_FDO_CTRL2, tmp); 1183 WREG32_SMC(CG_FDO_CTRL2, tmp);
1184 pi->fan_ctrl_is_in_default_mode = true; 1184 pi->fan_ctrl_is_in_default_mode = true;
@@ -5849,7 +5849,6 @@ int ci_dpm_init(struct radeon_device *rdev)
5849 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 5849 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5850 5850
5851 pi->fan_ctrl_is_in_default_mode = true; 5851 pi->fan_ctrl_is_in_default_mode = true;
5852 rdev->pm.dpm.fan.ucode_fan_control = false;
5853 5852
5854 return 0; 5853 return 0;
5855} 5854}
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index e4e88ca8b82e..ba85986febea 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -213,18 +213,18 @@
213 213
214#define CG_FDO_CTRL0 0xC0300064 214#define CG_FDO_CTRL0 0xC0300064
215#define FDO_STATIC_DUTY(x) ((x) << 0) 215#define FDO_STATIC_DUTY(x) ((x) << 0)
216#define FDO_STATIC_DUTY_MASK 0x0000000F 216#define FDO_STATIC_DUTY_MASK 0x000000FF
217#define FDO_STATIC_DUTY_SHIFT 0 217#define FDO_STATIC_DUTY_SHIFT 0
218#define CG_FDO_CTRL1 0xC0300068 218#define CG_FDO_CTRL1 0xC0300068
219#define FMAX_DUTY100(x) ((x) << 0) 219#define FMAX_DUTY100(x) ((x) << 0)
220#define FMAX_DUTY100_MASK 0x0000000F 220#define FMAX_DUTY100_MASK 0x000000FF
221#define FMAX_DUTY100_SHIFT 0 221#define FMAX_DUTY100_SHIFT 0
222#define CG_FDO_CTRL2 0xC030006C 222#define CG_FDO_CTRL2 0xC030006C
223#define TMIN(x) ((x) << 0) 223#define TMIN(x) ((x) << 0)
224#define TMIN_MASK 0x0000000F 224#define TMIN_MASK 0x000000FF
225#define TMIN_SHIFT 0 225#define TMIN_SHIFT 0
226#define FDO_PWM_MODE(x) ((x) << 11) 226#define FDO_PWM_MODE(x) ((x) << 11)
227#define FDO_PWM_MODE_MASK (3 << 11) 227#define FDO_PWM_MODE_MASK (7 << 11)
228#define FDO_PWM_MODE_SHIFT 11 228#define FDO_PWM_MODE_SHIFT 11
229#define TACH_PWM_RESP_RATE(x) ((x) << 25) 229#define TACH_PWM_RESP_RATE(x) ((x) << 25)
230#define TACH_PWM_RESP_RATE_MASK (0x7f << 25) 230#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
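The ci_dpm.c and cikd.h hunks fix two faces of one read-modify-write bug: the code masked with the field mask instead of its complement, discarding every other bit in the register, and the masks themselves were too narrow to clear the whole field. The correct idiom, using the widened FDO_PWM_MODE field as the example (plain variables stand in for the RREG32_SMC/WREG32_SMC accessors):

#include <stdint.h>

#define FDO_PWM_MODE(x)		((uint32_t)(x) << 11)
#define FDO_PWM_MODE_MASK	(7u << 11)	/* 3 bits, per the cikd.h fix */

static uint32_t set_pwm_mode(uint32_t reg, uint32_t mode)
{
	reg &= ~FDO_PWM_MODE_MASK;	/* clear only this field */
	reg |= FDO_PWM_MODE(mode);	/* then insert the new value */
	return reg;
}

With the old form, "reg & FDO_PWM_MODE_MASK" zeroed everything outside the field and kept only the stale mode bits, which is exactly backwards.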
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 5c8b358f9fba..924b1b7ab455 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -35,7 +35,7 @@
35#define MIN(a,b) (((a)<(b))?(a):(b)) 35#define MIN(a,b) (((a)<(b))?(a):(b))
36 36
37int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, 37int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
38 struct radeon_cs_reloc **cs_reloc); 38 struct radeon_bo_list **cs_reloc);
39struct evergreen_cs_track { 39struct evergreen_cs_track {
40 u32 group_size; 40 u32 group_size;
41 u32 nbanks; 41 u32 nbanks;
@@ -1094,7 +1094,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
1094static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 1094static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1095{ 1095{
1096 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; 1096 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
1097 struct radeon_cs_reloc *reloc; 1097 struct radeon_bo_list *reloc;
1098 u32 last_reg; 1098 u32 last_reg;
1099 u32 m, i, tmp, *ib; 1099 u32 m, i, tmp, *ib;
1100 int r; 1100 int r;
@@ -1792,7 +1792,7 @@ static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1792static int evergreen_packet3_check(struct radeon_cs_parser *p, 1792static int evergreen_packet3_check(struct radeon_cs_parser *p,
1793 struct radeon_cs_packet *pkt) 1793 struct radeon_cs_packet *pkt)
1794{ 1794{
1795 struct radeon_cs_reloc *reloc; 1795 struct radeon_bo_list *reloc;
1796 struct evergreen_cs_track *track; 1796 struct evergreen_cs_track *track;
1797 volatile u32 *ib; 1797 volatile u32 *ib;
1798 unsigned idx; 1798 unsigned idx;
@@ -2661,7 +2661,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
2661 p->track = NULL; 2661 p->track = NULL;
2662 return r; 2662 return r;
2663 } 2663 }
2664 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2664 } while (p->idx < p->chunk_ib->length_dw);
2665#if 0 2665#if 0
2666 for (r = 0; r < p->ib.length_dw; r++) { 2666 for (r = 0; r < p->ib.length_dw; r++) {
2667 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); 2667 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2684,8 +2684,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
2684 **/ 2684 **/
2685int evergreen_dma_cs_parse(struct radeon_cs_parser *p) 2685int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2686{ 2686{
2687 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 2687 struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
2688 struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc; 2688 struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
2689 u32 header, cmd, count, sub_cmd; 2689 u32 header, cmd, count, sub_cmd;
2690 volatile u32 *ib = p->ib.ptr; 2690 volatile u32 *ib = p->ib.ptr;
2691 u32 idx; 2691 u32 idx;
@@ -3100,7 +3100,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3100 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); 3100 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
3101 return -EINVAL; 3101 return -EINVAL;
3102 } 3102 }
3103 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 3103 } while (p->idx < p->chunk_ib->length_dw);
3104#if 0 3104#if 0
3105 for (r = 0; r < p->ib->length_dw; r++) { 3105 for (r = 0; r < p->ib->length_dw; r++) {
3106 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); 3106 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
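The radeon hunks from here on are one mechanical refactor applied everywhere: the parser stops storing chunk indices (chunk_ib_idx and friends, with -1 meaning absent) and relocation entries of type radeon_cs_reloc, and instead caches direct pointers (chunk_ib, chunk_relocs, NULL meaning absent) to radeon_bo_list entries. The shape of the change, with illustrative structures:

struct chunk { unsigned length_dw; };

struct parser {
	struct chunk *chunks;
	int chunk_ib_idx;	/* old style: index, -1 when absent */
	struct chunk *chunk_ib;	/* new style: pointer, NULL when absent */
};

static unsigned ib_len(const struct parser *p)
{
	/* old: p->chunks[p->chunk_ib_idx].length_dw, after an index check */
	return p->chunk_ib ? p->chunk_ib->length_dw : 0;
}

Besides saving a re-index on every use, the pointer form makes the absent case type-checkable: a NULL test instead of a -1 convention.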
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b53b31a7b76f..74f06d540591 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1254,7 +1254,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
1254 int r; 1254 int r;
1255 u32 tile_flags = 0; 1255 u32 tile_flags = 0;
1256 u32 tmp; 1256 u32 tmp;
1257 struct radeon_cs_reloc *reloc; 1257 struct radeon_bo_list *reloc;
1258 u32 value; 1258 u32 value;
1259 1259
1260 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1260 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -1293,7 +1293,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
1293 int idx) 1293 int idx)
1294{ 1294{
1295 unsigned c, i; 1295 unsigned c, i;
1296 struct radeon_cs_reloc *reloc; 1296 struct radeon_bo_list *reloc;
1297 struct r100_cs_track *track; 1297 struct r100_cs_track *track;
1298 int r = 0; 1298 int r = 0;
1299 volatile uint32_t *ib; 1299 volatile uint32_t *ib;
@@ -1542,7 +1542,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1542 struct radeon_cs_packet *pkt, 1542 struct radeon_cs_packet *pkt,
1543 unsigned idx, unsigned reg) 1543 unsigned idx, unsigned reg)
1544{ 1544{
1545 struct radeon_cs_reloc *reloc; 1545 struct radeon_bo_list *reloc;
1546 struct r100_cs_track *track; 1546 struct r100_cs_track *track;
1547 volatile uint32_t *ib; 1547 volatile uint32_t *ib;
1548 uint32_t tmp; 1548 uint32_t tmp;
@@ -1901,7 +1901,7 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1901static int r100_packet3_check(struct radeon_cs_parser *p, 1901static int r100_packet3_check(struct radeon_cs_parser *p,
1902 struct radeon_cs_packet *pkt) 1902 struct radeon_cs_packet *pkt)
1903{ 1903{
1904 struct radeon_cs_reloc *reloc; 1904 struct radeon_bo_list *reloc;
1905 struct r100_cs_track *track; 1905 struct r100_cs_track *track;
1906 unsigned idx; 1906 unsigned idx;
1907 volatile uint32_t *ib; 1907 volatile uint32_t *ib;
@@ -2061,7 +2061,7 @@ int r100_cs_parse(struct radeon_cs_parser *p)
2061 } 2061 }
2062 if (r) 2062 if (r)
2063 return r; 2063 return r;
2064 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2064 } while (p->idx < p->chunk_ib->length_dw);
2065 return 0; 2065 return 0;
2066} 2066}
2067 2067
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 732d4938aab7..c70e6d5bcd19 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -146,7 +146,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
146 struct radeon_cs_packet *pkt, 146 struct radeon_cs_packet *pkt,
147 unsigned idx, unsigned reg) 147 unsigned idx, unsigned reg)
148{ 148{
149 struct radeon_cs_reloc *reloc; 149 struct radeon_bo_list *reloc;
150 struct r100_cs_track *track; 150 struct r100_cs_track *track;
151 volatile uint32_t *ib; 151 volatile uint32_t *ib;
152 uint32_t tmp; 152 uint32_t tmp;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1bc4704034ce..064ad5569cca 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -598,7 +598,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
598 struct radeon_cs_packet *pkt, 598 struct radeon_cs_packet *pkt,
599 unsigned idx, unsigned reg) 599 unsigned idx, unsigned reg)
600{ 600{
601 struct radeon_cs_reloc *reloc; 601 struct radeon_bo_list *reloc;
602 struct r100_cs_track *track; 602 struct r100_cs_track *track;
603 volatile uint32_t *ib; 603 volatile uint32_t *ib;
604 uint32_t tmp, tile_flags = 0; 604 uint32_t tmp, tile_flags = 0;
@@ -1142,7 +1142,7 @@ fail:
1142static int r300_packet3_check(struct radeon_cs_parser *p, 1142static int r300_packet3_check(struct radeon_cs_parser *p,
1143 struct radeon_cs_packet *pkt) 1143 struct radeon_cs_packet *pkt)
1144{ 1144{
1145 struct radeon_cs_reloc *reloc; 1145 struct radeon_bo_list *reloc;
1146 struct r100_cs_track *track; 1146 struct r100_cs_track *track;
1147 volatile uint32_t *ib; 1147 volatile uint32_t *ib;
1148 unsigned idx; 1148 unsigned idx;
@@ -1283,7 +1283,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
1283 if (r) { 1283 if (r) {
1284 return r; 1284 return r;
1285 } 1285 }
1286 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 1286 } while (p->idx < p->chunk_ib->length_dw);
1287 return 0; 1287 return 0;
1288} 1288}
1289 1289
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index c47537a1ddba..acc1f99c84d9 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -969,7 +969,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
969static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 969static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
970{ 970{
971 struct r600_cs_track *track = (struct r600_cs_track *)p->track; 971 struct r600_cs_track *track = (struct r600_cs_track *)p->track;
972 struct radeon_cs_reloc *reloc; 972 struct radeon_bo_list *reloc;
973 u32 m, i, tmp, *ib; 973 u32 m, i, tmp, *ib;
974 int r; 974 int r;
975 975
@@ -1626,7 +1626,7 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1626static int r600_packet3_check(struct radeon_cs_parser *p, 1626static int r600_packet3_check(struct radeon_cs_parser *p,
1627 struct radeon_cs_packet *pkt) 1627 struct radeon_cs_packet *pkt)
1628{ 1628{
1629 struct radeon_cs_reloc *reloc; 1629 struct radeon_bo_list *reloc;
1630 struct r600_cs_track *track; 1630 struct r600_cs_track *track;
1631 volatile u32 *ib; 1631 volatile u32 *ib;
1632 unsigned idx; 1632 unsigned idx;
@@ -2316,7 +2316,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
2316 p->track = NULL; 2316 p->track = NULL;
2317 return r; 2317 return r;
2318 } 2318 }
2319 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2319 } while (p->idx < p->chunk_ib->length_dw);
2320#if 0 2320#if 0
2321 for (r = 0; r < p->ib.length_dw; r++) { 2321 for (r = 0; r < p->ib.length_dw; r++) {
2322 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); 2322 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2351,10 +2351,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2351 2351
2352static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) 2352static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
2353{ 2353{
2354 if (p->chunk_relocs_idx == -1) { 2354 if (p->chunk_relocs == NULL) {
2355 return 0; 2355 return 0;
2356 } 2356 }
2357 p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL); 2357 p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
2358 if (p->relocs == NULL) { 2358 if (p->relocs == NULL) {
2359 return -ENOMEM; 2359 return -ENOMEM;
2360 } 2360 }
@@ -2398,7 +2398,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2398 /* Copy the packet into the IB, the parser will read from the 2398 /* Copy the packet into the IB, the parser will read from the
2399 * input memory (cached) and write to the IB (which can be 2399 * input memory (cached) and write to the IB (which can be
2400 * uncached). */ 2400 * uncached). */
2401 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 2401 ib_chunk = parser.chunk_ib;
2402 parser.ib.length_dw = ib_chunk->length_dw; 2402 parser.ib.length_dw = ib_chunk->length_dw;
2403 *l = parser.ib.length_dw; 2403 *l = parser.ib.length_dw;
2404 if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) { 2404 if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
@@ -2435,24 +2435,24 @@ void r600_cs_legacy_init(void)
2435 * GPU offset using the provided start. 2435 * GPU offset using the provided start.
2436 **/ 2436 **/
2437int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, 2437int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2438 struct radeon_cs_reloc **cs_reloc) 2438 struct radeon_bo_list **cs_reloc)
2439{ 2439{
2440 struct radeon_cs_chunk *relocs_chunk; 2440 struct radeon_cs_chunk *relocs_chunk;
2441 unsigned idx; 2441 unsigned idx;
2442 2442
2443 *cs_reloc = NULL; 2443 *cs_reloc = NULL;
2444 if (p->chunk_relocs_idx == -1) { 2444 if (p->chunk_relocs == NULL) {
2445 DRM_ERROR("No relocation chunk !\n"); 2445 DRM_ERROR("No relocation chunk !\n");
2446 return -EINVAL; 2446 return -EINVAL;
2447 } 2447 }
2448 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 2448 relocs_chunk = p->chunk_relocs;
2449 idx = p->dma_reloc_idx; 2449 idx = p->dma_reloc_idx;
2450 if (idx >= p->nrelocs) { 2450 if (idx >= p->nrelocs) {
2451 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 2451 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2452 idx, p->nrelocs); 2452 idx, p->nrelocs);
2453 return -EINVAL; 2453 return -EINVAL;
2454 } 2454 }
2455 *cs_reloc = p->relocs_ptr[idx]; 2455 *cs_reloc = &p->relocs[idx];
2456 p->dma_reloc_idx++; 2456 p->dma_reloc_idx++;
2457 return 0; 2457 return 0;
2458} 2458}
@@ -2472,8 +2472,8 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2472 **/ 2472 **/
2473int r600_dma_cs_parse(struct radeon_cs_parser *p) 2473int r600_dma_cs_parse(struct radeon_cs_parser *p)
2474{ 2474{
2475 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 2475 struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
2476 struct radeon_cs_reloc *src_reloc, *dst_reloc; 2476 struct radeon_bo_list *src_reloc, *dst_reloc;
2477 u32 header, cmd, count, tiled; 2477 u32 header, cmd, count, tiled;
2478 volatile u32 *ib = p->ib.ptr; 2478 volatile u32 *ib = p->ib.ptr;
2479 u32 idx, idx_value; 2479 u32 idx, idx_value;
@@ -2619,7 +2619,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
2619 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); 2619 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
2620 return -EINVAL; 2620 return -EINVAL;
2621 } 2621 }
2622 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2622 } while (p->idx < p->chunk_ib->length_dw);
2623#if 0 2623#if 0
2624 for (r = 0; r < p->ib->length_dw; r++) { 2624 for (r = 0; r < p->ib->length_dw; r++) {
2625 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); 2625 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3207bb60715e..54529b837afa 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -450,6 +450,15 @@ struct radeon_mman {
450#endif 450#endif
451}; 451};
452 452
453struct radeon_bo_list {
454 struct radeon_bo *robj;
455 struct ttm_validate_buffer tv;
456 uint64_t gpu_offset;
457 unsigned prefered_domains;
458 unsigned allowed_domains;
459 uint32_t tiling_flags;
460};
461
453/* bo virtual address in a specific vm */ 462/* bo virtual address in a specific vm */
454struct radeon_bo_va { 463struct radeon_bo_va {
455 /* protected by bo being reserved */ 464 /* protected by bo being reserved */
@@ -920,6 +929,9 @@ struct radeon_vm {
920 929
921 struct rb_root va; 930 struct rb_root va;
922 931
932 /* protecting invalidated and freed */
933 spinlock_t status_lock;
934
923 /* BOs moved, but not yet updated in the PT */ 935 /* BOs moved, but not yet updated in the PT */
924 struct list_head invalidated; 936 struct list_head invalidated;
925 937
@@ -1044,19 +1056,7 @@ void cayman_dma_fini(struct radeon_device *rdev);
1044/* 1056/*
1045 * CS. 1057 * CS.
1046 */ 1058 */
1047struct radeon_cs_reloc {
1048 struct drm_gem_object *gobj;
1049 struct radeon_bo *robj;
1050 struct ttm_validate_buffer tv;
1051 uint64_t gpu_offset;
1052 unsigned prefered_domains;
1053 unsigned allowed_domains;
1054 uint32_t tiling_flags;
1055 uint32_t handle;
1056};
1057
1058struct radeon_cs_chunk { 1059struct radeon_cs_chunk {
1059 uint32_t chunk_id;
1060 uint32_t length_dw; 1060 uint32_t length_dw;
1061 uint32_t *kdata; 1061 uint32_t *kdata;
1062 void __user *user_ptr; 1062 void __user *user_ptr;
@@ -1074,16 +1074,15 @@ struct radeon_cs_parser {
1074 unsigned idx; 1074 unsigned idx;
1075 /* relocations */ 1075 /* relocations */
1076 unsigned nrelocs; 1076 unsigned nrelocs;
1077 struct radeon_cs_reloc *relocs; 1077 struct radeon_bo_list *relocs;
1078 struct radeon_cs_reloc **relocs_ptr; 1078 struct radeon_bo_list *vm_bos;
1079 struct radeon_cs_reloc *vm_bos;
1080 struct list_head validated; 1079 struct list_head validated;
1081 unsigned dma_reloc_idx; 1080 unsigned dma_reloc_idx;
1082 /* indices of various chunks */ 1081 /* indices of various chunks */
1083 int chunk_ib_idx; 1082 struct radeon_cs_chunk *chunk_ib;
1084 int chunk_relocs_idx; 1083 struct radeon_cs_chunk *chunk_relocs;
1085 int chunk_flags_idx; 1084 struct radeon_cs_chunk *chunk_flags;
1086 int chunk_const_ib_idx; 1085 struct radeon_cs_chunk *chunk_const_ib;
1087 struct radeon_ib ib; 1086 struct radeon_ib ib;
1088 struct radeon_ib const_ib; 1087 struct radeon_ib const_ib;
1089 void *track; 1088 void *track;
@@ -1097,7 +1096,7 @@ struct radeon_cs_parser {
1097 1096
1098static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) 1097static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
1099{ 1098{
1100 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; 1099 struct radeon_cs_chunk *ibc = p->chunk_ib;
1101 1100
1102 if (ibc->kdata) 1101 if (ibc->kdata)
1103 return ibc->kdata[idx]; 1102 return ibc->kdata[idx];
@@ -2975,7 +2974,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
2975void radeon_vm_manager_fini(struct radeon_device *rdev); 2974void radeon_vm_manager_fini(struct radeon_device *rdev);
2976int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); 2975int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
2977void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); 2976void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
2978struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, 2977struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
2979 struct radeon_vm *vm, 2978 struct radeon_vm *vm,
2980 struct list_head *head); 2979 struct list_head *head);
2981struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, 2980struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
@@ -3089,7 +3088,7 @@ bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
3089void radeon_cs_dump_packet(struct radeon_cs_parser *p, 3088void radeon_cs_dump_packet(struct radeon_cs_parser *p,
3090 struct radeon_cs_packet *pkt); 3089 struct radeon_cs_packet *pkt);
3091int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, 3090int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
3092 struct radeon_cs_reloc **cs_reloc, 3091 struct radeon_bo_list **cs_reloc,
3093 int nomm); 3092 int nomm);
3094int r600_cs_common_vline_parse(struct radeon_cs_parser *p, 3093int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
3095 uint32_t *vline_start_end, 3094 uint32_t *vline_start_end,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 75f22e5e999f..c830863bc98a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
77 struct drm_device *ddev = p->rdev->ddev; 77 struct drm_device *ddev = p->rdev->ddev;
78 struct radeon_cs_chunk *chunk; 78 struct radeon_cs_chunk *chunk;
79 struct radeon_cs_buckets buckets; 79 struct radeon_cs_buckets buckets;
80 unsigned i, j; 80 unsigned i;
81 bool duplicate, need_mmap_lock = false; 81 bool need_mmap_lock = false;
82 int r; 82 int r;
83 83
84 if (p->chunk_relocs_idx == -1) { 84 if (p->chunk_relocs == NULL) {
85 return 0; 85 return 0;
86 } 86 }
87 chunk = &p->chunks[p->chunk_relocs_idx]; 87 chunk = p->chunk_relocs;
88 p->dma_reloc_idx = 0; 88 p->dma_reloc_idx = 0;
89 /* FIXME: we assume that each relocs use 4 dwords */ 89 /* FIXME: we assume that each relocs use 4 dwords */
90 p->nrelocs = chunk->length_dw / 4; 90 p->nrelocs = chunk->length_dw / 4;
91 p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL); 91 p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
92 if (p->relocs_ptr == NULL) {
93 return -ENOMEM;
94 }
95 p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
96 if (p->relocs == NULL) { 92 if (p->relocs == NULL) {
97 return -ENOMEM; 93 return -ENOMEM;
98 } 94 }
@@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
101 97
102 for (i = 0; i < p->nrelocs; i++) { 98 for (i = 0; i < p->nrelocs; i++) {
103 struct drm_radeon_cs_reloc *r; 99 struct drm_radeon_cs_reloc *r;
100 struct drm_gem_object *gobj;
104 unsigned priority; 101 unsigned priority;
105 102
106 duplicate = false;
107 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; 103 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
108 for (j = 0; j < i; j++) { 104 gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
109 if (r->handle == p->relocs[j].handle) { 105 if (gobj == NULL) {
110 p->relocs_ptr[i] = &p->relocs[j];
111 duplicate = true;
112 break;
113 }
114 }
115 if (duplicate) {
116 p->relocs[i].handle = 0;
117 continue;
118 }
119
120 p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
121 r->handle);
122 if (p->relocs[i].gobj == NULL) {
123 DRM_ERROR("gem object lookup failed 0x%x\n", 106 DRM_ERROR("gem object lookup failed 0x%x\n",
124 r->handle); 107 r->handle);
125 return -ENOENT; 108 return -ENOENT;
126 } 109 }
127 p->relocs_ptr[i] = &p->relocs[i]; 110 p->relocs[i].robj = gem_to_radeon_bo(gobj);
128 p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
129 111
130 /* The userspace buffer priorities are from 0 to 15. A higher 112 /* The userspace buffer priorities are from 0 to 15. A higher
131 * number means the buffer is more important. 113 * number means the buffer is more important.
@@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
184 166
185 p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; 167 p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
186 p->relocs[i].tv.shared = !r->write_domain; 168 p->relocs[i].tv.shared = !r->write_domain;
187 p->relocs[i].handle = r->handle;
188 169
189 radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head, 170 radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
190 priority); 171 priority);
@@ -251,22 +232,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
251 232
252static int radeon_cs_sync_rings(struct radeon_cs_parser *p) 233static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
253{ 234{
254 int i, r = 0; 235 struct radeon_bo_list *reloc;
236 int r;
255 237
256 for (i = 0; i < p->nrelocs; i++) { 238 list_for_each_entry(reloc, &p->validated, tv.head) {
257 struct reservation_object *resv; 239 struct reservation_object *resv;
258 240
259 if (!p->relocs[i].robj) 241 resv = reloc->robj->tbo.resv;
260 continue;
261
262 resv = p->relocs[i].robj->tbo.resv;
263 r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, 242 r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
264 p->relocs[i].tv.shared); 243 reloc->tv.shared);
265
266 if (r) 244 if (r)
267 break; 245 return r;
268 } 246 }
269 return r; 247 return 0;
270} 248}
271 249
272/* XXX: note that this is called from the legacy UMS CS ioctl as well */ 250/* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -286,10 +264,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
286 p->idx = 0; 264 p->idx = 0;
287 p->ib.sa_bo = NULL; 265 p->ib.sa_bo = NULL;
288 p->const_ib.sa_bo = NULL; 266 p->const_ib.sa_bo = NULL;
289 p->chunk_ib_idx = -1; 267 p->chunk_ib = NULL;
290 p->chunk_relocs_idx = -1; 268 p->chunk_relocs = NULL;
291 p->chunk_flags_idx = -1; 269 p->chunk_flags = NULL;
292 p->chunk_const_ib_idx = -1; 270 p->chunk_const_ib = NULL;
293 p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL); 271 p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
294 if (p->chunks_array == NULL) { 272 if (p->chunks_array == NULL) {
295 return -ENOMEM; 273 return -ENOMEM;
@@ -316,24 +294,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
316 return -EFAULT; 294 return -EFAULT;
317 } 295 }
318 p->chunks[i].length_dw = user_chunk.length_dw; 296 p->chunks[i].length_dw = user_chunk.length_dw;
319 p->chunks[i].chunk_id = user_chunk.chunk_id; 297 if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
320 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { 298 p->chunk_relocs = &p->chunks[i];
321 p->chunk_relocs_idx = i;
322 } 299 }
323 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { 300 if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
324 p->chunk_ib_idx = i; 301 p->chunk_ib = &p->chunks[i];
325 /* zero length IB isn't useful */ 302 /* zero length IB isn't useful */
326 if (p->chunks[i].length_dw == 0) 303 if (p->chunks[i].length_dw == 0)
327 return -EINVAL; 304 return -EINVAL;
328 } 305 }
329 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) { 306 if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
330 p->chunk_const_ib_idx = i; 307 p->chunk_const_ib = &p->chunks[i];
331 /* zero length CONST IB isn't useful */ 308 /* zero length CONST IB isn't useful */
332 if (p->chunks[i].length_dw == 0) 309 if (p->chunks[i].length_dw == 0)
333 return -EINVAL; 310 return -EINVAL;
334 } 311 }
335 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { 312 if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
336 p->chunk_flags_idx = i; 313 p->chunk_flags = &p->chunks[i];
337 /* zero length flags aren't useful */ 314 /* zero length flags aren't useful */
338 if (p->chunks[i].length_dw == 0) 315 if (p->chunks[i].length_dw == 0)
339 return -EINVAL; 316 return -EINVAL;
@@ -342,10 +319,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
342 size = p->chunks[i].length_dw; 319 size = p->chunks[i].length_dw;
343 cdata = (void __user *)(unsigned long)user_chunk.chunk_data; 320 cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
344 p->chunks[i].user_ptr = cdata; 321 p->chunks[i].user_ptr = cdata;
345 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) 322 if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
346 continue; 323 continue;
347 324
348 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { 325 if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
349 if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP)) 326 if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
350 continue; 327 continue;
351 } 328 }
@@ -358,7 +335,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
358 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { 335 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
359 return -EFAULT; 336 return -EFAULT;
360 } 337 }
361 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { 338 if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
362 p->cs_flags = p->chunks[i].kdata[0]; 339 p->cs_flags = p->chunks[i].kdata[0];
363 if (p->chunks[i].length_dw > 1) 340 if (p->chunks[i].length_dw > 1)
364 ring = p->chunks[i].kdata[1]; 341 ring = p->chunks[i].kdata[1];
@@ -399,8 +376,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
399static int cmp_size_smaller_first(void *priv, struct list_head *a, 376static int cmp_size_smaller_first(void *priv, struct list_head *a,
400 struct list_head *b) 377 struct list_head *b)
401{ 378{
402 struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head); 379 struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
403 struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head); 380 struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
404 381
405 /* Sort A before B if A is smaller. */ 382 /* Sort A before B if A is smaller. */
406 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; 383 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
@@ -441,13 +418,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
441 418
442 if (parser->relocs != NULL) { 419 if (parser->relocs != NULL) {
443 for (i = 0; i < parser->nrelocs; i++) { 420 for (i = 0; i < parser->nrelocs; i++) {
444 if (parser->relocs[i].gobj) 421 struct radeon_bo *bo = parser->relocs[i].robj;
445 drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); 422 if (bo == NULL)
423 continue;
424
425 drm_gem_object_unreference_unlocked(&bo->gem_base);
446 } 426 }
447 } 427 }
448 kfree(parser->track); 428 kfree(parser->track);
449 kfree(parser->relocs); 429 kfree(parser->relocs);
450 kfree(parser->relocs_ptr);
451 drm_free_large(parser->vm_bos); 430 drm_free_large(parser->vm_bos);
452 for (i = 0; i < parser->nchunks; i++) 431 for (i = 0; i < parser->nchunks; i++)
453 drm_free_large(parser->chunks[i].kdata); 432 drm_free_large(parser->chunks[i].kdata);
@@ -462,7 +441,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
462{ 441{
463 int r; 442 int r;
464 443
465 if (parser->chunk_ib_idx == -1) 444 if (parser->chunk_ib == NULL)
466 return 0; 445 return 0;
467 446
468 if (parser->cs_flags & RADEON_CS_USE_VM) 447 if (parser->cs_flags & RADEON_CS_USE_VM)
@@ -505,9 +484,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
505 if (r) 484 if (r)
506 return r; 485 return r;
507 486
508 radeon_sync_resv(p->rdev, &p->ib.sync, vm->page_directory->tbo.resv,
509 true);
510
511 r = radeon_vm_clear_freed(rdev, vm); 487 r = radeon_vm_clear_freed(rdev, vm);
512 if (r) 488 if (r)
513 return r; 489 return r;
@@ -525,10 +501,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
525 for (i = 0; i < p->nrelocs; i++) { 501 for (i = 0; i < p->nrelocs; i++) {
526 struct radeon_bo *bo; 502 struct radeon_bo *bo;
527 503
528 /* ignore duplicates */
529 if (p->relocs_ptr[i] != &p->relocs[i])
530 continue;
531
532 bo = p->relocs[i].robj; 504 bo = p->relocs[i].robj;
533 bo_va = radeon_vm_bo_find(vm, bo); 505 bo_va = radeon_vm_bo_find(vm, bo);
534 if (bo_va == NULL) { 506 if (bo_va == NULL) {
@@ -553,7 +525,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
553 struct radeon_vm *vm = &fpriv->vm; 525 struct radeon_vm *vm = &fpriv->vm;
554 int r; 526 int r;
555 527
556 if (parser->chunk_ib_idx == -1) 528 if (parser->chunk_ib == NULL)
557 return 0; 529 return 0;
558 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) 530 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
559 return 0; 531 return 0;
@@ -587,7 +559,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
587 } 559 }
588 560
589 if ((rdev->family >= CHIP_TAHITI) && 561 if ((rdev->family >= CHIP_TAHITI) &&
590 (parser->chunk_const_ib_idx != -1)) { 562 (parser->chunk_const_ib != NULL)) {
591 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true); 563 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
592 } else { 564 } else {
593 r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); 565 r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
@@ -614,7 +586,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
614 struct radeon_vm *vm = NULL; 586 struct radeon_vm *vm = NULL;
615 int r; 587 int r;
616 588
617 if (parser->chunk_ib_idx == -1) 589 if (parser->chunk_ib == NULL)
618 return 0; 590 return 0;
619 591
620 if (parser->cs_flags & RADEON_CS_USE_VM) { 592 if (parser->cs_flags & RADEON_CS_USE_VM) {
@@ -622,8 +594,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
622 vm = &fpriv->vm; 594 vm = &fpriv->vm;
623 595
624 if ((rdev->family >= CHIP_TAHITI) && 596 if ((rdev->family >= CHIP_TAHITI) &&
625 (parser->chunk_const_ib_idx != -1)) { 597 (parser->chunk_const_ib != NULL)) {
626 ib_chunk = &parser->chunks[parser->chunk_const_ib_idx]; 598 ib_chunk = parser->chunk_const_ib;
627 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { 599 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
628 DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw); 600 DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
629 return -EINVAL; 601 return -EINVAL;
@@ -642,13 +614,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
642 return -EFAULT; 614 return -EFAULT;
643 } 615 }
644 616
645 ib_chunk = &parser->chunks[parser->chunk_ib_idx]; 617 ib_chunk = parser->chunk_ib;
646 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { 618 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
647 DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw); 619 DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
648 return -EINVAL; 620 return -EINVAL;
649 } 621 }
650 } 622 }
651 ib_chunk = &parser->chunks[parser->chunk_ib_idx]; 623 ib_chunk = parser->chunk_ib;
652 624
653 r = radeon_ib_get(rdev, parser->ring, &parser->ib, 625 r = radeon_ib_get(rdev, parser->ring, &parser->ib,
654 vm, ib_chunk->length_dw * 4); 626 vm, ib_chunk->length_dw * 4);
@@ -740,7 +712,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
740 struct radeon_cs_packet *pkt, 712 struct radeon_cs_packet *pkt,
741 unsigned idx) 713 unsigned idx)
742{ 714{
743 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 715 struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
744 struct radeon_device *rdev = p->rdev; 716 struct radeon_device *rdev = p->rdev;
745 uint32_t header; 717 uint32_t header;
746 718
@@ -834,7 +806,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
834 * GPU offset using the provided start. 806 * GPU offset using the provided start.
835 **/ 807 **/
836int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, 808int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
837 struct radeon_cs_reloc **cs_reloc, 809 struct radeon_bo_list **cs_reloc,
838 int nomm) 810 int nomm)
839{ 811{
840 struct radeon_cs_chunk *relocs_chunk; 812 struct radeon_cs_chunk *relocs_chunk;
@@ -842,12 +814,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
842 unsigned idx; 814 unsigned idx;
843 int r; 815 int r;
844 816
845 if (p->chunk_relocs_idx == -1) { 817 if (p->chunk_relocs == NULL) {
846 DRM_ERROR("No relocation chunk !\n"); 818 DRM_ERROR("No relocation chunk !\n");
847 return -EINVAL; 819 return -EINVAL;
848 } 820 }
849 *cs_reloc = NULL; 821 *cs_reloc = NULL;
850 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 822 relocs_chunk = p->chunk_relocs;
851 r = radeon_cs_packet_parse(p, &p3reloc, p->idx); 823 r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
852 if (r) 824 if (r)
853 return r; 825 return r;
@@ -873,6 +845,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
873 (u64)relocs_chunk->kdata[idx + 3] << 32; 845 (u64)relocs_chunk->kdata[idx + 3] << 32;
874 (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0]; 846 (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
875 } else 847 } else
876 *cs_reloc = p->relocs_ptr[(idx / 4)]; 848 *cs_reloc = &p->relocs[(idx / 4)];
877 return 0; 849 return 0;
878} 850}
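
The radeon.h and radeon_cs.c hunks above replace the parser's integer chunk indices (chunk_ib_idx and friends, where -1 meant "absent") with cached struct radeon_cs_chunk pointers, and drop the relocs_ptr indirection in favor of indexing p->relocs directly. Below is a minimal standalone sketch of that index-to-pointer refactor; the types and names are simplified stand-ins, not the real radeon structures.

#include <stdio.h>
#include <stddef.h>

struct chunk { unsigned length_dw; };

struct parser {
    struct chunk chunks[3];
    /* before: int chunk_ib_idx;  -1 meant "no IB chunk" */
    struct chunk *chunk_ib;      /* after: NULL means "no IB chunk" */
};

static unsigned ib_len(const struct parser *p)
{
    /* before: if (p->chunk_ib_idx == -1) return 0;
     *         return p->chunks[p->chunk_ib_idx].length_dw; */
    if (p->chunk_ib == NULL)     /* one NULL test replaces the -1 check */
        return 0;
    return p->chunk_ib->length_dw;  /* direct dereference, no re-indexing */
}

int main(void)
{
    struct parser p = { .chunks = { {16}, {4}, {1} } };
    p.chunk_ib = &p.chunks[0];   /* cache the pointer once, at init time */
    printf("IB length: %u dwords\n", ib_len(&p));
    return 0;
}
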
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 85f38ee11888..45e54060ee97 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -227,11 +227,24 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
227 return ret; 227 return ret;
228} 228}
229 229
230static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, 230static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
231 uint64_t gpu_addr, int hot_x, int hot_y)
232{ 231{
233 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 232 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
234 struct radeon_device *rdev = crtc->dev->dev_private; 233 struct radeon_device *rdev = crtc->dev->dev_private;
234 struct radeon_bo *robj = gem_to_radeon_bo(obj);
235 uint64_t gpu_addr;
236 int ret;
237
238 ret = radeon_bo_reserve(robj, false);
239 if (unlikely(ret != 0))
240 goto fail;
241 /* Only 27 bit offset for legacy cursor */
242 ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
243 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
244 &gpu_addr);
245 radeon_bo_unreserve(robj);
246 if (ret)
247 goto fail;
235 248
236 if (ASIC_IS_DCE4(rdev)) { 249 if (ASIC_IS_DCE4(rdev)) {
237 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 250 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
@@ -253,18 +266,12 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
253 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); 266 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
254 } 267 }
255 268
256 if (hot_x != radeon_crtc->cursor_hot_x || 269 return 0;
257 hot_y != radeon_crtc->cursor_hot_y) {
258 int x, y;
259
260 x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
261 y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
262 270
263 radeon_cursor_move_locked(crtc, x, y); 271fail:
272 drm_gem_object_unreference_unlocked(obj);
264 273
265 radeon_crtc->cursor_hot_x = hot_x; 274 return ret;
266 radeon_crtc->cursor_hot_y = hot_y;
267 }
268} 275}
269 276
270int radeon_crtc_cursor_set2(struct drm_crtc *crtc, 277int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -276,10 +283,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
276 int32_t hot_y) 283 int32_t hot_y)
277{ 284{
278 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 285 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
279 struct radeon_device *rdev = crtc->dev->dev_private;
280 struct drm_gem_object *obj; 286 struct drm_gem_object *obj;
281 struct radeon_bo *robj;
282 uint64_t gpu_addr;
283 int ret; 287 int ret;
284 288
285 if (!handle) { 289 if (!handle) {
@@ -301,41 +305,76 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
301 return -ENOENT; 305 return -ENOENT;
302 } 306 }
303 307
304 robj = gem_to_radeon_bo(obj);
305 ret = radeon_bo_reserve(robj, false);
306 if (unlikely(ret != 0))
307 goto fail;
308 /* Only 27 bit offset for legacy cursor */
309 ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
310 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
311 &gpu_addr);
312 radeon_bo_unreserve(robj);
313 if (ret)
314 goto fail;
315
316 radeon_crtc->cursor_width = width; 308 radeon_crtc->cursor_width = width;
317 radeon_crtc->cursor_height = height; 309 radeon_crtc->cursor_height = height;
318 310
319 radeon_lock_cursor(crtc, true); 311 radeon_lock_cursor(crtc, true);
320 radeon_set_cursor(crtc, obj, gpu_addr, hot_x, hot_y); 312
321 radeon_show_cursor(crtc); 313 if (hot_x != radeon_crtc->cursor_hot_x ||
314 hot_y != radeon_crtc->cursor_hot_y) {
315 int x, y;
316
317 x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
318 y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
319
320 radeon_cursor_move_locked(crtc, x, y);
321
322 radeon_crtc->cursor_hot_x = hot_x;
323 radeon_crtc->cursor_hot_y = hot_y;
324 }
325
326 ret = radeon_set_cursor(crtc, obj);
327
328 if (ret)
329 DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
330 ret);
331 else
332 radeon_show_cursor(crtc);
333
322 radeon_lock_cursor(crtc, false); 334 radeon_lock_cursor(crtc, false);
323 335
324unpin: 336unpin:
325 if (radeon_crtc->cursor_bo) { 337 if (radeon_crtc->cursor_bo) {
326 robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); 338 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
327 ret = radeon_bo_reserve(robj, false); 339 ret = radeon_bo_reserve(robj, false);
328 if (likely(ret == 0)) { 340 if (likely(ret == 0)) {
329 radeon_bo_unpin(robj); 341 radeon_bo_unpin(robj);
330 radeon_bo_unreserve(robj); 342 radeon_bo_unreserve(robj);
331 } 343 }
332 drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); 344 if (radeon_crtc->cursor_bo != obj)
345 drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
333 } 346 }
334 347
335 radeon_crtc->cursor_bo = obj; 348 radeon_crtc->cursor_bo = obj;
336 return 0; 349 return 0;
337fail: 350}
338 drm_gem_object_unreference_unlocked(obj);
339 351
340 return ret; 352/**
353 * radeon_cursor_reset - Re-set the current cursor, if any.
354 *
355 * @crtc: drm crtc
356 *
357 * If the CRTC passed in currently has a cursor assigned, this function
358 * makes sure it's visible.
359 */
360void radeon_cursor_reset(struct drm_crtc *crtc)
361{
362 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
363 int ret;
364
365 if (radeon_crtc->cursor_bo) {
366 radeon_lock_cursor(crtc, true);
367
368 radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
369 radeon_crtc->cursor_y);
370
371 ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
372 if (ret)
373 DRM_ERROR("radeon_set_cursor returned %d, not showing "
374 "cursor\n", ret);
375 else
376 radeon_show_cursor(crtc);
377
378 radeon_lock_cursor(crtc, false);
379 }
341} 380}
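
With the change above, radeon_crtc_cursor_set2() no longer pins the buffer itself; the reserve/pin/unreserve sequence moves into radeon_set_cursor() so that every caller, including the new radeon_cursor_reset(), shares one error path. A rough userspace sketch of that ordering follows; the bo_* helpers are hypothetical stand-ins, not the real radeon_bo API.

#include <stdio.h>

/* Hypothetical stand-ins for radeon_bo_reserve()/radeon_bo_pin_restricted();
 * only the ordering matters: reserve, try to pin, always unreserve. */
struct bo { int reserved, pinned; };

static int bo_reserve(struct bo *b)    { b->reserved = 1; return 0; }
static void bo_unreserve(struct bo *b) { b->reserved = 0; }
static int bo_pin(struct bo *b, unsigned long long *addr)
{
    b->pinned = 1;
    *addr = 0x100000ULL;       /* made-up GPU address */
    return 0;
}

static int map_cursor(struct bo *b, unsigned long long *gpu_addr)
{
    int ret = bo_reserve(b);   /* pinning requires the reservation */
    if (ret)
        return ret;
    ret = bo_pin(b, gpu_addr); /* may fail... */
    bo_unreserve(b);           /* ...but the lock drops either way */
    return ret;                /* caller decides whether to show the cursor */
}

int main(void)
{
    struct bo cursor = {0};
    unsigned long long addr = 0;
    int ret = map_cursor(&cursor, &addr);
    printf("pin %s, addr 0x%llx\n", ret ? "failed" : "ok", addr);
    return 0;
}
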
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0ea1db83d573..29b9220ec399 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -48,10 +48,40 @@ struct radeon_fbdev {
48 struct radeon_device *rdev; 48 struct radeon_device *rdev;
49}; 49};
50 50
51/**
52 * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
53 *
54 * @info: fbdev info
55 *
56 * This function hides the cursor on all CRTCs used by fbdev.
57 */
58static int radeon_fb_helper_set_par(struct fb_info *info)
59{
60 int ret;
61
62 ret = drm_fb_helper_set_par(info);
63
64 /* XXX: with universal plane support fbdev will automatically disable
65 * all non-primary planes (including the cursor)
66 */
67 if (ret == 0) {
68 struct drm_fb_helper *fb_helper = info->par;
69 int i;
70
71 for (i = 0; i < fb_helper->crtc_count; i++) {
72 struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
73
74 radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
75 }
76 }
77
78 return ret;
79}
80
51static struct fb_ops radeonfb_ops = { 81static struct fb_ops radeonfb_ops = {
52 .owner = THIS_MODULE, 82 .owner = THIS_MODULE,
53 .fb_check_var = drm_fb_helper_check_var, 83 .fb_check_var = drm_fb_helper_check_var,
54 .fb_set_par = drm_fb_helper_set_par, 84 .fb_set_par = radeon_fb_helper_set_par,
55 .fb_fillrect = cfb_fillrect, 85 .fb_fillrect = cfb_fillrect,
56 .fb_copyarea = cfb_copyarea, 86 .fb_copyarea = cfb_copyarea,
57 .fb_imageblit = cfb_imageblit, 87 .fb_imageblit = cfb_imageblit,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 12cfaeac1205..fe48f229043e 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -548,7 +548,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
548 struct radeon_bo_va *bo_va) 548 struct radeon_bo_va *bo_va)
549{ 549{
550 struct ttm_validate_buffer tv, *entry; 550 struct ttm_validate_buffer tv, *entry;
551 struct radeon_cs_reloc *vm_bos; 551 struct radeon_bo_list *vm_bos;
552 struct ww_acquire_ctx ticket; 552 struct ww_acquire_ctx ticket;
553 struct list_head list; 553 struct list_head list;
554 unsigned domain; 554 unsigned domain;
@@ -564,7 +564,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
564 if (!vm_bos) 564 if (!vm_bos)
565 return; 565 return;
566 566
567 r = ttm_eu_reserve_buffers(&ticket, &list, true); 567 r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
568 if (r) 568 if (r)
569 goto error_free; 569 goto error_free;
570 570
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f4dd26ae33e5..3cf9c1fa6475 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -800,6 +800,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
800 800
801 /* Get associated drm_crtc: */ 801 /* Get associated drm_crtc: */
802 drmcrtc = &rdev->mode_info.crtcs[crtc]->base; 802 drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
803 if (!drmcrtc)
804 return -EINVAL;
803 805
804 /* Helper routine in DRM core does all the work: */ 806 /* Helper routine in DRM core does all the work: */
805 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, 807 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cafb1ccf2ec3..678b4386540d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1054,6 +1054,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
1054 DRM_ERROR("Mode need scaling but only first crtc can do that.\n"); 1054 DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
1055 } 1055 }
1056 } 1056 }
1057 radeon_cursor_reset(crtc);
1057 return 0; 1058 return 0;
1058} 1059}
1059 1060
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f3d87cdd5c9d..390db897f322 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -818,6 +818,7 @@ extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
818 int32_t hot_y); 818 int32_t hot_y);
819extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, 819extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
820 int x, int y); 820 int x, int y);
821extern void radeon_cursor_reset(struct drm_crtc *crtc);
821 822
822extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 823extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
823 unsigned int flags, 824 unsigned int flags,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 87b00d902bf7..7d68223eb469 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -233,6 +233,13 @@ int radeon_bo_create(struct radeon_device *rdev,
233 if (!(rdev->flags & RADEON_IS_PCIE)) 233 if (!(rdev->flags & RADEON_IS_PCIE))
234 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); 234 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
235 235
236#ifdef CONFIG_X86_32
237 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
238 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
239 */
240 bo->flags &= ~RADEON_GEM_GTT_WC;
241#endif
242
236 radeon_ttm_placement_from_domain(bo, domain); 243 radeon_ttm_placement_from_domain(bo, domain);
237 /* Kernel allocation are uninterruptible */ 244 /* Kernel allocation are uninterruptible */
238 down_read(&rdev->pm.mclk_lock); 245 down_read(&rdev->pm.mclk_lock);
@@ -502,19 +509,20 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
502 struct ww_acquire_ctx *ticket, 509 struct ww_acquire_ctx *ticket,
503 struct list_head *head, int ring) 510 struct list_head *head, int ring)
504{ 511{
505 struct radeon_cs_reloc *lobj; 512 struct radeon_bo_list *lobj;
506 struct radeon_bo *bo; 513 struct list_head duplicates;
507 int r; 514 int r;
508 u64 bytes_moved = 0, initial_bytes_moved; 515 u64 bytes_moved = 0, initial_bytes_moved;
509 u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); 516 u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
510 517
511 r = ttm_eu_reserve_buffers(ticket, head, true); 518 INIT_LIST_HEAD(&duplicates);
519 r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
512 if (unlikely(r != 0)) { 520 if (unlikely(r != 0)) {
513 return r; 521 return r;
514 } 522 }
515 523
516 list_for_each_entry(lobj, head, tv.head) { 524 list_for_each_entry(lobj, head, tv.head) {
517 bo = lobj->robj; 525 struct radeon_bo *bo = lobj->robj;
518 if (!bo->pin_count) { 526 if (!bo->pin_count) {
519 u32 domain = lobj->prefered_domains; 527 u32 domain = lobj->prefered_domains;
520 u32 allowed = lobj->allowed_domains; 528 u32 allowed = lobj->allowed_domains;
@@ -562,6 +570,12 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
562 lobj->gpu_offset = radeon_bo_gpu_offset(bo); 570 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
563 lobj->tiling_flags = bo->tiling_flags; 571 lobj->tiling_flags = bo->tiling_flags;
564 } 572 }
573
574 list_for_each_entry(lobj, &duplicates, tv.head) {
575 lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
576 lobj->tiling_flags = lobj->robj->tiling_flags;
577 }
578
565 return 0; 579 return 0;
566} 580}
567 581
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 9db74a96ef61..ce075cb08cb2 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -38,7 +38,7 @@ TRACE_EVENT(radeon_cs,
38 38
39 TP_fast_assign( 39 TP_fast_assign(
40 __entry->ring = p->ring; 40 __entry->ring = p->ring;
41 __entry->dw = p->chunks[p->chunk_ib_idx].length_dw; 41 __entry->dw = p->chunk_ib->length_dw;
42 __entry->fences = radeon_fence_count_emitted( 42 __entry->fences = radeon_fence_count_emitted(
43 p->rdev, p->ring); 43 p->rdev, p->ring);
44 ), 44 ),
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index cbe7b32d181c..d02aa1d0f588 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -196,7 +196,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
196 rbo = container_of(bo, struct radeon_bo, tbo); 196 rbo = container_of(bo, struct radeon_bo, tbo);
197 switch (bo->mem.mem_type) { 197 switch (bo->mem.mem_type) {
198 case TTM_PL_VRAM: 198 case TTM_PL_VRAM:
199 if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) 199 if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
200 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); 200 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
201 else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size && 201 else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
202 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) { 202 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 11b662469253..c10b2aec6450 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -488,12 +488,12 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
488 unsigned buf_sizes[], bool *has_msg_cmd) 488 unsigned buf_sizes[], bool *has_msg_cmd)
489{ 489{
490 struct radeon_cs_chunk *relocs_chunk; 490 struct radeon_cs_chunk *relocs_chunk;
491 struct radeon_cs_reloc *reloc; 491 struct radeon_bo_list *reloc;
492 unsigned idx, cmd, offset; 492 unsigned idx, cmd, offset;
493 uint64_t start, end; 493 uint64_t start, end;
494 int r; 494 int r;
495 495
496 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 496 relocs_chunk = p->chunk_relocs;
497 offset = radeon_get_ib_value(p, data0); 497 offset = radeon_get_ib_value(p, data0);
498 idx = radeon_get_ib_value(p, data1); 498 idx = radeon_get_ib_value(p, data1);
499 if (idx >= relocs_chunk->length_dw) { 499 if (idx >= relocs_chunk->length_dw) {
@@ -502,7 +502,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
502 return -EINVAL; 502 return -EINVAL;
503 } 503 }
504 504
505 reloc = p->relocs_ptr[(idx / 4)]; 505 reloc = &p->relocs[(idx / 4)];
506 start = reloc->gpu_offset; 506 start = reloc->gpu_offset;
507 end = start + radeon_bo_size(reloc->robj); 507 end = start + radeon_bo_size(reloc->robj);
508 start += offset; 508 start += offset;
@@ -610,13 +610,13 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
610 [0x00000003] = 2048, 610 [0x00000003] = 2048,
611 }; 611 };
612 612
613 if (p->chunks[p->chunk_ib_idx].length_dw % 16) { 613 if (p->chunk_ib->length_dw % 16) {
614 DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", 614 DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
615 p->chunks[p->chunk_ib_idx].length_dw); 615 p->chunk_ib->length_dw);
616 return -EINVAL; 616 return -EINVAL;
617 } 617 }
618 618
619 if (p->chunk_relocs_idx == -1) { 619 if (p->chunk_relocs == NULL) {
620 DRM_ERROR("No relocation chunk !\n"); 620 DRM_ERROR("No relocation chunk !\n");
621 return -EINVAL; 621 return -EINVAL;
622 } 622 }
@@ -640,7 +640,7 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
640 DRM_ERROR("Unknown packet type %d !\n", pkt.type); 640 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
641 return -EINVAL; 641 return -EINVAL;
642 } 642 }
643 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 643 } while (p->idx < p->chunk_ib->length_dw);
644 644
645 if (!has_msg_cmd) { 645 if (!has_msg_cmd) {
646 DRM_ERROR("UVD-IBs need a msg command!\n"); 646 DRM_ERROR("UVD-IBs need a msg command!\n");
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 9e85757d5599..976fe432f4e2 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -453,11 +453,11 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
453 unsigned size) 453 unsigned size)
454{ 454{
455 struct radeon_cs_chunk *relocs_chunk; 455 struct radeon_cs_chunk *relocs_chunk;
456 struct radeon_cs_reloc *reloc; 456 struct radeon_bo_list *reloc;
457 uint64_t start, end, offset; 457 uint64_t start, end, offset;
458 unsigned idx; 458 unsigned idx;
459 459
460 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 460 relocs_chunk = p->chunk_relocs;
461 offset = radeon_get_ib_value(p, lo); 461 offset = radeon_get_ib_value(p, lo);
462 idx = radeon_get_ib_value(p, hi); 462 idx = radeon_get_ib_value(p, hi);
463 463
@@ -467,7 +467,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
467 return -EINVAL; 467 return -EINVAL;
468 } 468 }
469 469
470 reloc = p->relocs_ptr[(idx / 4)]; 470 reloc = &p->relocs[(idx / 4)];
471 start = reloc->gpu_offset; 471 start = reloc->gpu_offset;
472 end = start + radeon_bo_size(reloc->robj); 472 end = start + radeon_bo_size(reloc->robj);
473 start += offset; 473 start += offset;
@@ -534,7 +534,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
534 uint32_t *size = &tmp; 534 uint32_t *size = &tmp;
535 int i, r; 535 int i, r;
536 536
537 while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { 537 while (p->idx < p->chunk_ib->length_dw) {
538 uint32_t len = radeon_get_ib_value(p, p->idx); 538 uint32_t len = radeon_get_ib_value(p, p->idx);
539 uint32_t cmd = radeon_get_ib_value(p, p->idx + 1); 539 uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
540 540
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 0b10f3a03ce2..cde48c42b30a 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -125,41 +125,37 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
125 * Add the page directory to the list of BOs to 125 * Add the page directory to the list of BOs to
126 * validate for command submission (cayman+). 126 * validate for command submission (cayman+).
127 */ 127 */
128struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, 128struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
129 struct radeon_vm *vm, 129 struct radeon_vm *vm,
130 struct list_head *head) 130 struct list_head *head)
131{ 131{
132 struct radeon_cs_reloc *list; 132 struct radeon_bo_list *list;
133 unsigned i, idx; 133 unsigned i, idx;
134 134
135 list = drm_malloc_ab(vm->max_pde_used + 2, 135 list = drm_malloc_ab(vm->max_pde_used + 2,
136 sizeof(struct radeon_cs_reloc)); 136 sizeof(struct radeon_bo_list));
137 if (!list) 137 if (!list)
138 return NULL; 138 return NULL;
139 139
140 /* add the vm page table to the list */ 140 /* add the vm page table to the list */
141 list[0].gobj = NULL;
142 list[0].robj = vm->page_directory; 141 list[0].robj = vm->page_directory;
143 list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM; 142 list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
144 list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; 143 list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
145 list[0].tv.bo = &vm->page_directory->tbo; 144 list[0].tv.bo = &vm->page_directory->tbo;
146 list[0].tv.shared = true; 145 list[0].tv.shared = true;
147 list[0].tiling_flags = 0; 146 list[0].tiling_flags = 0;
148 list[0].handle = 0;
149 list_add(&list[0].tv.head, head); 147 list_add(&list[0].tv.head, head);
150 148
151 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { 149 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
152 if (!vm->page_tables[i].bo) 150 if (!vm->page_tables[i].bo)
153 continue; 151 continue;
154 152
155 list[idx].gobj = NULL;
156 list[idx].robj = vm->page_tables[i].bo; 153 list[idx].robj = vm->page_tables[i].bo;
157 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM; 154 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
158 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; 155 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
159 list[idx].tv.bo = &list[idx].robj->tbo; 156 list[idx].tv.bo = &list[idx].robj->tbo;
160 list[idx].tv.shared = true; 157 list[idx].tv.shared = true;
161 list[idx].tiling_flags = 0; 158 list[idx].tiling_flags = 0;
162 list[idx].handle = 0;
163 list_add(&list[idx++].tv.head, head); 159 list_add(&list[idx++].tv.head, head);
164 } 160 }
165 161
@@ -491,7 +487,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
491 tmp->vm = vm; 487 tmp->vm = vm;
492 tmp->addr = bo_va->addr; 488 tmp->addr = bo_va->addr;
493 tmp->bo = radeon_bo_ref(bo_va->bo); 489 tmp->bo = radeon_bo_ref(bo_va->bo);
490 spin_lock(&vm->status_lock);
494 list_add(&tmp->vm_status, &vm->freed); 491 list_add(&tmp->vm_status, &vm->freed);
492 spin_unlock(&vm->status_lock);
495 } 493 }
496 494
497 interval_tree_remove(&bo_va->it, &vm->va); 495 interval_tree_remove(&bo_va->it, &vm->va);
@@ -802,11 +800,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
802 * 800 *
803 * Global and local mutex must be locked! 801 * Global and local mutex must be locked!
804 */ 802 */
805static void radeon_vm_update_ptes(struct radeon_device *rdev, 803static int radeon_vm_update_ptes(struct radeon_device *rdev,
806 struct radeon_vm *vm, 804 struct radeon_vm *vm,
807 struct radeon_ib *ib, 805 struct radeon_ib *ib,
808 uint64_t start, uint64_t end, 806 uint64_t start, uint64_t end,
809 uint64_t dst, uint32_t flags) 807 uint64_t dst, uint32_t flags)
810{ 808{
811 uint64_t mask = RADEON_VM_PTE_COUNT - 1; 809 uint64_t mask = RADEON_VM_PTE_COUNT - 1;
812 uint64_t last_pte = ~0, last_dst = ~0; 810 uint64_t last_pte = ~0, last_dst = ~0;
@@ -819,8 +817,12 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
819 struct radeon_bo *pt = vm->page_tables[pt_idx].bo; 817 struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
820 unsigned nptes; 818 unsigned nptes;
821 uint64_t pte; 819 uint64_t pte;
820 int r;
822 821
823 radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true); 822 radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
823 r = reservation_object_reserve_shared(pt->tbo.resv);
824 if (r)
825 return r;
824 826
825 if ((addr & ~mask) == (end & ~mask)) 827 if ((addr & ~mask) == (end & ~mask))
826 nptes = end - addr; 828 nptes = end - addr;
@@ -854,6 +856,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
854 last_pte + 8 * count, 856 last_pte + 8 * count,
855 last_dst, flags); 857 last_dst, flags);
856 } 858 }
859
860 return 0;
857} 861}
858 862
859/** 863/**
@@ -878,7 +882,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
878 end >>= radeon_vm_block_size; 882 end >>= radeon_vm_block_size;
879 883
880 for (i = start; i <= end; ++i) 884 for (i = start; i <= end; ++i)
881 radeon_bo_fence(vm->page_tables[i].bo, fence, false); 885 radeon_bo_fence(vm->page_tables[i].bo, fence, true);
882} 886}
883 887
884/** 888/**
@@ -911,7 +915,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
911 return -EINVAL; 915 return -EINVAL;
912 } 916 }
913 917
918 spin_lock(&vm->status_lock);
914 list_del_init(&bo_va->vm_status); 919 list_del_init(&bo_va->vm_status);
920 spin_unlock(&vm->status_lock);
915 921
916 bo_va->flags &= ~RADEON_VM_PAGE_VALID; 922 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
917 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; 923 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
@@ -987,9 +993,13 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
987 radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use); 993 radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
988 } 994 }
989 995
990 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, 996 r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
991 bo_va->it.last + 1, addr, 997 bo_va->it.last + 1, addr,
992 radeon_vm_page_flags(bo_va->flags)); 998 radeon_vm_page_flags(bo_va->flags));
999 if (r) {
1000 radeon_ib_free(rdev, &ib);
1001 return r;
1002 }
993 1003
994 radeon_asic_vm_pad_ib(rdev, &ib); 1004 radeon_asic_vm_pad_ib(rdev, &ib);
995 WARN_ON(ib.length_dw > ndw); 1005 WARN_ON(ib.length_dw > ndw);
@@ -1022,17 +1032,25 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
1022int radeon_vm_clear_freed(struct radeon_device *rdev, 1032int radeon_vm_clear_freed(struct radeon_device *rdev,
1023 struct radeon_vm *vm) 1033 struct radeon_vm *vm)
1024{ 1034{
1025 struct radeon_bo_va *bo_va, *tmp; 1035 struct radeon_bo_va *bo_va;
1026 int r; 1036 int r;
1027 1037
1028 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { 1038 spin_lock(&vm->status_lock);
1039 while (!list_empty(&vm->freed)) {
1040 bo_va = list_first_entry(&vm->freed,
1041 struct radeon_bo_va, vm_status);
1042 spin_unlock(&vm->status_lock);
1043
1029 r = radeon_vm_bo_update(rdev, bo_va, NULL); 1044 r = radeon_vm_bo_update(rdev, bo_va, NULL);
1030 radeon_bo_unref(&bo_va->bo); 1045 radeon_bo_unref(&bo_va->bo);
1031 radeon_fence_unref(&bo_va->last_pt_update); 1046 radeon_fence_unref(&bo_va->last_pt_update);
1032 kfree(bo_va); 1047 kfree(bo_va);
1033 if (r) 1048 if (r)
1034 return r; 1049 return r;
1050
1051 spin_lock(&vm->status_lock);
1035 } 1052 }
1053 spin_unlock(&vm->status_lock);
1036 return 0; 1054 return 0;
1037 1055
1038} 1056}
@@ -1051,14 +1069,23 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
1051int radeon_vm_clear_invalids(struct radeon_device *rdev, 1069int radeon_vm_clear_invalids(struct radeon_device *rdev,
1052 struct radeon_vm *vm) 1070 struct radeon_vm *vm)
1053{ 1071{
1054 struct radeon_bo_va *bo_va, *tmp; 1072 struct radeon_bo_va *bo_va;
1055 int r; 1073 int r;
1056 1074
1057 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) { 1075 spin_lock(&vm->status_lock);
1076 while (!list_empty(&vm->invalidated)) {
1077 bo_va = list_first_entry(&vm->invalidated,
1078 struct radeon_bo_va, vm_status);
1079 spin_unlock(&vm->status_lock);
1080
1058 r = radeon_vm_bo_update(rdev, bo_va, NULL); 1081 r = radeon_vm_bo_update(rdev, bo_va, NULL);
1059 if (r) 1082 if (r)
1060 return r; 1083 return r;
1084
1085 spin_lock(&vm->status_lock);
1061 } 1086 }
1087 spin_unlock(&vm->status_lock);
1088
1062 return 0; 1089 return 0;
1063} 1090}
1064 1091
@@ -1081,6 +1108,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
1081 1108
1082 mutex_lock(&vm->mutex); 1109 mutex_lock(&vm->mutex);
1083 interval_tree_remove(&bo_va->it, &vm->va); 1110 interval_tree_remove(&bo_va->it, &vm->va);
1111 spin_lock(&vm->status_lock);
1084 list_del(&bo_va->vm_status); 1112 list_del(&bo_va->vm_status);
1085 1113
1086 if (bo_va->addr) { 1114 if (bo_va->addr) {
@@ -1090,6 +1118,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
1090 radeon_fence_unref(&bo_va->last_pt_update); 1118 radeon_fence_unref(&bo_va->last_pt_update);
1091 kfree(bo_va); 1119 kfree(bo_va);
1092 } 1120 }
1121 spin_unlock(&vm->status_lock);
1093 1122
1094 mutex_unlock(&vm->mutex); 1123 mutex_unlock(&vm->mutex);
1095} 1124}
@@ -1110,10 +1139,10 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1110 1139
1111 list_for_each_entry(bo_va, &bo->va, bo_list) { 1140 list_for_each_entry(bo_va, &bo->va, bo_list) {
1112 if (bo_va->addr) { 1141 if (bo_va->addr) {
1113 mutex_lock(&bo_va->vm->mutex); 1142 spin_lock(&bo_va->vm->status_lock);
1114 list_del(&bo_va->vm_status); 1143 list_del(&bo_va->vm_status);
1115 list_add(&bo_va->vm_status, &bo_va->vm->invalidated); 1144 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1116 mutex_unlock(&bo_va->vm->mutex); 1145 spin_unlock(&bo_va->vm->status_lock);
1117 } 1146 }
1118 } 1147 }
1119} 1148}
@@ -1141,6 +1170,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1141 } 1170 }
1142 mutex_init(&vm->mutex); 1171 mutex_init(&vm->mutex);
1143 vm->va = RB_ROOT; 1172 vm->va = RB_ROOT;
1173 spin_lock_init(&vm->status_lock);
1144 INIT_LIST_HEAD(&vm->invalidated); 1174 INIT_LIST_HEAD(&vm->invalidated);
1145 INIT_LIST_HEAD(&vm->freed); 1175 INIT_LIST_HEAD(&vm->freed);
1146 1176
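
The new vm->status_lock above guards the invalidated and freed lists with a spinlock instead of the VM mutex, so radeon_vm_clear_freed() and radeon_vm_clear_invalids() switch from list_for_each_entry_safe() to a drain loop: take the lock, grab the first entry, drop the lock for the potentially sleeping update, then retake it before testing the list again. Here is a userspace sketch of that pattern, with a pthread mutex standing in for the spinlock and the entry popped under the lock for simplicity (the driver removes it via list_del in the update path).

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

struct vm {
    pthread_mutex_t status_lock;  /* stands in for spinlock_t */
    struct node *freed;           /* stands in for the freed list_head */
};

static int update(struct node *n) { free(n); return 0; }  /* may "sleep" */

static int clear_freed(struct vm *vm)
{
    pthread_mutex_lock(&vm->status_lock);
    while (vm->freed) {
        struct node *n = vm->freed;   /* first entry, under the lock */
        vm->freed = n->next;
        pthread_mutex_unlock(&vm->status_lock);  /* unlock to do real work */

        int r = update(n);            /* safe to block here; lock not held */
        if (r)
            return r;

        pthread_mutex_lock(&vm->status_lock);    /* retake before re-testing */
    }
    pthread_mutex_unlock(&vm->status_lock);
    return 0;
}

int main(void)
{
    struct vm vm = { PTHREAD_MUTEX_INITIALIZER, NULL };
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        if (!n)
            return 1;
        n->next = vm.freed;
        vm.freed = n;
    }
    return clear_freed(&vm);
}
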
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index cf4c420b5572..32e354b8b0ab 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5893,7 +5893,7 @@ static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
5893 tmp |= TMIN(0); 5893 tmp |= TMIN(0);
5894 WREG32(CG_FDO_CTRL2, tmp); 5894 WREG32(CG_FDO_CTRL2, tmp);
5895 5895
5896 tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; 5896 tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
5897 tmp |= FDO_PWM_MODE(mode); 5897 tmp |= FDO_PWM_MODE(mode);
5898 WREG32(CG_FDO_CTRL2, tmp); 5898 WREG32(CG_FDO_CTRL2, tmp);
5899} 5899}
@@ -6098,7 +6098,7 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
 	tmp |= TARGET_PERIOD(tach_period);
 	WREG32(CG_TACH_CTRL, tmp);
 
-	si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+	si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
 
 	return 0;
 }
@@ -6114,7 +6114,7 @@ static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev)
 	tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
 	WREG32(CG_FDO_CTRL2, tmp);
 
-	tmp = RREG32(CG_FDO_CTRL2) & TMIN_MASK;
+	tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
 	tmp |= TMIN(si_pi->t_min);
 	WREG32(CG_FDO_CTRL2, tmp);
 	si_pi->fan_ctrl_is_in_default_mode = true;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index c549c16a4fe4..4069be89e585 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -208,18 +208,18 @@
 
 #define	CG_FDO_CTRL0				0x754
 #define		FDO_STATIC_DUTY(x)		((x) << 0)
-#define		FDO_STATIC_DUTY_MASK		0x0000000F
+#define		FDO_STATIC_DUTY_MASK		0x000000FF
 #define		FDO_STATIC_DUTY_SHIFT		0
 #define	CG_FDO_CTRL1				0x758
 #define		FMAX_DUTY100(x)			((x) << 0)
-#define		FMAX_DUTY100_MASK		0x0000000F
+#define		FMAX_DUTY100_MASK		0x000000FF
 #define		FMAX_DUTY100_SHIFT		0
 #define	CG_FDO_CTRL2				0x75C
 #define		TMIN(x)				((x) << 0)
-#define		TMIN_MASK			0x0000000F
+#define		TMIN_MASK			0x000000FF
 #define		TMIN_SHIFT			0
 #define		FDO_PWM_MODE(x)			((x) << 11)
-#define		FDO_PWM_MODE_MASK		(3 << 11)
+#define		FDO_PWM_MODE_MASK		(7 << 11)
 #define		FDO_PWM_MODE_SHIFT		11
 #define		TACH_PWM_RESP_RATE(x)		((x) << 25)
 #define		TACH_PWM_RESP_RATE_MASK		(0x7f << 25)
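The mask widenings in sid.h and the inverted-mask fixes in si_dpm.c serve the same read-modify-write rule: a register field must be cleared with the inverted mask before the new value is OR-ed in, and the mask itself must cover the field's full width. A hedged sketch of the corrected idiom (update_field is illustrative; RREG32/WREG32 are the accessors already used above, which expect an rdev in scope):

	/* Clear one field with the inverted mask, then set its new value. */
	static void update_field(struct radeon_device *rdev, u32 reg,
				 u32 mask, u32 value)
	{
		u32 tmp = RREG32(reg) & ~mask;	/* keep all the other fields */

		WREG32(reg, tmp | (value & mask));
	}

With the old "& FDO_PWM_MODE_MASK", everything except the target field was cleared, so every update clobbered the rest of CG_FDO_CTRL2.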
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
new file mode 100644
index 000000000000..ca9f085efa92
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -0,0 +1,17 @@
1config DRM_ROCKCHIP
2 tristate "DRM Support for Rockchip"
3 depends on DRM && ROCKCHIP_IOMMU
4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER
6 select DRM_PANEL
7 select FB_CFB_FILLRECT
8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT
10 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
11 select VIDEOMODE_HELPERS
12 help
13	  Choose this option if you have a Rockchip SoC.
14 This driver provides kernel mode setting and buffer
15 management to userspace. This driver does not provide
16 2D or 3D acceleration; acceleration is performed by other
17 IP found on the SoC.
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
new file mode 100644
index 000000000000..2cb0672f57ed
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
6 rockchip_drm_gem.o
7
8obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
new file mode 100644
index 000000000000..a798c7c71f91
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -0,0 +1,551 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author: Mark Yao <mark.yao@rock-chips.com>
4 *
5 * based on exynos_drm_drv.c
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <asm/dma-iommu.h>
18
19#include <drm/drmP.h>
20#include <drm/drm_crtc_helper.h>
21#include <drm/drm_fb_helper.h>
22#include <linux/dma-mapping.h>
23#include <linux/pm_runtime.h>
24#include <linux/of_graph.h>
25#include <linux/component.h>
26
27#include "rockchip_drm_drv.h"
28#include "rockchip_drm_fb.h"
29#include "rockchip_drm_fbdev.h"
30#include "rockchip_drm_gem.h"
31
32#define DRIVER_NAME "rockchip"
33#define DRIVER_DESC "Rockchip SoC DRM"
34#define DRIVER_DATE "20140818"
35#define DRIVER_MAJOR 1
36#define DRIVER_MINOR 0
37
38/*
39 * Attach a (component) device to the shared drm dma mapping from master drm
40 * device. This is used by the VOPs to map GEM buffers to a common DMA
41 * mapping.
42 */
43int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
44 struct device *dev)
45{
46 struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;
47 int ret;
48
49 ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
50 if (ret)
51 return ret;
52
53 dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
54
55 return arm_iommu_attach_device(dev, mapping);
56}
57EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device);
58
59void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
60 struct device *dev)
61{
62 arm_iommu_detach_device(dev);
63}
64EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
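As the comment above notes, each VOP attaches its device to the DRM master's shared IOMMU mapping so that all GEM buffers live in one DMA address space. A component would typically bracket scanout with these helpers (hedged sketch; struct vop and my_component_enable() stand in for the component's own private data and callback):

	/* Hypothetical enable path of a component using the shared mapping. */
	static int my_component_enable(struct vop *vop)
	{
		int ret;

		ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
		if (ret) {
			dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
			return ret;
		}

		/* ... scan out from GEM buffers mapped in the shared domain ... */

		rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
		return 0;
	}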
65
66int rockchip_register_crtc_funcs(struct drm_device *dev,
67 const struct rockchip_crtc_funcs *crtc_funcs,
68 int pipe)
69{
70 struct rockchip_drm_private *priv = dev->dev_private;
71
72	if (pipe >= ROCKCHIP_MAX_CRTC)
73 return -EINVAL;
74
75 priv->crtc_funcs[pipe] = crtc_funcs;
76
77 return 0;
78}
79EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
80
81void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe)
82{
83 struct rockchip_drm_private *priv = dev->dev_private;
84
85	if (pipe >= ROCKCHIP_MAX_CRTC)
86 return;
87
88 priv->crtc_funcs[pipe] = NULL;
89}
90EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs);
91
92static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
93 int pipe)
94{
95 struct drm_crtc *crtc;
96 int i = 0;
97
98 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
99 if (i++ == pipe)
100 return crtc;
101
102 return NULL;
103}
104
105static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
106{
107 struct rockchip_drm_private *priv = dev->dev_private;
108 struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
109
110 if (crtc && priv->crtc_funcs[pipe] &&
111 priv->crtc_funcs[pipe]->enable_vblank)
112 return priv->crtc_funcs[pipe]->enable_vblank(crtc);
113
114 return 0;
115}
116
117static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
118{
119 struct rockchip_drm_private *priv = dev->dev_private;
120 struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
121
122 if (crtc && priv->crtc_funcs[pipe] &&
123	    priv->crtc_funcs[pipe]->disable_vblank)
124 priv->crtc_funcs[pipe]->disable_vblank(crtc);
125}
126
127static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
128{
129 struct rockchip_drm_private *private;
130 struct dma_iommu_mapping *mapping;
131 struct device *dev = drm_dev->dev;
132 int ret;
133
134 private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
135 if (!private)
136 return -ENOMEM;
137
138 drm_dev->dev_private = private;
139
140 drm_mode_config_init(drm_dev);
141
142 rockchip_drm_mode_config_init(drm_dev);
143
144 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
145 GFP_KERNEL);
146 if (!dev->dma_parms) {
147 ret = -ENOMEM;
148 goto err_config_cleanup;
149 }
150
151 /* TODO(djkurtz): fetch the mapping start/size from somewhere */
152 mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000,
153 SZ_2G);
154 if (IS_ERR(mapping)) {
155 ret = PTR_ERR(mapping);
156 goto err_config_cleanup;
157 }
158
159 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
160 if (ret)
161 goto err_release_mapping;
162
163 dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
164
165 ret = arm_iommu_attach_device(dev, mapping);
166 if (ret)
167 goto err_release_mapping;
168
169 /* Try to bind all sub drivers. */
170 ret = component_bind_all(dev, drm_dev);
171 if (ret)
172 goto err_detach_device;
173
174 /* init kms poll for handling hpd */
175 drm_kms_helper_poll_init(drm_dev);
176
177 /*
178	 * Enable DRM IRQ mode: with irq_enabled = true the vblank
179	 * infrastructure can be used.
180 */
181 drm_dev->irq_enabled = true;
182
183 ret = drm_vblank_init(drm_dev, ROCKCHIP_MAX_CRTC);
184 if (ret)
185 goto err_kms_helper_poll_fini;
186
187 /*
188	 * With vblank_disable_allowed = true, the vblank interrupt is
189	 * disabled by the DRM core timer once the current process gives up
190	 * ownership of the vblank event (after drm_vblank_put() is called).
191 */
192 drm_dev->vblank_disable_allowed = true;
193
194 ret = rockchip_drm_fbdev_init(drm_dev);
195 if (ret)
196 goto err_vblank_cleanup;
197
198 return 0;
199err_vblank_cleanup:
200 drm_vblank_cleanup(drm_dev);
201err_kms_helper_poll_fini:
202 drm_kms_helper_poll_fini(drm_dev);
203 component_unbind_all(dev, drm_dev);
204err_detach_device:
205 arm_iommu_detach_device(dev);
206err_release_mapping:
207 arm_iommu_release_mapping(dev->archdata.mapping);
208err_config_cleanup:
209 drm_mode_config_cleanup(drm_dev);
210 drm_dev->dev_private = NULL;
211 return ret;
212}
213
214static int rockchip_drm_unload(struct drm_device *drm_dev)
215{
216 struct device *dev = drm_dev->dev;
217
218 rockchip_drm_fbdev_fini(drm_dev);
219 drm_vblank_cleanup(drm_dev);
220 drm_kms_helper_poll_fini(drm_dev);
221 component_unbind_all(dev, drm_dev);
222 arm_iommu_detach_device(dev);
223 arm_iommu_release_mapping(dev->archdata.mapping);
224 drm_mode_config_cleanup(drm_dev);
225 drm_dev->dev_private = NULL;
226
227 return 0;
228}
229
230void rockchip_drm_lastclose(struct drm_device *dev)
231{
232 struct rockchip_drm_private *priv = dev->dev_private;
233
234 drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev_helper);
235}
236
237static const struct file_operations rockchip_drm_driver_fops = {
238 .owner = THIS_MODULE,
239 .open = drm_open,
240 .mmap = rockchip_gem_mmap,
241 .poll = drm_poll,
242 .read = drm_read,
243 .unlocked_ioctl = drm_ioctl,
244#ifdef CONFIG_COMPAT
245 .compat_ioctl = drm_compat_ioctl,
246#endif
247 .release = drm_release,
248};
249
250const struct vm_operations_struct rockchip_drm_vm_ops = {
251 .open = drm_gem_vm_open,
252 .close = drm_gem_vm_close,
253};
254
255static struct drm_driver rockchip_drm_driver = {
256 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
257 .load = rockchip_drm_load,
258 .unload = rockchip_drm_unload,
259 .lastclose = rockchip_drm_lastclose,
260 .get_vblank_counter = drm_vblank_count,
261 .enable_vblank = rockchip_drm_crtc_enable_vblank,
262 .disable_vblank = rockchip_drm_crtc_disable_vblank,
263 .gem_vm_ops = &rockchip_drm_vm_ops,
264 .gem_free_object = rockchip_gem_free_object,
265 .dumb_create = rockchip_gem_dumb_create,
266 .dumb_map_offset = rockchip_gem_dumb_map_offset,
267 .dumb_destroy = drm_gem_dumb_destroy,
268 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
269 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
270 .gem_prime_import = drm_gem_prime_import,
271 .gem_prime_export = drm_gem_prime_export,
272 .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
273 .gem_prime_vmap = rockchip_gem_prime_vmap,
274 .gem_prime_vunmap = rockchip_gem_prime_vunmap,
275 .gem_prime_mmap = rockchip_gem_mmap_buf,
276 .fops = &rockchip_drm_driver_fops,
277 .name = DRIVER_NAME,
278 .desc = DRIVER_DESC,
279 .date = DRIVER_DATE,
280 .major = DRIVER_MAJOR,
281 .minor = DRIVER_MINOR,
282};
283
284#ifdef CONFIG_PM_SLEEP
285static int rockchip_drm_sys_suspend(struct device *dev)
286{
287 struct drm_device *drm = dev_get_drvdata(dev);
288 struct drm_connector *connector;
289
290 if (!drm)
291 return 0;
292
293 drm_modeset_lock_all(drm);
294 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
295 int old_dpms = connector->dpms;
296
297 if (connector->funcs->dpms)
298 connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
299
300 /* Set the old mode back to the connector for resume */
301 connector->dpms = old_dpms;
302 }
303 drm_modeset_unlock_all(drm);
304
305 return 0;
306}
307
308static int rockchip_drm_sys_resume(struct device *dev)
309{
310 struct drm_device *drm = dev_get_drvdata(dev);
311 struct drm_connector *connector;
312 enum drm_connector_status status;
313 bool changed = false;
314
315 if (!drm)
316 return 0;
317
318 drm_modeset_lock_all(drm);
319 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
320 int desired_mode = connector->dpms;
321
322 /*
323	 * At suspend time we saved the pre-suspend dpms value back into
324	 * connector->dpms, so at this point the connector's actual hardware
325	 * state must still be DRM_MODE_DPMS_OFF.
326 */
327 connector->dpms = DRM_MODE_DPMS_OFF;
328
329 /*
330 * If the connector has been disconnected during suspend,
331 * disconnect it from the encoder and leave it off. We'll notify
332 * userspace at the end.
333 */
334 if (desired_mode == DRM_MODE_DPMS_ON) {
335 status = connector->funcs->detect(connector, true);
336 if (status == connector_status_disconnected) {
337 connector->encoder = NULL;
338 connector->status = status;
339 changed = true;
340 continue;
341 }
342 }
343 if (connector->funcs->dpms)
344 connector->funcs->dpms(connector, desired_mode);
345 }
346 drm_modeset_unlock_all(drm);
347
348 drm_helper_resume_force_mode(drm);
349
350 if (changed)
351 drm_kms_helper_hotplug_event(drm);
352
353 return 0;
354}
355#endif
356
357static const struct dev_pm_ops rockchip_drm_pm_ops = {
358 SET_SYSTEM_SLEEP_PM_OPS(rockchip_drm_sys_suspend,
359 rockchip_drm_sys_resume)
360};
361
362/*
363 * @node: device tree node containing encoder input ports
364 * @encoder: drm_encoder
365 */
366int rockchip_drm_encoder_get_mux_id(struct device_node *node,
367 struct drm_encoder *encoder)
368{
369 struct device_node *ep = NULL;
370 struct drm_crtc *crtc = encoder->crtc;
371 struct of_endpoint endpoint;
372 struct device_node *port;
373 int ret;
374
375 if (!node || !crtc)
376 return -EINVAL;
377
378 do {
379 ep = of_graph_get_next_endpoint(node, ep);
380 if (!ep)
381 break;
382
383 port = of_graph_get_remote_port(ep);
384 of_node_put(port);
385 if (port == crtc->port) {
386 ret = of_graph_parse_endpoint(ep, &endpoint);
387 return ret ?: endpoint.id;
388 }
389 } while (ep);
390
391 return -EINVAL;
392}
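The endpoint id returned here tells an encoder which VOP output is wired to it, so it can program its input mux accordingly. A hedged usage sketch (my_encoder, to_my_encoder() and MY_MUX_REG are illustrative only):

	/* Hypothetical encoder: route the connected VOP into its input mux. */
	struct my_encoder {
		struct drm_encoder encoder;
		struct device *dev;
		void __iomem *regs;
	};
	#define to_my_encoder(x) container_of(x, struct my_encoder, encoder)
	#define MY_MUX_REG 0x10	/* illustrative register offset */

	static void my_encoder_commit(struct drm_encoder *encoder)
	{
		struct my_encoder *enc = to_my_encoder(encoder);
		int mux = rockchip_drm_encoder_get_mux_id(enc->dev->of_node, encoder);

		if (mux < 0)
			return;	/* no endpoint matches the current crtc */

		writel(mux, enc->regs + MY_MUX_REG);
	}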
393
394static int compare_of(struct device *dev, void *data)
395{
396 struct device_node *np = data;
397
398 return dev->of_node == np;
399}
400
401static void rockchip_add_endpoints(struct device *dev,
402 struct component_match **match,
403 struct device_node *port)
404{
405 struct device_node *ep, *remote;
406
407 for_each_child_of_node(port, ep) {
408 remote = of_graph_get_remote_port_parent(ep);
409 if (!remote || !of_device_is_available(remote)) {
410 of_node_put(remote);
411 continue;
412 } else if (!of_device_is_available(remote->parent)) {
413 dev_warn(dev, "parent device of %s is not available\n",
414 remote->full_name);
415 of_node_put(remote);
416 continue;
417 }
418
419 component_match_add(dev, match, compare_of, remote);
420 of_node_put(remote);
421 }
422}
423
424static int rockchip_drm_bind(struct device *dev)
425{
426 struct drm_device *drm;
427 int ret;
428
429 drm = drm_dev_alloc(&rockchip_drm_driver, dev);
430 if (!drm)
431 return -ENOMEM;
432
433 ret = drm_dev_set_unique(drm, "%s", dev_name(dev));
434 if (ret)
435 goto err_free;
436
437 ret = drm_dev_register(drm, 0);
438 if (ret)
439 goto err_free;
440
441 dev_set_drvdata(dev, drm);
442
443 return 0;
444
445err_free:
446 drm_dev_unref(drm);
447 return ret;
448}
449
450static void rockchip_drm_unbind(struct device *dev)
451{
452 struct drm_device *drm = dev_get_drvdata(dev);
453
454 drm_dev_unregister(drm);
455 drm_dev_unref(drm);
456 dev_set_drvdata(dev, NULL);
457}
458
459static const struct component_master_ops rockchip_drm_ops = {
460 .bind = rockchip_drm_bind,
461 .unbind = rockchip_drm_unbind,
462};
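The component framework means each VOP and encoder driver probes on its own and is only bound, through rockchip_drm_bind(), once every device collected into the match list below has appeared. The sub-driver side is small; a hedged sketch with hypothetical names:

	/* Hypothetical sub-driver registering itself as a component. */
	static int my_vop_bind(struct device *dev, struct device *master, void *data);
	static void my_vop_unbind(struct device *dev, struct device *master, void *data);

	static const struct component_ops my_vop_component_ops = {
		.bind	= my_vop_bind,	/* data is the drm_device being brought up */
		.unbind	= my_vop_unbind,
	};

	static int my_vop_probe(struct platform_device *pdev)
	{
		return component_add(&pdev->dev, &my_vop_component_ops);
	}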
463
464static int rockchip_drm_platform_probe(struct platform_device *pdev)
465{
466 struct device *dev = &pdev->dev;
467 struct component_match *match = NULL;
468 struct device_node *np = dev->of_node;
469 struct device_node *port;
470 int i;
471
472 if (!np)
473 return -ENODEV;
474 /*
475 * Bind the crtc ports first, so that
476 * drm_of_find_possible_crtcs called from encoder .bind callbacks
477 * works as expected.
478 */
479 for (i = 0;; i++) {
480 port = of_parse_phandle(np, "ports", i);
481 if (!port)
482 break;
483
484 if (!of_device_is_available(port->parent)) {
485 of_node_put(port);
486 continue;
487 }
488
489 component_match_add(dev, &match, compare_of, port->parent);
490 of_node_put(port);
491 }
492
493 if (i == 0) {
494 dev_err(dev, "missing 'ports' property\n");
495 return -ENODEV;
496 }
497
498 if (!match) {
499 dev_err(dev, "No available vop found for display-subsystem.\n");
500 return -ENODEV;
501 }
502 /*
503 * For each bound crtc, bind the encoders attached to its
504 * remote endpoint.
505 */
506 for (i = 0;; i++) {
507 port = of_parse_phandle(np, "ports", i);
508 if (!port)
509 break;
510
511 if (!of_device_is_available(port->parent)) {
512 of_node_put(port);
513 continue;
514 }
515
516 rockchip_add_endpoints(dev, &match, port);
517 of_node_put(port);
518 }
519
520 return component_master_add_with_match(dev, &rockchip_drm_ops, match);
521}
522
523static int rockchip_drm_platform_remove(struct platform_device *pdev)
524{
525 component_master_del(&pdev->dev, &rockchip_drm_ops);
526
527 return 0;
528}
529
530static const struct of_device_id rockchip_drm_dt_ids[] = {
531 { .compatible = "rockchip,display-subsystem", },
532 { /* sentinel */ },
533};
534MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
535
536static struct platform_driver rockchip_drm_platform_driver = {
537 .probe = rockchip_drm_platform_probe,
538 .remove = rockchip_drm_platform_remove,
539 .driver = {
540 .owner = THIS_MODULE,
541 .name = "rockchip-drm",
542 .of_match_table = rockchip_drm_dt_ids,
543 .pm = &rockchip_drm_pm_ops,
544 },
545};
546
547module_platform_driver(rockchip_drm_platform_driver);
548
549MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
550MODULE_DESCRIPTION("ROCKCHIP DRM Driver");
551MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
new file mode 100644
index 000000000000..dc4e5f03ac79
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author: Mark Yao <mark.yao@rock-chips.com>
4 *
5 * based on exynos_drm_drv.h
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef _ROCKCHIP_DRM_DRV_H
18#define _ROCKCHIP_DRM_DRV_H
19
20#include <drm/drm_fb_helper.h>
21#include <drm/drm_gem.h>
22
23#include <linux/module.h>
24#include <linux/component.h>
25
26#define ROCKCHIP_MAX_FB_BUFFER 3
27#define ROCKCHIP_MAX_CONNECTOR 2
28#define ROCKCHIP_MAX_CRTC 2
29
30struct drm_device;
31struct drm_connector;
32
33/*
34 * Rockchip drm private crtc funcs.
35 * @enable_vblank: enable crtc vblank irq.
36 * @disable_vblank: disable crtc vblank irq.
37 */
38struct rockchip_crtc_funcs {
39 int (*enable_vblank)(struct drm_crtc *crtc);
40 void (*disable_vblank)(struct drm_crtc *crtc);
41};
42
43/*
44 * Rockchip drm private structure.
45 *
46 * @fbdev_helper: fbdev emulation state; @fbdev_bo is its backing GEM object.
47 * @crtc_funcs: per-pipe vblank callbacks registered by each CRTC driver.
48 */
49struct rockchip_drm_private {
50 struct drm_fb_helper fbdev_helper;
51 struct drm_gem_object *fbdev_bo;
52 const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
53};
54
55int rockchip_register_crtc_funcs(struct drm_device *dev,
56 const struct rockchip_crtc_funcs *crtc_funcs,
57 int pipe);
58void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe);
59int rockchip_drm_encoder_get_mux_id(struct device_node *node,
60 struct drm_encoder *encoder);
61int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
62 int out_mode);
63int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
64 struct device *dev);
65void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
66 struct device *dev);
67
68#endif /* _ROCKCHIP_DRM_DRV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
new file mode 100644
index 000000000000..77d52893d40f
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -0,0 +1,201 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author: Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/kernel.h>
16#include <drm/drm.h>
17#include <drm/drmP.h>
18#include <drm/drm_fb_helper.h>
19#include <drm/drm_crtc_helper.h>
20
21#include "rockchip_drm_drv.h"
22#include "rockchip_drm_gem.h"
23
24#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
25
26struct rockchip_drm_fb {
27 struct drm_framebuffer fb;
28 struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
29};
30
31struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
32 unsigned int plane)
33{
34 struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
35
36 if (plane >= ROCKCHIP_MAX_FB_BUFFER)
37 return NULL;
38
39 return rk_fb->obj[plane];
40}
41EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj);
42
43static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
44{
45 struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
46 struct drm_gem_object *obj;
47 int i;
48
49 for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) {
50 obj = rockchip_fb->obj[i];
51 if (obj)
52 drm_gem_object_unreference_unlocked(obj);
53 }
54
55 drm_framebuffer_cleanup(fb);
56 kfree(rockchip_fb);
57}
58
59static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
60 struct drm_file *file_priv,
61 unsigned int *handle)
62{
63 struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
64
65 return drm_gem_handle_create(file_priv,
66 rockchip_fb->obj[0], handle);
67}
68
69static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
70 .destroy = rockchip_drm_fb_destroy,
71 .create_handle = rockchip_drm_fb_create_handle,
72};
73
74static struct rockchip_drm_fb *
75rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd,
76 struct drm_gem_object **obj, unsigned int num_planes)
77{
78 struct rockchip_drm_fb *rockchip_fb;
79 int ret;
80 int i;
81
82 rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
83 if (!rockchip_fb)
84 return ERR_PTR(-ENOMEM);
85
86 drm_helper_mode_fill_fb_struct(&rockchip_fb->fb, mode_cmd);
87
88 for (i = 0; i < num_planes; i++)
89 rockchip_fb->obj[i] = obj[i];
90
91 ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
92 &rockchip_drm_fb_funcs);
93 if (ret) {
94 dev_err(dev->dev, "Failed to initialize framebuffer: %d\n",
95 ret);
96 kfree(rockchip_fb);
97 return ERR_PTR(ret);
98 }
99
100 return rockchip_fb;
101}
102
103static struct drm_framebuffer *
104rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
105 struct drm_mode_fb_cmd2 *mode_cmd)
106{
107 struct rockchip_drm_fb *rockchip_fb;
108 struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
109 struct drm_gem_object *obj;
110 unsigned int hsub;
111 unsigned int vsub;
112 int num_planes;
113 int ret;
114 int i;
115
116 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
117 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
118 num_planes = min(drm_format_num_planes(mode_cmd->pixel_format),
119 ROCKCHIP_MAX_FB_BUFFER);
120
121 for (i = 0; i < num_planes; i++) {
122 unsigned int width = mode_cmd->width / (i ? hsub : 1);
123 unsigned int height = mode_cmd->height / (i ? vsub : 1);
124 unsigned int min_size;
125
126 obj = drm_gem_object_lookup(dev, file_priv,
127 mode_cmd->handles[i]);
128 if (!obj) {
129 dev_err(dev->dev, "Failed to lookup GEM object\n");
130 ret = -ENXIO;
131 goto err_gem_object_unreference;
132 }
133
134 min_size = (height - 1) * mode_cmd->pitches[i] +
135 mode_cmd->offsets[i] +
136 width * drm_format_plane_cpp(mode_cmd->pixel_format, i);
137
138 if (obj->size < min_size) {
139 drm_gem_object_unreference_unlocked(obj);
140 ret = -EINVAL;
141 goto err_gem_object_unreference;
142 }
143 objs[i] = obj;
144 }
145
146 rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
147 if (IS_ERR(rockchip_fb)) {
148 ret = PTR_ERR(rockchip_fb);
149 goto err_gem_object_unreference;
150 }
151
152 return &rockchip_fb->fb;
153
154err_gem_object_unreference:
155 for (i--; i >= 0; i--)
156 drm_gem_object_unreference_unlocked(objs[i]);
157 return ERR_PTR(ret);
158}
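The min_size check above guards against a GEM object that is too small for the requested layout. Worked numbers for the chroma plane (i = 1) of a 1920x1080 NV12 framebuffer, where hsub = vsub = 2 and the interleaved CbCr plane has cpp = 2 (values illustrative):

	/*
	 *   width    = 1920 / 2 = 960
	 *   height   = 1080 / 2 = 540
	 *   min_size = (540 - 1) * pitches[1] + offsets[1] + 960 * 2
	 *            = 539 * 1920 + 0 + 1920 = 1036800 bytes
	 * so obj->size for handles[1] must be at least about 1 MiB here.
	 */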
159
160static void rockchip_drm_output_poll_changed(struct drm_device *dev)
161{
162 struct rockchip_drm_private *private = dev->dev_private;
163 struct drm_fb_helper *fb_helper = &private->fbdev_helper;
164
165 drm_fb_helper_hotplug_event(fb_helper);
166}
167
168static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
169 .fb_create = rockchip_user_fb_create,
170 .output_poll_changed = rockchip_drm_output_poll_changed,
171};
172
173struct drm_framebuffer *
174rockchip_drm_framebuffer_init(struct drm_device *dev,
175 struct drm_mode_fb_cmd2 *mode_cmd,
176 struct drm_gem_object *obj)
177{
178 struct rockchip_drm_fb *rockchip_fb;
179
180 rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
181 if (IS_ERR(rockchip_fb))
182		return ERR_CAST(rockchip_fb);
183
184 return &rockchip_fb->fb;
185}
186
187void rockchip_drm_mode_config_init(struct drm_device *dev)
188{
189 dev->mode_config.min_width = 0;
190 dev->mode_config.min_height = 0;
191
192 /*
193	 * Set max width and height to a default of 4096x4096; these
194	 * values are used to check framebuffer size limits in
195	 * drm_mode_addfb().
196 */
197 dev->mode_config.max_width = 4096;
198 dev->mode_config.max_height = 4096;
199
200 dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
201}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
new file mode 100644
index 000000000000..09574d48226f
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author: Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ROCKCHIP_DRM_FB_H
16#define _ROCKCHIP_DRM_FB_H
17
18struct drm_framebuffer *
19rockchip_drm_framebuffer_init(struct drm_device *dev,
20 struct drm_mode_fb_cmd2 *mode_cmd,
21 struct drm_gem_object *obj);
22void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
23
24void rockchip_drm_mode_config_init(struct drm_device *dev);
25
26struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
27 unsigned int plane);
28#endif /* _ROCKCHIP_DRM_FB_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
new file mode 100644
index 000000000000..a5d889a8716b
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -0,0 +1,210 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author: Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <drm/drm.h>
16#include <drm/drmP.h>
17#include <drm/drm_fb_helper.h>
18#include <drm/drm_crtc_helper.h>
19
20#include "rockchip_drm_drv.h"
21#include "rockchip_drm_gem.h"
22#include "rockchip_drm_fb.h"
23
24#define PREFERRED_BPP 32
25#define to_drm_private(x) \
26 container_of(x, struct rockchip_drm_private, fbdev_helper)
27
28static int rockchip_fbdev_mmap(struct fb_info *info,
29 struct vm_area_struct *vma)
30{
31 struct drm_fb_helper *helper = info->par;
32 struct rockchip_drm_private *private = to_drm_private(helper);
33
34 return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
35}
36
37static struct fb_ops rockchip_drm_fbdev_ops = {
38 .owner = THIS_MODULE,
39 .fb_mmap = rockchip_fbdev_mmap,
40 .fb_fillrect = cfb_fillrect,
41 .fb_copyarea = cfb_copyarea,
42 .fb_imageblit = cfb_imageblit,
43 .fb_check_var = drm_fb_helper_check_var,
44 .fb_set_par = drm_fb_helper_set_par,
45 .fb_blank = drm_fb_helper_blank,
46 .fb_pan_display = drm_fb_helper_pan_display,
47 .fb_setcmap = drm_fb_helper_setcmap,
48};
49
50static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
51 struct drm_fb_helper_surface_size *sizes)
52{
53 struct rockchip_drm_private *private = to_drm_private(helper);
54 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
55 struct drm_device *dev = helper->dev;
56 struct rockchip_gem_object *rk_obj;
57 struct drm_framebuffer *fb;
58 unsigned int bytes_per_pixel;
59 unsigned long offset;
60 struct fb_info *fbi;
61 size_t size;
62 int ret;
63
64 bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
65
66 mode_cmd.width = sizes->surface_width;
67 mode_cmd.height = sizes->surface_height;
68 mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
69 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
70 sizes->surface_depth);
71
72 size = mode_cmd.pitches[0] * mode_cmd.height;
73
74 rk_obj = rockchip_gem_create_object(dev, size);
75 if (IS_ERR(rk_obj))
76		return PTR_ERR(rk_obj);
77
78 private->fbdev_bo = &rk_obj->base;
79
80 fbi = framebuffer_alloc(0, dev->dev);
81 if (!fbi) {
82 dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
83 ret = -ENOMEM;
84 goto err_rockchip_gem_free_object;
85 }
86
87 helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd,
88 private->fbdev_bo);
89 if (IS_ERR(helper->fb)) {
90 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
91 ret = PTR_ERR(helper->fb);
92 goto err_framebuffer_release;
93 }
94
95 helper->fbdev = fbi;
96
97 fbi->par = helper;
98 fbi->flags = FBINFO_FLAG_DEFAULT;
99 fbi->fbops = &rockchip_drm_fbdev_ops;
100
101 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
102 if (ret) {
103 dev_err(dev->dev, "Failed to allocate color map.\n");
104 goto err_drm_framebuffer_unref;
105 }
106
107 fb = helper->fb;
108 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
109 drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
110
111 offset = fbi->var.xoffset * bytes_per_pixel;
112 offset += fbi->var.yoffset * fb->pitches[0];
113
114 dev->mode_config.fb_base = 0;
115 fbi->screen_base = rk_obj->kvaddr + offset;
116 fbi->screen_size = rk_obj->base.size;
117 fbi->fix.smem_len = rk_obj->base.size;
118
119	DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%lu size=%zu\n",
120 fb->width, fb->height, fb->depth, rk_obj->kvaddr,
121 offset, size);
122 return 0;
123
124err_drm_framebuffer_unref:
125 drm_framebuffer_unreference(helper->fb);
126err_framebuffer_release:
127 framebuffer_release(fbi);
128err_rockchip_gem_free_object:
129 rockchip_gem_free_object(&rk_obj->base);
130 return ret;
131}
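The screen_base math above simply offsets the kernel mapping to the first visible pixel. Worked numbers, assuming a 1024x768 console at the 32bpp preferred depth (illustrative):

	/*
	 *   bytes_per_pixel = 4,  pitches[0] = 1024 * 4 = 4096
	 *   size   = 4096 * 768 = 3145728 bytes (3 MiB)
	 *   offset = xoffset * 4 + yoffset * 4096 = 0 for the initial config,
	 * so the console scans out from rk_obj->kvaddr directly.
	 */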
132
133static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = {
134 .fb_probe = rockchip_drm_fbdev_create,
135};
136
137int rockchip_drm_fbdev_init(struct drm_device *dev)
138{
139 struct rockchip_drm_private *private = dev->dev_private;
140 struct drm_fb_helper *helper;
141 unsigned int num_crtc;
142 int ret;
143
144 if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
145 return -EINVAL;
146
147 num_crtc = dev->mode_config.num_crtc;
148
149 helper = &private->fbdev_helper;
150
151 drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
152
153 ret = drm_fb_helper_init(dev, helper, num_crtc, ROCKCHIP_MAX_CONNECTOR);
154 if (ret < 0) {
155 dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n",
156 ret);
157 return ret;
158 }
159
160 ret = drm_fb_helper_single_add_all_connectors(helper);
161 if (ret < 0) {
162 dev_err(dev->dev, "Failed to add connectors - %d.\n", ret);
163 goto err_drm_fb_helper_fini;
164 }
165
166 /* disable all the possible outputs/crtcs before entering KMS mode */
167 drm_helper_disable_unused_functions(dev);
168
169 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
170 if (ret < 0) {
171 dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
172 ret);
173 goto err_drm_fb_helper_fini;
174 }
175
176 return 0;
177
178err_drm_fb_helper_fini:
179 drm_fb_helper_fini(helper);
180 return ret;
181}
182
183void rockchip_drm_fbdev_fini(struct drm_device *dev)
184{
185 struct rockchip_drm_private *private = dev->dev_private;
186 struct drm_fb_helper *helper;
187
188 helper = &private->fbdev_helper;
189
190 if (helper->fbdev) {
191 struct fb_info *info;
192 int ret;
193
194 info = helper->fbdev;
195 ret = unregister_framebuffer(info);
196 if (ret < 0)
197 DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
198 ret);
199
200 if (info->cmap.len)
201 fb_dealloc_cmap(&info->cmap);
202
203 framebuffer_release(info);
204 }
205
206 if (helper->fb)
207 drm_framebuffer_unreference(helper->fb);
208
209 drm_fb_helper_fini(helper);
210}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
new file mode 100644
index 000000000000..50432e9b5b37
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author: Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ROCKCHIP_DRM_FBDEV_H
16#define _ROCKCHIP_DRM_FBDEV_H
17
18int rockchip_drm_fbdev_init(struct drm_device *dev);
19void rockchip_drm_fbdev_fini(struct drm_device *dev);
20
21#endif /* _ROCKCHIP_DRM_FBDEV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
new file mode 100644
index 000000000000..bc98a227dc76
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -0,0 +1,294 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author: Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <drm/drm.h>
16#include <drm/drmP.h>
17#include <drm/drm_gem.h>
18#include <drm/drm_vma_manager.h>
19
20#include <linux/dma-attrs.h>
21
22#include "rockchip_drm_drv.h"
23#include "rockchip_drm_gem.h"
24
25static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj)
26{
27 struct drm_gem_object *obj = &rk_obj->base;
28 struct drm_device *drm = obj->dev;
29
30 init_dma_attrs(&rk_obj->dma_attrs);
31 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
32
33 /* TODO(djkurtz): Use DMA_ATTR_NO_KERNEL_MAPPING except for fbdev */
34 rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
35 &rk_obj->dma_addr, GFP_KERNEL,
36 &rk_obj->dma_attrs);
37 if (IS_ERR(rk_obj->kvaddr)) {
38 int ret = PTR_ERR(rk_obj->kvaddr);
39
40		DRM_ERROR("failed to allocate %zu byte dma buffer, %d\n",
41			  obj->size, ret);
42 return ret;
43 }
44
45 return 0;
46}
47
48static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
49{
50 struct drm_gem_object *obj = &rk_obj->base;
51 struct drm_device *drm = obj->dev;
52
53 dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
54 &rk_obj->dma_attrs);
55}
56
57int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
58 struct vm_area_struct *vma)
59{
60 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
61 struct drm_device *drm = obj->dev;
62 unsigned long vm_size;
63
64 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
65 vm_size = vma->vm_end - vma->vm_start;
66
67 if (vm_size > obj->size)
68 return -EINVAL;
69
70 return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
71 obj->size, &rk_obj->dma_attrs);
72}
73
74/* drm driver mmap file operations */
75int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
76{
77 struct drm_file *priv = filp->private_data;
78 struct drm_device *dev = priv->minor->dev;
79 struct drm_gem_object *obj;
80 struct drm_vma_offset_node *node;
81 int ret;
82
83 if (drm_device_is_unplugged(dev))
84 return -ENODEV;
85
86 mutex_lock(&dev->struct_mutex);
87
88 node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
89 vma->vm_pgoff,
90 vma_pages(vma));
91 if (!node) {
92 mutex_unlock(&dev->struct_mutex);
93 DRM_ERROR("failed to find vma node.\n");
94 return -EINVAL;
95 } else if (!drm_vma_node_is_allowed(node, filp)) {
96 mutex_unlock(&dev->struct_mutex);
97 return -EACCES;
98 }
99
100 obj = container_of(node, struct drm_gem_object, vma_node);
101 ret = rockchip_gem_mmap_buf(obj, vma);
102
103 mutex_unlock(&dev->struct_mutex);
104
105 return ret;
106}
107
108struct rockchip_gem_object *
109 rockchip_gem_create_object(struct drm_device *drm, unsigned int size)
110{
111 struct rockchip_gem_object *rk_obj;
112 struct drm_gem_object *obj;
113 int ret;
114
115 size = round_up(size, PAGE_SIZE);
116
117 rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
118 if (!rk_obj)
119 return ERR_PTR(-ENOMEM);
120
121 obj = &rk_obj->base;
122
123 drm_gem_private_object_init(drm, obj, size);
124
125 ret = rockchip_gem_alloc_buf(rk_obj);
126 if (ret)
127 goto err_free_rk_obj;
128
129 return rk_obj;
130
131err_free_rk_obj:
132 kfree(rk_obj);
133 return ERR_PTR(ret);
134}
135
136/*
137 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
138 * function
139 */
140void rockchip_gem_free_object(struct drm_gem_object *obj)
141{
142 struct rockchip_gem_object *rk_obj;
143
144 drm_gem_free_mmap_offset(obj);
145
146 rk_obj = to_rockchip_obj(obj);
147
148 rockchip_gem_free_buf(rk_obj);
149
150 kfree(rk_obj);
151}
152
153/*
154 * rockchip_gem_create_with_handle - allocate an object with the given
155 * size and create a gem handle on it
156 *
157 * returns a struct rockchip_gem_object* on success or ERR_PTR values
158 * on failure.
159 */
160static struct rockchip_gem_object *
161rockchip_gem_create_with_handle(struct drm_file *file_priv,
162 struct drm_device *drm, unsigned int size,
163 unsigned int *handle)
164{
165 struct rockchip_gem_object *rk_obj;
166 struct drm_gem_object *obj;
167 int ret;
168
169 rk_obj = rockchip_gem_create_object(drm, size);
170 if (IS_ERR(rk_obj))
171 return ERR_CAST(rk_obj);
172
173 obj = &rk_obj->base;
174
175 /*
176	 * Allocate an id in the idr table; the object is registered under
177	 * it and the id is returned to userspace as the handle.
178 */
179 ret = drm_gem_handle_create(file_priv, obj, handle);
180 if (ret)
181 goto err_handle_create;
182
183 /* drop reference from allocate - handle holds it now. */
184 drm_gem_object_unreference_unlocked(obj);
185
186 return rk_obj;
187
188err_handle_create:
189 rockchip_gem_free_object(obj);
190
191 return ERR_PTR(ret);
192}
193
194int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
195 struct drm_device *dev, uint32_t handle,
196 uint64_t *offset)
197{
198 struct drm_gem_object *obj;
199 int ret;
200
201 mutex_lock(&dev->struct_mutex);
202
203 obj = drm_gem_object_lookup(dev, file_priv, handle);
204 if (!obj) {
205 DRM_ERROR("failed to lookup gem object.\n");
206 ret = -EINVAL;
207 goto unlock;
208 }
209
210 ret = drm_gem_create_mmap_offset(obj);
211 if (ret)
212 goto out;
213
214 *offset = drm_vma_node_offset_addr(&obj->vma_node);
215 DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
216
217out:
218 drm_gem_object_unreference(obj);
219unlock:
220 mutex_unlock(&dev->struct_mutex);
221 return ret;
222}
223
224/*
225 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
226 * function
227 *
228 * This aligns the pitch and size arguments to the minimum required. Wrap
229 * this in your own function if you need bigger alignment.
230 */
231int rockchip_gem_dumb_create(struct drm_file *file_priv,
232 struct drm_device *dev,
233 struct drm_mode_create_dumb *args)
234{
235 struct rockchip_gem_object *rk_obj;
236 int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
237
238 /*
239 * align to 64 bytes since Mali requires it.
240 */
241 min_pitch = ALIGN(min_pitch, 64);
242
243 if (args->pitch < min_pitch)
244 args->pitch = min_pitch;
245
246 if (args->size < args->pitch * args->height)
247 args->size = args->pitch * args->height;
248
249 rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
250 &args->handle);
251
252 return PTR_ERR_OR_ZERO(rk_obj);
253}
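Worked numbers for the pitch alignment above, taking a hypothetical 1366-pixel-wide, 32bpp dumb buffer:

	/*
	 *   min_pitch = DIV_ROUND_UP(1366 * 32, 8) = 5464
	 *   ALIGN(5464, 64)                        = 5504
	 * so args->pitch becomes 5504 and args->size at least 5504 * height.
	 */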
254
255/*
256 * Allocate a sg_table for this GEM object.
257 * Note: Both the table's contents, and the sg_table itself must be freed by
258 * the caller.
259 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
260 */
261struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
262{
263 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
264 struct drm_device *drm = obj->dev;
265 struct sg_table *sgt;
266 int ret;
267
268 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
269 if (!sgt)
270 return ERR_PTR(-ENOMEM);
271
272 ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
273 rk_obj->dma_addr, obj->size,
274 &rk_obj->dma_attrs);
275 if (ret) {
276 DRM_ERROR("failed to allocate sgt, %d\n", ret);
277 kfree(sgt);
278 return ERR_PTR(ret);
279 }
280
281 return sgt;
282}
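Since the comment above gives the caller ownership of both the table and its contents, the importing side is expected to clean up both. A hedged sketch (my_import is illustrative):

	/* Consume and release an exported sg_table. */
	static int my_import(struct drm_gem_object *obj)
	{
		struct sg_table *sgt = rockchip_gem_prime_get_sg_table(obj);

		if (IS_ERR(sgt))
			return PTR_ERR(sgt);

		/* ... dma_map_sg() and walk the entries ... */

		sg_free_table(sgt);	/* frees the table's chained sg lists */
		kfree(sgt);		/* then the sg_table allocation itself */
		return 0;
	}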
283
284void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
285{
286 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
287
288 return rk_obj->kvaddr;
289}
290
291void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
292{
293 /* Nothing to do */
294}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
new file mode 100644
index 000000000000..67bcebe90003
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author: Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ROCKCHIP_DRM_GEM_H
16#define _ROCKCHIP_DRM_GEM_H
17
18#define to_rockchip_obj(x) container_of(x, struct rockchip_gem_object, base)
19
20struct rockchip_gem_object {
21 struct drm_gem_object base;
22 unsigned int flags;
23
24 void *kvaddr;
25 dma_addr_t dma_addr;
26 struct dma_attrs dma_attrs;
27};
28
29struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
30struct drm_gem_object *
31rockchip_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
32 struct sg_table *sgt);
33void *rockchip_gem_prime_vmap(struct drm_gem_object *obj);
34void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
35
36/* drm driver mmap file operations */
37int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma);
38
39/* mmap a gem object to userspace. */
40int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
41 struct vm_area_struct *vma);
42
43struct rockchip_gem_object *
44 rockchip_gem_create_object(struct drm_device *drm, unsigned int size);
45
46void rockchip_gem_free_object(struct drm_gem_object *obj);
47
48int rockchip_gem_dumb_create(struct drm_file *file_priv,
49 struct drm_device *dev,
50 struct drm_mode_create_dumb *args);
51int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
52 struct drm_device *dev, uint32_t handle,
53 uint64_t *offset);
54#endif /* _ROCKCHIP_DRM_GEM_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
new file mode 100644
index 000000000000..e7ca25b3fb38
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -0,0 +1,1455 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author: Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <drm/drm.h>
16#include <drm/drmP.h>
17#include <drm/drm_crtc.h>
18#include <drm/drm_crtc_helper.h>
19#include <drm/drm_plane_helper.h>
20
21#include <linux/kernel.h>
22#include <linux/platform_device.h>
23#include <linux/clk.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/pm_runtime.h>
27#include <linux/component.h>
28
29#include <linux/reset.h>
30#include <linux/delay.h>
31
32#include "rockchip_drm_drv.h"
33#include "rockchip_drm_gem.h"
34#include "rockchip_drm_fb.h"
35#include "rockchip_drm_vop.h"
36
37#define VOP_REG(off, _mask, s) \
38 {.offset = off, \
39 .mask = _mask, \
40 .shift = s,}
41
42#define __REG_SET_RELAXED(x, off, mask, shift, v) \
43 vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
44#define __REG_SET_NORMAL(x, off, mask, shift, v) \
45 vop_mask_write(x, off, (mask) << shift, (v) << shift)
46
47#define REG_SET(x, base, reg, v, mode) \
48 __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
49
50#define VOP_WIN_SET(x, win, name, v) \
51 REG_SET(x, win->base, win->phy->name, v, RELAXED)
52#define VOP_CTRL_SET(x, name, v) \
53 REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
54
55#define VOP_WIN_GET(x, win, name) \
56 vop_read_reg(x, win->base, &win->phy->name)
57
58#define VOP_WIN_GET_YRGBADDR(vop, win) \
59 vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
60
61#define to_vop(x) container_of(x, struct vop, crtc)
62#define to_vop_win(x) container_of(x, struct vop_win, base)
63
64struct vop_win_state {
65 struct list_head head;
66 struct drm_framebuffer *fb;
67 dma_addr_t yrgb_mst;
68 struct drm_pending_vblank_event *event;
69};
70
71struct vop_win {
72 struct drm_plane base;
73 const struct vop_win_data *data;
74 struct vop *vop;
75
76 struct list_head pending;
77 struct vop_win_state *active;
78};
79
80struct vop {
81 struct drm_crtc crtc;
82 struct device *dev;
83 struct drm_device *drm_dev;
84 unsigned int dpms;
85
86 int connector_type;
87 int connector_out_mode;
88
89	/* serializes vsync work and the per-window pending fb queues */
90 struct mutex vsync_mutex;
91 bool vsync_work_pending;
92
93 const struct vop_data *data;
94
95 uint32_t *regsbak;
96 void __iomem *regs;
97
98	/* length of the mapped vop register region */
99 uint32_t len;
100
101	/* only one task may configure the registers at a time */
102 spinlock_t reg_lock;
103 /* lock vop irq reg */
104 spinlock_t irq_lock;
105
106 unsigned int irq;
107
108	/* vop AHB clk */
109 struct clk *hclk;
110 /* vop dclk */
111 struct clk *dclk;
112	/* vop shared-memory (axi) clk */
113 struct clk *aclk;
114
115 /* vop dclk reset */
116 struct reset_control *dclk_rst;
117
118 int pipe;
119
120 struct vop_win win[];
121};
122
123enum vop_data_format {
124 VOP_FMT_ARGB8888 = 0,
125 VOP_FMT_RGB888,
126 VOP_FMT_RGB565,
127 VOP_FMT_YUV420SP = 4,
128 VOP_FMT_YUV422SP,
129 VOP_FMT_YUV444SP,
130};
131
132struct vop_reg_data {
133 uint32_t offset;
134 uint32_t value;
135};
136
137struct vop_reg {
138 uint32_t offset;
139 uint32_t shift;
140 uint32_t mask;
141};
142
143struct vop_ctrl {
144 struct vop_reg standby;
145 struct vop_reg data_blank;
146 struct vop_reg gate_en;
147 struct vop_reg mmu_en;
148 struct vop_reg rgb_en;
149 struct vop_reg edp_en;
150 struct vop_reg hdmi_en;
151 struct vop_reg mipi_en;
152 struct vop_reg out_mode;
153 struct vop_reg dither_down;
154 struct vop_reg dither_up;
155 struct vop_reg pin_pol;
156
157 struct vop_reg htotal_pw;
158 struct vop_reg hact_st_end;
159 struct vop_reg vtotal_pw;
160 struct vop_reg vact_st_end;
161 struct vop_reg hpost_st_end;
162 struct vop_reg vpost_st_end;
163};
164
165struct vop_win_phy {
166 const uint32_t *data_formats;
167 uint32_t nformats;
168
169 struct vop_reg enable;
170 struct vop_reg format;
171 struct vop_reg act_info;
172 struct vop_reg dsp_info;
173 struct vop_reg dsp_st;
174 struct vop_reg yrgb_mst;
175 struct vop_reg uv_mst;
176 struct vop_reg yrgb_vir;
177 struct vop_reg uv_vir;
178
179 struct vop_reg dst_alpha_ctl;
180 struct vop_reg src_alpha_ctl;
181};
182
183struct vop_win_data {
184 uint32_t base;
185 const struct vop_win_phy *phy;
186 enum drm_plane_type type;
187};
188
189struct vop_data {
190 const struct vop_reg_data *init_table;
191 unsigned int table_size;
192 const struct vop_ctrl *ctrl;
193 const struct vop_win_data *win;
194 unsigned int win_size;
195};
196
197static const uint32_t formats_01[] = {
198 DRM_FORMAT_XRGB8888,
199 DRM_FORMAT_ARGB8888,
200 DRM_FORMAT_RGB888,
201 DRM_FORMAT_RGB565,
202 DRM_FORMAT_NV12,
203 DRM_FORMAT_NV16,
204 DRM_FORMAT_NV24,
205};
206
207static const uint32_t formats_234[] = {
208 DRM_FORMAT_XRGB8888,
209 DRM_FORMAT_ARGB8888,
210 DRM_FORMAT_RGB888,
211 DRM_FORMAT_RGB565,
212};
213
214static const struct vop_win_phy win01_data = {
215 .data_formats = formats_01,
216 .nformats = ARRAY_SIZE(formats_01),
217 .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
218 .format = VOP_REG(WIN0_CTRL0, 0x7, 1),
219 .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
220 .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
221 .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
222 .yrgb_mst = VOP_REG(WIN0_YRGB_MST, 0xffffffff, 0),
223 .uv_mst = VOP_REG(WIN0_CBR_MST, 0xffffffff, 0),
224 .yrgb_vir = VOP_REG(WIN0_VIR, 0x3fff, 0),
225 .uv_vir = VOP_REG(WIN0_VIR, 0x3fff, 16),
226 .src_alpha_ctl = VOP_REG(WIN0_SRC_ALPHA_CTRL, 0xff, 0),
227 .dst_alpha_ctl = VOP_REG(WIN0_DST_ALPHA_CTRL, 0xff, 0),
228};
229
230static const struct vop_win_phy win23_data = {
231 .data_formats = formats_234,
232 .nformats = ARRAY_SIZE(formats_234),
233 .enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
234 .format = VOP_REG(WIN2_CTRL0, 0x7, 1),
235 .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
236 .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
237 .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
238 .yrgb_vir = VOP_REG(WIN2_VIR0_1, 0x1fff, 0),
239 .src_alpha_ctl = VOP_REG(WIN2_SRC_ALPHA_CTRL, 0xff, 0),
240 .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
241};
242
243static const struct vop_win_phy cursor_data = {
244 .data_formats = formats_234,
245 .nformats = ARRAY_SIZE(formats_234),
246 .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
247 .format = VOP_REG(HWC_CTRL0, 0x7, 1),
248 .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
249 .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
250};
251
252static const struct vop_ctrl ctrl_data = {
253 .standby = VOP_REG(SYS_CTRL, 0x1, 22),
254 .gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
255 .mmu_en = VOP_REG(SYS_CTRL, 0x1, 20),
256 .rgb_en = VOP_REG(SYS_CTRL, 0x1, 12),
257 .hdmi_en = VOP_REG(SYS_CTRL, 0x1, 13),
258 .edp_en = VOP_REG(SYS_CTRL, 0x1, 14),
259 .mipi_en = VOP_REG(SYS_CTRL, 0x1, 15),
260 .dither_down = VOP_REG(DSP_CTRL1, 0xf, 1),
261 .dither_up = VOP_REG(DSP_CTRL1, 0x1, 6),
262 .data_blank = VOP_REG(DSP_CTRL0, 0x1, 19),
263 .out_mode = VOP_REG(DSP_CTRL0, 0xf, 0),
264 .pin_pol = VOP_REG(DSP_CTRL0, 0xf, 4),
265 .htotal_pw = VOP_REG(DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
266 .hact_st_end = VOP_REG(DSP_HACT_ST_END, 0x1fff1fff, 0),
267 .vtotal_pw = VOP_REG(DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
268 .vact_st_end = VOP_REG(DSP_VACT_ST_END, 0x1fff1fff, 0),
269 .hpost_st_end = VOP_REG(POST_DSP_HACT_INFO, 0x1fff1fff, 0),
270 .vpost_st_end = VOP_REG(POST_DSP_VACT_INFO, 0x1fff1fff, 0),
271};
272
273static const struct vop_reg_data vop_init_reg_table[] = {
274 {SYS_CTRL, 0x00c00000},
275 {DSP_CTRL0, 0x00000000},
276 {WIN0_CTRL0, 0x00000080},
277 {WIN1_CTRL0, 0x00000080},
278};
279
280/*
281 * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
282 * special support to get alpha blending working. For now, just use overlay
283 * window 1 for the drm cursor.
284 */
285static const struct vop_win_data rk3288_vop_win_data[] = {
286 { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
287 { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR },
288 { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
289 { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
290 { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
291};
292
293static const struct vop_data rk3288_vop = {
294 .init_table = vop_init_reg_table,
295 .table_size = ARRAY_SIZE(vop_init_reg_table),
296 .ctrl = &ctrl_data,
297 .win = rk3288_vop_win_data,
298 .win_size = ARRAY_SIZE(rk3288_vop_win_data),
299};
300
301static const struct of_device_id vop_driver_dt_match[] = {
302 { .compatible = "rockchip,rk3288-vop",
303 .data = &rk3288_vop },
304 {},
305};
306
307static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
308{
309 writel(v, vop->regs + offset);
310 vop->regsbak[offset >> 2] = v;
311}
312
313static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
314{
315 return readl(vop->regs + offset);
316}
317
318static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
319 const struct vop_reg *reg)
320{
321 return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
322}
323
324static inline void vop_cfg_done(struct vop *vop)
325{
326 writel(0x01, vop->regs + REG_CFG_DONE);
327}
328
329static inline void vop_mask_write(struct vop *vop, uint32_t offset,
330 uint32_t mask, uint32_t v)
331{
332 if (mask) {
333 uint32_t cached_val = vop->regsbak[offset >> 2];
334
335 cached_val = (cached_val & ~mask) | v;
336 writel(cached_val, vop->regs + offset);
337 vop->regsbak[offset >> 2] = cached_val;
338 }
339}
340
341static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
342 uint32_t mask, uint32_t v)
343{
344 if (mask) {
345 uint32_t cached_val = vop->regsbak[offset >> 2];
346
347 cached_val = (cached_val & ~mask) | v;
348 writel_relaxed(cached_val, vop->regs + offset);
349 vop->regsbak[offset >> 2] = cached_val;
350 }
351}
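These two helpers implement a shadow-register scheme: regsbak caches the last value written to every register word, so updating one field never requires reading the hardware back. The VOP_*_SET macros expand a declarative vop_reg into exactly such a masked write; for instance (expansion shown for illustration):

	/*
	 *   VOP_CTRL_SET(vop, standby, 1)
	 * expands to
	 *   vop_mask_write(vop, SYS_CTRL, 0x1 << 22, 1 << 22);
	 * which rewrites only bit 22 of the cached SYS_CTRL word and writes
	 * the merged value back to the hardware.
	 */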
352
353static enum vop_data_format vop_convert_format(uint32_t format)
354{
355 switch (format) {
356 case DRM_FORMAT_XRGB8888:
357 case DRM_FORMAT_ARGB8888:
358 return VOP_FMT_ARGB8888;
359 case DRM_FORMAT_RGB888:
360 return VOP_FMT_RGB888;
361 case DRM_FORMAT_RGB565:
362 return VOP_FMT_RGB565;
363 case DRM_FORMAT_NV12:
364 return VOP_FMT_YUV420SP;
365 case DRM_FORMAT_NV16:
366 return VOP_FMT_YUV422SP;
367 case DRM_FORMAT_NV24:
368 return VOP_FMT_YUV444SP;
369 default:
370 DRM_ERROR("unsupport format[%08x]\n", format);
371 return -EINVAL;
372 }
373}
374
375static bool is_alpha_support(uint32_t format)
376{
377 switch (format) {
378 case DRM_FORMAT_ARGB8888:
379 return true;
380 default:
381 return false;
382 }
383}
384
385static void vop_enable(struct drm_crtc *crtc)
386{
387 struct vop *vop = to_vop(crtc);
388 int ret;
389
390 ret = clk_enable(vop->hclk);
391 if (ret < 0) {
392 dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
393 return;
394 }
395
396 ret = clk_enable(vop->dclk);
397 if (ret < 0) {
398 dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
399 goto err_disable_hclk;
400 }
401
402 ret = clk_enable(vop->aclk);
403 if (ret < 0) {
404 dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
405 goto err_disable_dclk;
406 }
407
408 /*
409 * Slave iommu shares power, irq and clock with vop. It was associated
410 * automatically with this master device via common driver code.
411	 * Now that we have enabled the clocks, we attach it to the shared drm
412	 * mapping.
413 */
414 ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
415 if (ret) {
416 dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
417 goto err_disable_aclk;
418 }
419
420 spin_lock(&vop->reg_lock);
421
422 VOP_CTRL_SET(vop, standby, 0);
423
424 spin_unlock(&vop->reg_lock);
425
426 enable_irq(vop->irq);
427
428 drm_vblank_on(vop->drm_dev, vop->pipe);
429
430 return;
431
432err_disable_aclk:
433 clk_disable(vop->aclk);
434err_disable_dclk:
435 clk_disable(vop->dclk);
436err_disable_hclk:
437 clk_disable(vop->hclk);
438}
439
440static void vop_disable(struct drm_crtc *crtc)
441{
442 struct vop *vop = to_vop(crtc);
443
444 drm_vblank_off(crtc->dev, vop->pipe);
445
446 disable_irq(vop->irq);
447
448 /*
449 * TODO: Since standby doesn't take effect until the next vblank,
450 * when we turn off dclk below, the vop is probably still active.
451 */
452 spin_lock(&vop->reg_lock);
453
454 VOP_CTRL_SET(vop, standby, 1);
455
456 spin_unlock(&vop->reg_lock);
457 /*
458	 * Disable dclk to stop the frame scan, so we can safely detach the iommu.
459 */
460 clk_disable(vop->dclk);
461
462 rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
463
464 clk_disable(vop->aclk);
465 clk_disable(vop->hclk);
466}
467
468/*
469 * Caller must hold vsync_mutex.
470 */
471static struct drm_framebuffer *vop_win_last_pending_fb(struct vop_win *vop_win)
472{
473 struct vop_win_state *last;
474 struct vop_win_state *active = vop_win->active;
475
476 if (list_empty(&vop_win->pending))
477 return active ? active->fb : NULL;
478
479 last = list_last_entry(&vop_win->pending, struct vop_win_state, head);
480 return last ? last->fb : NULL;
481}
482
483/*
484 * Caller must hold vsync_mutex.
485 */
486static int vop_win_queue_fb(struct vop_win *vop_win,
487 struct drm_framebuffer *fb, dma_addr_t yrgb_mst,
488 struct drm_pending_vblank_event *event)
489{
490 struct vop_win_state *state;
491
492 state = kzalloc(sizeof(*state), GFP_KERNEL);
493 if (!state)
494 return -ENOMEM;
495
496 state->fb = fb;
497 state->yrgb_mst = yrgb_mst;
498 state->event = event;
499
500 list_add_tail(&state->head, &vop_win->pending);
501
502 return 0;
503}
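/*
 * Lifecycle of a queued state, as implemented below: the update path
 * queues a vop_win_state and sets vsync_work_pending; on the next
 * frame-start interrupt, vop_isr_thread() calls vop_win_update_state(),
 * which promotes the state whose yrgb_mst (or disable) has latched into
 * the hardware, sends its pending vblank event and drops the reference
 * on the framebuffer it replaced.
 */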
504
505static int vop_update_plane_event(struct drm_plane *plane,
506 struct drm_crtc *crtc,
507 struct drm_framebuffer *fb, int crtc_x,
508 int crtc_y, unsigned int crtc_w,
509 unsigned int crtc_h, uint32_t src_x,
510 uint32_t src_y, uint32_t src_w,
511 uint32_t src_h,
512 struct drm_pending_vblank_event *event)
513{
514 struct vop_win *vop_win = to_vop_win(plane);
515 const struct vop_win_data *win = vop_win->data;
516 struct vop *vop = to_vop(crtc);
517 struct drm_gem_object *obj;
518 struct rockchip_gem_object *rk_obj;
519 unsigned long offset;
520 unsigned int actual_w;
521 unsigned int actual_h;
522 unsigned int dsp_stx;
523 unsigned int dsp_sty;
524 unsigned int y_vir_stride;
525 dma_addr_t yrgb_mst;
526 enum vop_data_format format;
527 uint32_t val;
528 bool is_alpha;
529 bool visible;
530 int ret;
531 struct drm_rect dest = {
532 .x1 = crtc_x,
533 .y1 = crtc_y,
534 .x2 = crtc_x + crtc_w,
535 .y2 = crtc_y + crtc_h,
536 };
537 struct drm_rect src = {
538 /* 16.16 fixed point */
539 .x1 = src_x,
540 .y1 = src_y,
541 .x2 = src_x + src_w,
542 .y2 = src_y + src_h,
543 };
544 const struct drm_rect clip = {
545 .x2 = crtc->mode.hdisplay,
546 .y2 = crtc->mode.vdisplay,
547 };
548 bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
549
550 ret = drm_plane_helper_check_update(plane, crtc, fb,
551 &src, &dest, &clip,
552 DRM_PLANE_HELPER_NO_SCALING,
553 DRM_PLANE_HELPER_NO_SCALING,
554 can_position, false, &visible);
555 if (ret)
556 return ret;
557
558 if (!visible)
559 return 0;
560
561 is_alpha = is_alpha_support(fb->pixel_format);
562 format = vop_convert_format(fb->pixel_format);
563 if (format < 0)
564 return format;
565
566 obj = rockchip_fb_get_gem_obj(fb, 0);
567 if (!obj) {
568 DRM_ERROR("fail to get rockchip gem object from framebuffer\n");
569 return -EINVAL;
570 }
571
572 rk_obj = to_rockchip_obj(obj);
573
574 actual_w = (src.x2 - src.x1) >> 16;
575 actual_h = (src.y2 - src.y1) >> 16;
576 crtc_x = max(0, crtc_x);
577 crtc_y = max(0, crtc_y);
578
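	/*
	 * The display-start registers count in dclk cycles from the sync
	 * pulse, so offset the CRTC position by sync length plus back
	 * porch: htotal - hsync_start (and vtotal - vsync_start).
	 */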
579 dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
580 dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;
581
582 offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
583 offset += (src.y1 >> 16) * fb->pitches[0];
584 yrgb_mst = rk_obj->dma_addr + offset;
585
586 y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);
587
588 /*
589	 * If this plane update changes the plane's framebuffer (or more
590 * precisely, if this update has a different framebuffer than the last
591 * update), enqueue it so we can track when it completes.
592 *
593 * Only when we discover that this update has completed, can we
594 * unreference any previous framebuffers.
595 */
596 mutex_lock(&vop->vsync_mutex);
597 if (fb != vop_win_last_pending_fb(vop_win)) {
598 ret = drm_vblank_get(plane->dev, vop->pipe);
599 if (ret) {
600 DRM_ERROR("failed to get vblank, %d\n", ret);
601 mutex_unlock(&vop->vsync_mutex);
602 return ret;
603 }
604
605 drm_framebuffer_reference(fb);
606
607 ret = vop_win_queue_fb(vop_win, fb, yrgb_mst, event);
608 if (ret) {
609 drm_vblank_put(plane->dev, vop->pipe);
610 mutex_unlock(&vop->vsync_mutex);
611 return ret;
612 }
613
614 vop->vsync_work_pending = true;
615 }
616 mutex_unlock(&vop->vsync_mutex);
617
618 spin_lock(&vop->reg_lock);
619
620 VOP_WIN_SET(vop, win, format, format);
621 VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
622 VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
623 val = (actual_h - 1) << 16;
624 val |= (actual_w - 1) & 0xffff;
625 VOP_WIN_SET(vop, win, act_info, val);
626 VOP_WIN_SET(vop, win, dsp_info, val);
627 val = (dsp_sty - 1) << 16;
628 val |= (dsp_stx - 1) & 0xffff;
629 VOP_WIN_SET(vop, win, dsp_st, val);
630
631 if (is_alpha) {
632 VOP_WIN_SET(vop, win, dst_alpha_ctl,
633 DST_FACTOR_M0(ALPHA_SRC_INVERSE));
634 val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
635 SRC_ALPHA_M0(ALPHA_STRAIGHT) |
636 SRC_BLEND_M0(ALPHA_PER_PIX) |
637 SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
638 SRC_FACTOR_M0(ALPHA_ONE);
639 VOP_WIN_SET(vop, win, src_alpha_ctl, val);
640 } else {
641 VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
642 }
643
644 VOP_WIN_SET(vop, win, enable, 1);
645
646 vop_cfg_done(vop);
647 spin_unlock(&vop->reg_lock);
648
649 return 0;
650}
651
652static int vop_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
653 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
654 unsigned int crtc_w, unsigned int crtc_h,
655 uint32_t src_x, uint32_t src_y, uint32_t src_w,
656 uint32_t src_h)
657{
658 return vop_update_plane_event(plane, crtc, fb, crtc_x, crtc_y, crtc_w,
659 crtc_h, src_x, src_y, src_w, src_h,
660 NULL);
661}
662
663static int vop_update_primary_plane(struct drm_crtc *crtc,
664 struct drm_pending_vblank_event *event)
665{
666 unsigned int crtc_w, crtc_h;
667
668 crtc_w = crtc->primary->fb->width - crtc->x;
669 crtc_h = crtc->primary->fb->height - crtc->y;
670
671 return vop_update_plane_event(crtc->primary, crtc, crtc->primary->fb,
672 0, 0, crtc_w, crtc_h, crtc->x << 16,
673 crtc->y << 16, crtc_w << 16,
674 crtc_h << 16, event);
675}
676
677static int vop_disable_plane(struct drm_plane *plane)
678{
679 struct vop_win *vop_win = to_vop_win(plane);
680 const struct vop_win_data *win = vop_win->data;
681 struct vop *vop;
682 int ret;
683
684 if (!plane->crtc)
685 return 0;
686
687 vop = to_vop(plane->crtc);
688
689 ret = drm_vblank_get(plane->dev, vop->pipe);
690 if (ret) {
691 DRM_ERROR("failed to get vblank, %d\n", ret);
692 return ret;
693 }
694
695 mutex_lock(&vop->vsync_mutex);
696
697 ret = vop_win_queue_fb(vop_win, NULL, 0, NULL);
698 if (ret) {
699 drm_vblank_put(plane->dev, vop->pipe);
700 mutex_unlock(&vop->vsync_mutex);
701 return ret;
702 }
703
704 vop->vsync_work_pending = true;
705 mutex_unlock(&vop->vsync_mutex);
706
707 spin_lock(&vop->reg_lock);
708 VOP_WIN_SET(vop, win, enable, 0);
709 vop_cfg_done(vop);
710 spin_unlock(&vop->reg_lock);
711
712 return 0;
713}
714
715static void vop_plane_destroy(struct drm_plane *plane)
716{
717 vop_disable_plane(plane);
718 drm_plane_cleanup(plane);
719}
720
721static const struct drm_plane_funcs vop_plane_funcs = {
722 .update_plane = vop_update_plane,
723 .disable_plane = vop_disable_plane,
724 .destroy = vop_plane_destroy,
725};
726
727int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
728 int connector_type,
729 int out_mode)
730{
731 struct vop *vop = to_vop(crtc);
732
733 vop->connector_type = connector_type;
734 vop->connector_out_mode = out_mode;
735
736 return 0;
737}
738
739static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
740{
741 struct vop *vop = to_vop(crtc);
742 unsigned long flags;
743
744 if (vop->dpms != DRM_MODE_DPMS_ON)
745 return -EPERM;
746
747 spin_lock_irqsave(&vop->irq_lock, flags);
748
749 vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(1));
750
751 spin_unlock_irqrestore(&vop->irq_lock, flags);
752
753 return 0;
754}
755
756static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
757{
758 struct vop *vop = to_vop(crtc);
759 unsigned long flags;
760
761 if (vop->dpms != DRM_MODE_DPMS_ON)
762 return;
763 spin_lock_irqsave(&vop->irq_lock, flags);
764 vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0));
765 spin_unlock_irqrestore(&vop->irq_lock, flags);
766}
767
768static const struct rockchip_crtc_funcs private_crtc_funcs = {
769 .enable_vblank = vop_crtc_enable_vblank,
770 .disable_vblank = vop_crtc_disable_vblank,
771};
772
773static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
774{
775 struct vop *vop = to_vop(crtc);
776
777 DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
778
779 if (vop->dpms == mode) {
780 DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
781 return;
782 }
783
784 switch (mode) {
785 case DRM_MODE_DPMS_ON:
786 vop_enable(crtc);
787 break;
788 case DRM_MODE_DPMS_STANDBY:
789 case DRM_MODE_DPMS_SUSPEND:
790 case DRM_MODE_DPMS_OFF:
791 vop_disable(crtc);
792 break;
793 default:
794 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
795 break;
796 }
797
798 vop->dpms = mode;
799}
800
801static void vop_crtc_prepare(struct drm_crtc *crtc)
802{
803 vop_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
804}
805
806static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
807 const struct drm_display_mode *mode,
808 struct drm_display_mode *adjusted_mode)
809{
810 if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
811 return false;
812
813 return true;
814}
815
816static int vop_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
817 struct drm_framebuffer *old_fb)
818{
819 int ret;
820
821 crtc->x = x;
822 crtc->y = y;
823
824 ret = vop_update_primary_plane(crtc, NULL);
825 if (ret < 0) {
826 DRM_ERROR("fail to update plane\n");
827 return ret;
828 }
829
830 return 0;
831}
832
833static int vop_crtc_mode_set(struct drm_crtc *crtc,
834 struct drm_display_mode *mode,
835 struct drm_display_mode *adjusted_mode,
836 int x, int y, struct drm_framebuffer *fb)
837{
838 struct vop *vop = to_vop(crtc);
839 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
840 u16 hdisplay = adjusted_mode->hdisplay;
841 u16 htotal = adjusted_mode->htotal;
842 u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
843 u16 hact_end = hact_st + hdisplay;
844 u16 vdisplay = adjusted_mode->vdisplay;
845 u16 vtotal = adjusted_mode->vtotal;
846 u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
847 u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
848 u16 vact_end = vact_st + vdisplay;
849 int ret;
850 uint32_t val;
851
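	/*
	 * Worked example of the timing fields above (illustrative): for CEA
	 * 1080p60 (hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal
	 * 2200), hsync_len = 44, hact_st = 192 and hact_end = 2112.
	 */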
852 /*
853	 * Disable dclk to stop the frame scan, so that we can safely configure
854	 * the mode and enable the iommu.
855 */
856 clk_disable(vop->dclk);
857
858 switch (vop->connector_type) {
859 case DRM_MODE_CONNECTOR_LVDS:
860 VOP_CTRL_SET(vop, rgb_en, 1);
861 break;
862 case DRM_MODE_CONNECTOR_eDP:
863 VOP_CTRL_SET(vop, edp_en, 1);
864 break;
865 case DRM_MODE_CONNECTOR_HDMIA:
866 VOP_CTRL_SET(vop, hdmi_en, 1);
867 break;
868 default:
869 DRM_ERROR("unsupport connector_type[%d]\n",
870 vop->connector_type);
871 return -EINVAL;
872	}
873 VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);
874
875 val = 0x8;
876 val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
877 val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? (1 << 1) : 0;
878 VOP_CTRL_SET(vop, pin_pol, val);
879
880 VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
881 val = hact_st << 16;
882 val |= hact_end;
883 VOP_CTRL_SET(vop, hact_st_end, val);
884 VOP_CTRL_SET(vop, hpost_st_end, val);
885
886 VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
887 val = vact_st << 16;
888 val |= vact_end;
889 VOP_CTRL_SET(vop, vact_st_end, val);
890 VOP_CTRL_SET(vop, vpost_st_end, val);
891
892 ret = vop_crtc_mode_set_base(crtc, x, y, fb);
893 if (ret)
894 return ret;
895
896 /*
897	 * Reset dclk so that all of the mode configuration takes effect and
898	 * the clock starts cleanly on the next frame.
899 */
900 reset_control_assert(vop->dclk_rst);
901 usleep_range(10, 20);
902 reset_control_deassert(vop->dclk_rst);
903
904 clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
905 ret = clk_enable(vop->dclk);
906 if (ret < 0) {
907 dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
908 return ret;
909 }
910
911 return 0;
912}
913
914static void vop_crtc_commit(struct drm_crtc *crtc)
915{
916}
917
918static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
919 .dpms = vop_crtc_dpms,
920 .prepare = vop_crtc_prepare,
921 .mode_fixup = vop_crtc_mode_fixup,
922 .mode_set = vop_crtc_mode_set,
923 .mode_set_base = vop_crtc_mode_set_base,
924 .commit = vop_crtc_commit,
925};
926
927static int vop_crtc_page_flip(struct drm_crtc *crtc,
928 struct drm_framebuffer *fb,
929 struct drm_pending_vblank_event *event,
930 uint32_t page_flip_flags)
931{
932 struct vop *vop = to_vop(crtc);
933 struct drm_framebuffer *old_fb = crtc->primary->fb;
934 int ret;
935
936 /* when the page flip is requested, crtc's dpms should be on */
937 if (vop->dpms > DRM_MODE_DPMS_ON) {
938 DRM_DEBUG("failed page flip request at dpms[%d].\n", vop->dpms);
939 return 0;
940 }
941
942 crtc->primary->fb = fb;
943
944 ret = vop_update_primary_plane(crtc, event);
945 if (ret)
946 crtc->primary->fb = old_fb;
947
948 return ret;
949}
950
951static void vop_win_state_complete(struct vop_win *vop_win,
952 struct vop_win_state *state)
953{
954 struct vop *vop = vop_win->vop;
955 struct drm_crtc *crtc = &vop->crtc;
956 struct drm_device *drm = crtc->dev;
957 unsigned long flags;
958
959 if (state->event) {
960 spin_lock_irqsave(&drm->event_lock, flags);
961 drm_send_vblank_event(drm, -1, state->event);
962 spin_unlock_irqrestore(&drm->event_lock, flags);
963 }
964
965 list_del(&state->head);
966 drm_vblank_put(crtc->dev, vop->pipe);
967}
968
969static void vop_crtc_destroy(struct drm_crtc *crtc)
970{
971 drm_crtc_cleanup(crtc);
972}
973
974static const struct drm_crtc_funcs vop_crtc_funcs = {
975 .set_config = drm_crtc_helper_set_config,
976 .page_flip = vop_crtc_page_flip,
977 .destroy = vop_crtc_destroy,
978};
979
980static bool vop_win_state_is_active(struct vop_win *vop_win,
981 struct vop_win_state *state)
982{
983 bool active = false;
984
985 if (state->fb) {
986 dma_addr_t yrgb_mst;
987
988 /* check yrgb_mst to tell if pending_fb is now front */
989 yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
990
991 active = (yrgb_mst == state->yrgb_mst);
992 } else {
993 bool enabled;
994
995 /* if enable bit is clear, plane is now disabled */
996 enabled = VOP_WIN_GET(vop_win->vop, vop_win->data, enable);
997
998 active = (enabled == 0);
999 }
1000
1001 return active;
1002}
1003
1004static void vop_win_state_destroy(struct vop_win_state *state)
1005{
1006 struct drm_framebuffer *fb = state->fb;
1007
1008 if (fb)
1009 drm_framebuffer_unreference(fb);
1010
1011 kfree(state);
1012}
1013
1014static void vop_win_update_state(struct vop_win *vop_win)
1015{
1016 struct vop_win_state *state, *n, *new_active = NULL;
1017
1018 /* Check if any pending states are now active */
1019 list_for_each_entry(state, &vop_win->pending, head)
1020 if (vop_win_state_is_active(vop_win, state)) {
1021 new_active = state;
1022 break;
1023 }
1024
1025 if (!new_active)
1026 return;
1027
1028 /*
1029 * Destroy any 'skipped' pending states - states that were queued
1030 * before the newly active state.
1031 */
1032 list_for_each_entry_safe(state, n, &vop_win->pending, head) {
1033 if (state == new_active)
1034 break;
1035 vop_win_state_complete(vop_win, state);
1036 vop_win_state_destroy(state);
1037 }
1038
1039 vop_win_state_complete(vop_win, new_active);
1040
1041 if (vop_win->active)
1042 vop_win_state_destroy(vop_win->active);
1043 vop_win->active = new_active;
1044}
1045
1046static bool vop_win_has_pending_state(struct vop_win *vop_win)
1047{
1048 return !list_empty(&vop_win->pending);
1049}
1050
1051static irqreturn_t vop_isr_thread(int irq, void *data)
1052{
1053 struct vop *vop = data;
1054 const struct vop_data *vop_data = vop->data;
1055 unsigned int i;
1056
1057 mutex_lock(&vop->vsync_mutex);
1058
1059 if (!vop->vsync_work_pending)
1060 goto done;
1061
1062 vop->vsync_work_pending = false;
1063
1064 for (i = 0; i < vop_data->win_size; i++) {
1065 struct vop_win *vop_win = &vop->win[i];
1066
1067 vop_win_update_state(vop_win);
1068 if (vop_win_has_pending_state(vop_win))
1069 vop->vsync_work_pending = true;
1070 }
1071
1072done:
1073 mutex_unlock(&vop->vsync_mutex);
1074
1075 return IRQ_HANDLED;
1076}
1077
1078static irqreturn_t vop_isr(int irq, void *data)
1079{
1080 struct vop *vop = data;
1081 uint32_t intr0_reg, active_irqs;
1082 unsigned long flags;
1083
1084 /*
1085 * INTR_CTRL0 register has interrupt status, enable and clear bits, we
1086 * must hold irq_lock to avoid a race with enable/disable_vblank().
1087 */
1088 spin_lock_irqsave(&vop->irq_lock, flags);
1089 intr0_reg = vop_readl(vop, INTR_CTRL0);
1090 active_irqs = intr0_reg & INTR_MASK;
1091 /* Clear all active interrupt sources */
1092 if (active_irqs)
1093 vop_writel(vop, INTR_CTRL0,
1094 intr0_reg | (active_irqs << INTR_CLR_SHIFT));
1095 spin_unlock_irqrestore(&vop->irq_lock, flags);
1096
1097 /* This is expected for vop iommu irqs, since the irq is shared */
1098 if (!active_irqs)
1099 return IRQ_NONE;
1100
1101 /* Only Frame Start Interrupt is enabled; other irqs are spurious. */
1102 if (!(active_irqs & FS_INTR)) {
1103 DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
1104 return IRQ_NONE;
1105 }
1106
1107 drm_handle_vblank(vop->drm_dev, vop->pipe);
1108
1109 return (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
1110}
1111
1112static int vop_create_crtc(struct vop *vop)
1113{
1114 const struct vop_data *vop_data = vop->data;
1115 struct device *dev = vop->dev;
1116 struct drm_device *drm_dev = vop->drm_dev;
1117 struct drm_plane *primary = NULL, *cursor = NULL, *plane;
1118 struct drm_crtc *crtc = &vop->crtc;
1119 struct device_node *port;
1120 int ret;
1121 int i;
1122
1123 /*
1124 * Create drm_plane for primary and cursor planes first, since we need
1125 * to pass them to drm_crtc_init_with_planes, which sets the
1126 * "possible_crtcs" to the newly initialized crtc.
1127 */
1128 for (i = 0; i < vop_data->win_size; i++) {
1129 struct vop_win *vop_win = &vop->win[i];
1130 const struct vop_win_data *win_data = vop_win->data;
1131
1132 if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
1133 win_data->type != DRM_PLANE_TYPE_CURSOR)
1134 continue;
1135
1136 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
1137 0, &vop_plane_funcs,
1138 win_data->phy->data_formats,
1139 win_data->phy->nformats,
1140 win_data->type);
1141 if (ret) {
1142 DRM_ERROR("failed to initialize plane\n");
1143 goto err_cleanup_planes;
1144 }
1145
1146 plane = &vop_win->base;
1147 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
1148 primary = plane;
1149 else if (plane->type == DRM_PLANE_TYPE_CURSOR)
1150 cursor = plane;
1151 }
1152
1153 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
1154 &vop_crtc_funcs);
1155 if (ret)
1156 return ret;
1157
1158 drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
1159
1160 /*
1161 * Create drm_planes for overlay windows with possible_crtcs restricted
1162 * to the newly created crtc.
1163 */
1164 for (i = 0; i < vop_data->win_size; i++) {
1165 struct vop_win *vop_win = &vop->win[i];
1166 const struct vop_win_data *win_data = vop_win->data;
1167 unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
1168
1169 if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
1170 continue;
1171
1172 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
1173 possible_crtcs,
1174 &vop_plane_funcs,
1175 win_data->phy->data_formats,
1176 win_data->phy->nformats,
1177 win_data->type);
1178 if (ret) {
1179 DRM_ERROR("failed to initialize overlay plane\n");
1180 goto err_cleanup_crtc;
1181 }
1182 }
1183
1184 port = of_get_child_by_name(dev->of_node, "port");
1185 if (!port) {
1186 DRM_ERROR("no port node found in %s\n",
1187 dev->of_node->full_name);
1188 goto err_cleanup_crtc;
1189 }
1190
1191 crtc->port = port;
1192 vop->pipe = drm_crtc_index(crtc);
1193 rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe);
1194
1195 return 0;
1196
1197err_cleanup_crtc:
1198 drm_crtc_cleanup(crtc);
1199err_cleanup_planes:
1200 list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
1201 drm_plane_cleanup(plane);
1202 return ret;
1203}
1204
1205static void vop_destroy_crtc(struct vop *vop)
1206{
1207 struct drm_crtc *crtc = &vop->crtc;
1208
1209 rockchip_unregister_crtc_funcs(vop->drm_dev, vop->pipe);
1210 of_node_put(crtc->port);
1211 drm_crtc_cleanup(crtc);
1212}
1213
1214static int vop_initial(struct vop *vop)
1215{
1216 const struct vop_data *vop_data = vop->data;
1217 const struct vop_reg_data *init_table = vop_data->init_table;
1218 struct reset_control *ahb_rst;
1219 int i, ret;
1220
1221 vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
1222 if (IS_ERR(vop->hclk)) {
1223 dev_err(vop->dev, "failed to get hclk source\n");
1224 return PTR_ERR(vop->hclk);
1225 }
1226 vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
1227 if (IS_ERR(vop->aclk)) {
1228 dev_err(vop->dev, "failed to get aclk source\n");
1229 return PTR_ERR(vop->aclk);
1230 }
1231 vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
1232 if (IS_ERR(vop->dclk)) {
1233 dev_err(vop->dev, "failed to get dclk source\n");
1234 return PTR_ERR(vop->dclk);
1235 }
1236
1237 ret = clk_prepare(vop->hclk);
1238 if (ret < 0) {
1239 dev_err(vop->dev, "failed to prepare hclk\n");
1240 return ret;
1241 }
1242
1243 ret = clk_prepare(vop->dclk);
1244 if (ret < 0) {
1245 dev_err(vop->dev, "failed to prepare dclk\n");
1246 goto err_unprepare_hclk;
1247 }
1248
1249 ret = clk_prepare(vop->aclk);
1250 if (ret < 0) {
1251 dev_err(vop->dev, "failed to prepare aclk\n");
1252 goto err_unprepare_dclk;
1253 }
1254
1255 /*
1256	 * Enable hclk, so that we can configure the vop registers.
1257 */
1258 ret = clk_enable(vop->hclk);
1259 if (ret < 0) {
1260		dev_err(vop->dev, "failed to enable hclk\n");
1261 goto err_unprepare_aclk;
1262 }
1263 /*
1264	 * Do the hclk (ahb) reset, which resets all vop registers.
1265 */
1266 ahb_rst = devm_reset_control_get(vop->dev, "ahb");
1267 if (IS_ERR(ahb_rst)) {
1268 dev_err(vop->dev, "failed to get ahb reset\n");
1269 ret = PTR_ERR(ahb_rst);
1270 goto err_disable_hclk;
1271 }
1272 reset_control_assert(ahb_rst);
1273 usleep_range(10, 20);
1274 reset_control_deassert(ahb_rst);
1275
1276 memcpy(vop->regsbak, vop->regs, vop->len);
1277
1278 for (i = 0; i < vop_data->table_size; i++)
1279 vop_writel(vop, init_table[i].offset, init_table[i].value);
1280
1281 for (i = 0; i < vop_data->win_size; i++) {
1282 const struct vop_win_data *win = &vop_data->win[i];
1283
1284 VOP_WIN_SET(vop, win, enable, 0);
1285 }
1286
1287 vop_cfg_done(vop);
1288
1289 /*
1290	 * Do the dclk reset so that all of the configuration takes effect.
1291 */
1292 vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
1293 if (IS_ERR(vop->dclk_rst)) {
1294 dev_err(vop->dev, "failed to get dclk reset\n");
1295 ret = PTR_ERR(vop->dclk_rst);
1296 goto err_unprepare_aclk;
1297 }
1298 reset_control_assert(vop->dclk_rst);
1299 usleep_range(10, 20);
1300 reset_control_deassert(vop->dclk_rst);
1301
1302 clk_disable(vop->hclk);
1303
1304 vop->dpms = DRM_MODE_DPMS_OFF;
1305
1306 return 0;
1307
1308err_disable_hclk:
1309 clk_disable(vop->hclk);
1310err_unprepare_aclk:
1311 clk_unprepare(vop->aclk);
1312err_unprepare_dclk:
1313 clk_unprepare(vop->dclk);
1314err_unprepare_hclk:
1315 clk_unprepare(vop->hclk);
1316 return ret;
1317}
1318
1319/*
1320 * Initialize the vop->win array elements.
1321 */
1322static void vop_win_init(struct vop *vop)
1323{
1324 const struct vop_data *vop_data = vop->data;
1325 unsigned int i;
1326
1327 for (i = 0; i < vop_data->win_size; i++) {
1328 struct vop_win *vop_win = &vop->win[i];
1329 const struct vop_win_data *win_data = &vop_data->win[i];
1330
1331 vop_win->data = win_data;
1332 vop_win->vop = vop;
1333 INIT_LIST_HEAD(&vop_win->pending);
1334 }
1335}
1336
1337static int vop_bind(struct device *dev, struct device *master, void *data)
1338{
1339 struct platform_device *pdev = to_platform_device(dev);
1340 const struct of_device_id *of_id;
1341 const struct vop_data *vop_data;
1342 struct drm_device *drm_dev = data;
1343 struct vop *vop;
1344 struct resource *res;
1345 size_t alloc_size;
1346 int ret;
1347
1348 of_id = of_match_device(vop_driver_dt_match, dev);
1349 vop_data = of_id->data;
1350 if (!vop_data)
1351 return -ENODEV;
1352
1353 /* Allocate vop struct and its vop_win array */
1354 alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
1355 vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
1356 if (!vop)
1357 return -ENOMEM;
1358
1359 vop->dev = dev;
1360 vop->data = vop_data;
1361 vop->drm_dev = drm_dev;
1362 dev_set_drvdata(dev, vop);
1363
1364 vop_win_init(vop);
1365
1366 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1367 vop->len = resource_size(res);
1368 vop->regs = devm_ioremap_resource(dev, res);
1369 if (IS_ERR(vop->regs))
1370 return PTR_ERR(vop->regs);
1371
1372 vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
1373 if (!vop->regsbak)
1374 return -ENOMEM;
1375
1376 ret = vop_initial(vop);
1377 if (ret < 0) {
1378		dev_err(&pdev->dev, "cannot initialize vop dev - err %d\n", ret);
1379 return ret;
1380 }
1381
1382 vop->irq = platform_get_irq(pdev, 0);
1383 if (vop->irq < 0) {
1384 dev_err(dev, "cannot find irq for vop\n");
1385 return vop->irq;
1386 }
1387
1388 spin_lock_init(&vop->reg_lock);
1389 spin_lock_init(&vop->irq_lock);
1390
1391 mutex_init(&vop->vsync_mutex);
1392
1393 ret = devm_request_threaded_irq(dev, vop->irq, vop_isr, vop_isr_thread,
1394 IRQF_SHARED, dev_name(dev), vop);
1395 if (ret)
1396 return ret;
1397
1398	/* IRQ is initially disabled; it gets enabled by vop_enable() */
1399 disable_irq(vop->irq);
1400
1401 ret = vop_create_crtc(vop);
1402 if (ret)
1403 return ret;
1404
1405 pm_runtime_enable(&pdev->dev);
1406 return 0;
1407}
1408
1409static void vop_unbind(struct device *dev, struct device *master, void *data)
1410{
1411 struct vop *vop = dev_get_drvdata(dev);
1412
1413 pm_runtime_disable(dev);
1414 vop_destroy_crtc(vop);
1415}
1416
1417static const struct component_ops vop_component_ops = {
1418 .bind = vop_bind,
1419 .unbind = vop_unbind,
1420};
1421
1422static int vop_probe(struct platform_device *pdev)
1423{
1424 struct device *dev = &pdev->dev;
1425
1426 if (!dev->of_node) {
1427 dev_err(dev, "can't find vop devices\n");
1428 return -ENODEV;
1429 }
1430
1431 return component_add(dev, &vop_component_ops);
1432}
1433
1434static int vop_remove(struct platform_device *pdev)
1435{
1436 component_del(&pdev->dev, &vop_component_ops);
1437
1438 return 0;
1439}
1440
1441struct platform_driver vop_platform_driver = {
1442 .probe = vop_probe,
1443 .remove = vop_remove,
1444 .driver = {
1445 .name = "rockchip-vop",
1446 .owner = THIS_MODULE,
1447 .of_match_table = of_match_ptr(vop_driver_dt_match),
1448 },
1449};
1450
1451module_platform_driver(vop_platform_driver);
1452
1453MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
1454MODULE_DESCRIPTION("ROCKCHIP VOP Driver");
1455MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
new file mode 100644
index 000000000000..63e9b3a084c5
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -0,0 +1,201 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ROCKCHIP_DRM_VOP_H
16#define _ROCKCHIP_DRM_VOP_H
17
18/* register definition */
19#define REG_CFG_DONE 0x0000
20#define VERSION_INFO 0x0004
21#define SYS_CTRL 0x0008
22#define SYS_CTRL1 0x000c
23#define DSP_CTRL0 0x0010
24#define DSP_CTRL1 0x0014
25#define DSP_BG 0x0018
26#define MCU_CTRL 0x001c
27#define INTR_CTRL0 0x0020
28#define INTR_CTRL1 0x0024
29#define WIN0_CTRL0 0x0030
30#define WIN0_CTRL1 0x0034
31#define WIN0_COLOR_KEY 0x0038
32#define WIN0_VIR 0x003c
33#define WIN0_YRGB_MST 0x0040
34#define WIN0_CBR_MST 0x0044
35#define WIN0_ACT_INFO 0x0048
36#define WIN0_DSP_INFO 0x004c
37#define WIN0_DSP_ST 0x0050
38#define WIN0_SCL_FACTOR_YRGB 0x0054
39#define WIN0_SCL_FACTOR_CBR 0x0058
40#define WIN0_SCL_OFFSET 0x005c
41#define WIN0_SRC_ALPHA_CTRL 0x0060
42#define WIN0_DST_ALPHA_CTRL 0x0064
43#define WIN0_FADING_CTRL 0x0068
44/* win1 register */
45#define WIN1_CTRL0 0x0070
46#define WIN1_CTRL1 0x0074
47#define WIN1_COLOR_KEY 0x0078
48#define WIN1_VIR 0x007c
49#define WIN1_YRGB_MST 0x0080
50#define WIN1_CBR_MST 0x0084
51#define WIN1_ACT_INFO 0x0088
52#define WIN1_DSP_INFO 0x008c
53#define WIN1_DSP_ST 0x0090
54#define WIN1_SCL_FACTOR_YRGB 0x0094
55#define WIN1_SCL_FACTOR_CBR 0x0098
56#define WIN1_SCL_OFFSET 0x009c
57#define WIN1_SRC_ALPHA_CTRL 0x00a0
58#define WIN1_DST_ALPHA_CTRL 0x00a4
59#define WIN1_FADING_CTRL 0x00a8
60/* win2 register */
61#define WIN2_CTRL0 0x00b0
62#define WIN2_CTRL1 0x00b4
63#define WIN2_VIR0_1 0x00b8
64#define WIN2_VIR2_3 0x00bc
65#define WIN2_MST0 0x00c0
66#define WIN2_DSP_INFO0 0x00c4
67#define WIN2_DSP_ST0 0x00c8
68#define WIN2_COLOR_KEY 0x00cc
69#define WIN2_MST1 0x00d0
70#define WIN2_DSP_INFO1 0x00d4
71#define WIN2_DSP_ST1 0x00d8
72#define WIN2_SRC_ALPHA_CTRL 0x00dc
73#define WIN2_MST2 0x00e0
74#define WIN2_DSP_INFO2 0x00e4
75#define WIN2_DSP_ST2 0x00e8
76#define WIN2_DST_ALPHA_CTRL 0x00ec
77#define WIN2_MST3 0x00f0
78#define WIN2_DSP_INFO3 0x00f4
79#define WIN2_DSP_ST3 0x00f8
80#define WIN2_FADING_CTRL 0x00fc
81/* win3 register */
82#define WIN3_CTRL0 0x0100
83#define WIN3_CTRL1 0x0104
84#define WIN3_VIR0_1 0x0108
85#define WIN3_VIR2_3 0x010c
86#define WIN3_MST0 0x0110
87#define WIN3_DSP_INFO0 0x0114
88#define WIN3_DSP_ST0 0x0118
89#define WIN3_COLOR_KEY 0x011c
90#define WIN3_MST1 0x0120
91#define WIN3_DSP_INFO1 0x0124
92#define WIN3_DSP_ST1 0x0128
93#define WIN3_SRC_ALPHA_CTRL 0x012c
94#define WIN3_MST2 0x0130
95#define WIN3_DSP_INFO2 0x0134
96#define WIN3_DSP_ST2 0x0138
97#define WIN3_DST_ALPHA_CTRL 0x013c
98#define WIN3_MST3 0x0140
99#define WIN3_DSP_INFO3 0x0144
100#define WIN3_DSP_ST3 0x0148
101#define WIN3_FADING_CTRL 0x014c
102/* hwc register */
103#define HWC_CTRL0 0x0150
104#define HWC_CTRL1 0x0154
105#define HWC_MST 0x0158
106#define HWC_DSP_ST 0x015c
107#define HWC_SRC_ALPHA_CTRL 0x0160
108#define HWC_DST_ALPHA_CTRL 0x0164
109#define HWC_FADING_CTRL 0x0168
110/* post process register */
111#define POST_DSP_HACT_INFO 0x0170
112#define POST_DSP_VACT_INFO 0x0174
113#define POST_SCL_FACTOR_YRGB 0x0178
114#define POST_SCL_CTRL 0x0180
115#define POST_DSP_VACT_INFO_F1 0x0184
116#define DSP_HTOTAL_HS_END 0x0188
117#define DSP_HACT_ST_END 0x018c
118#define DSP_VTOTAL_VS_END 0x0190
119#define DSP_VACT_ST_END 0x0194
120#define DSP_VS_ST_END_F1 0x0198
121#define DSP_VACT_ST_END_F1 0x019c
122/* register definition end */
123
124/* interrupt define */
125#define DSP_HOLD_VALID_INTR (1 << 0)
126#define FS_INTR (1 << 1)
127#define LINE_FLAG_INTR (1 << 2)
128#define BUS_ERROR_INTR (1 << 3)
129
130#define INTR_MASK (DSP_HOLD_VALID_INTR | FS_INTR | \
131 LINE_FLAG_INTR | BUS_ERROR_INTR)
132
133#define DSP_HOLD_VALID_INTR_EN(x) ((x) << 4)
134#define FS_INTR_EN(x) ((x) << 5)
135#define LINE_FLAG_INTR_EN(x) ((x) << 6)
136#define BUS_ERROR_INTR_EN(x) ((x) << 7)
137#define DSP_HOLD_VALID_INTR_MASK (1 << 4)
138#define FS_INTR_MASK (1 << 5)
139#define LINE_FLAG_INTR_MASK (1 << 6)
140#define BUS_ERROR_INTR_MASK (1 << 7)
141
142#define INTR_CLR_SHIFT 8
143#define DSP_HOLD_VALID_INTR_CLR (1 << (INTR_CLR_SHIFT + 0))
144#define FS_INTR_CLR (1 << (INTR_CLR_SHIFT + 1))
145#define LINE_FLAG_INTR_CLR (1 << (INTR_CLR_SHIFT + 2))
146#define BUS_ERROR_INTR_CLR (1 << (INTR_CLR_SHIFT + 3))
147
148#define DSP_LINE_NUM(x) (((x) & 0x1fff) << 12)
149#define DSP_LINE_NUM_MASK (0x1fff << 12)
150
151/* src alpha ctrl define */
152#define SRC_FADING_VALUE(x) (((x) & 0xff) << 24)
153#define SRC_GLOBAL_ALPHA(x) (((x) & 0xff) << 16)
154#define SRC_FACTOR_M0(x) (((x) & 0x7) << 6)
155#define SRC_ALPHA_CAL_M0(x) (((x) & 0x1) << 5)
156#define SRC_BLEND_M0(x) (((x) & 0x3) << 3)
157#define SRC_ALPHA_M0(x) (((x) & 0x1) << 2)
158#define SRC_COLOR_M0(x) (((x) & 0x1) << 1)
159#define SRC_ALPHA_EN(x) (((x) & 0x1) << 0)
160/* dst alpha ctrl define */
161#define DST_FACTOR_M0(x) (((x) & 0x7) << 6)
162
163/*
164 * display output interface supported by rockchip lcdc
165 */
166#define ROCKCHIP_OUT_MODE_P888 0
167#define ROCKCHIP_OUT_MODE_P666 1
168#define ROCKCHIP_OUT_MODE_P565 2
169/* for use special outface */
170#define ROCKCHIP_OUT_MODE_AAAA 15
171
172enum alpha_mode {
173 ALPHA_STRAIGHT,
174 ALPHA_INVERSE,
175};
176
177enum global_blend_mode {
178 ALPHA_GLOBAL,
179 ALPHA_PER_PIX,
180 ALPHA_PER_PIX_GLOBAL,
181};
182
183enum alpha_cal_mode {
184 ALPHA_SATURATION,
185 ALPHA_NO_SATURATION,
186};
187
188enum color_mode {
189 ALPHA_SRC_PRE_MUL,
190 ALPHA_SRC_NO_PRE_MUL,
191};
192
193enum factor_mode {
194 ALPHA_ZERO,
195 ALPHA_ONE,
196 ALPHA_SRC,
197 ALPHA_SRC_INVERSE,
198 ALPHA_SRC_GLOBAL,
199};
200
201#endif /* _ROCKCHIP_DRM_VOP_H */
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 8ce508e76208..3820ae97a030 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -93,7 +93,8 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  */
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-			   struct list_head *list, bool intr)
+			   struct list_head *list, bool intr,
+			   struct list_head *dups)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
@@ -117,6 +118,13 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			__ttm_bo_unreserve(bo);
 
 			ret = -EBUSY;
+
+		} else if (ret == -EALREADY && dups) {
+			struct ttm_validate_buffer *safe = entry;
+			entry = list_prev_entry(entry, head);
+			list_del(&safe->head);
+			list_add(&safe->head, dups);
+			continue;
 		}
 
 		if (!ret) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index db7621828bc7..7b5d22110f25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1062,8 +1062,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 
 	vmaster = vmw_master_check(dev, file_priv, flags);
 	if (unlikely(IS_ERR(vmaster))) {
-		DRM_INFO("IOCTL ERROR %d\n", nr);
-		return PTR_ERR(vmaster);
+		ret = PTR_ERR(vmaster);
+
+		if (ret != -ERESTARTSYS)
+			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
+				 nr, ret);
+		return ret;
 	}
 
 	ret = ioctl_func(filp, cmd, arg);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 596cd6dafd33..33176d05db35 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2487,7 +2487,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (unlikely(ret != 0))
 		goto out_err_nores;
 
-	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
+	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
+				     true, NULL);
 	if (unlikely(ret != 0))
 		goto out_err;
 
@@ -2677,7 +2678,8 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	query_val.shared = false;
 	list_add_tail(&query_val.head, &validate_list);
 
-	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
+	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
+				     false, NULL);
 	if (unlikely(ret != 0)) {
 		vmw_execbuf_unpin_panic(dev_priv);
 		goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 197164fd7803..b7594cb758af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -545,35 +545,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
 
 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
 {
-	struct vmw_fence_manager *fman = fman_from_fence(fence);
-
 	fence_free(&fence->base);
-
-	/*
-	 * Free kernel space accounting.
-	 */
-	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
-			    fman->fence_size);
 }
 
 int vmw_fence_create(struct vmw_fence_manager *fman,
 		     uint32_t seqno,
 		     struct vmw_fence_obj **p_fence)
 {
-	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
 	struct vmw_fence_obj *fence;
 	int ret;
 
-	ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
-				   false, false);
-	if (unlikely(ret != 0))
-		return ret;
-
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-	if (unlikely(fence == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_object;
-	}
+	if (unlikely(fence == NULL))
+		return -ENOMEM;
 
 	ret = vmw_fence_obj_init(fman, fence, seqno,
 				 vmw_fence_destroy);
@@ -585,8 +569,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
 
 out_err_init:
 	kfree(fence);
-out_no_object:
-	ttm_mem_global_free(mem_glob, fman->fence_size);
 	return ret;
 }
 
@@ -1105,6 +1087,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
 	if (ret != 0)
 		goto out_no_queue;
 
+	return 0;
+
 out_no_queue:
 	event->base.destroy(&event->base);
 out_no_event:
@@ -1180,17 +1164,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 
 	BUG_ON(fence == NULL);
 
-	if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
-		ret = vmw_event_fence_action_create(file_priv, fence,
-						    arg->flags,
-						    arg->user_data,
-						    true);
-	else
-		ret = vmw_event_fence_action_create(file_priv, fence,
-						    arg->flags,
-						    arg->user_data,
-						    true);
-
+	ret = vmw_event_fence_action_create(file_priv, fence,
+					    arg->flags,
+					    arg->user_data,
+					    true);
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS)
 			DRM_ERROR("Failed to attach event to fence.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 026de7cea0f6..210ef15b1d09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1222,7 +1222,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	val_buf->shared = false;
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
+	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 8719fb3cccc9..6a4584a43aa6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -198,7 +198,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.shid = res->id;
 	cmd->body.mobid = bo->mem.start;
-	cmd->body.offsetInBytes = 0;
+	cmd->body.offsetInBytes = res->backup_offset;
 	res->backup_dirty = false;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 63f3f03ecc9b..c604f4c3ac0d 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -111,6 +111,8 @@
 #define CDNS_I2C_DIVA_MAX	4
 #define CDNS_I2C_DIVB_MAX	64
 
+#define CDNS_I2C_TIMEOUT_MAX	0xFF
+
 #define cdns_i2c_readreg(offset)       readl_relaxed(id->membase + offset)
 #define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
 
@@ -852,6 +854,15 @@ static int cdns_i2c_probe(struct platform_device *pdev)
 		goto err_clk_dis;
 	}
 
+	/*
+	 * Cadence I2C controller has a bug wherein it generates
+	 * invalid read transaction after HW timeout in master receiver mode.
+	 * HW timeout is not used by this driver and the interrupt is disabled.
+	 * But the feature itself cannot be disabled. Hence maximum value
+	 * is written to this register to reduce the chances of error.
+	 */
+	cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+
 	dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
 		 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
 
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index d15b7c9b9219..01f0cd87a4a5 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -407,11 +407,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
 	if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
 		if (msg->flags & I2C_M_IGNORE_NAK)
 			return msg->len;
-		if (stop) {
-			w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
-			w |= DAVINCI_I2C_MDR_STP;
-			davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
-		}
+		w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+		w |= DAVINCI_I2C_MDR_STP;
+		davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
 		return -EREMOTEIO;
 	}
 	return -EIO;
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index edca99dbba23..23628b7bfb8d 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -359,7 +359,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 	}
 
 	/* Configure Tx/Rx FIFO threshold levels */
-	dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL);
+	dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL);
 	dw_writel(dev, 0, DW_IC_RX_TL);
 
 	/* configure the i2c master */
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 26942c159de1..277a2288d4a8 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -922,14 +922,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
 		if (stat & OMAP_I2C_STAT_NACK) {
 			err |= OMAP_I2C_STAT_NACK;
 			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
-			break;
 		}
 
 		if (stat & OMAP_I2C_STAT_AL) {
 			dev_err(dev->dev, "Arbitration lost\n");
 			err |= OMAP_I2C_STAT_AL;
 			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
-			break;
 		}
 
 		/*
@@ -954,11 +952,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
 			if (dev->fifo_size)
 				num_bytes = dev->buf_len;
 
-			omap_i2c_receive_data(dev, num_bytes, true);
-
-			if (dev->errata & I2C_OMAP_ERRATA_I207)
+			if (dev->errata & I2C_OMAP_ERRATA_I207) {
 				i2c_omap_errata_i207(dev, stat);
+				num_bytes = (omap_i2c_read_reg(dev,
+					OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
+			}
 
+			omap_i2c_receive_data(dev, num_bytes, true);
 			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
 			continue;
 		}
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index bc203485716d..8afa28e4570e 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file)
 
  err_free_client:
 	evdev_detach_client(evdev, client);
-	kfree(client);
+	kvfree(client);
 	return error;
 }
 
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dd5112265cc9..d0a1261eb1ba 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -152,6 +152,18 @@ config OMAP_IOMMU_DEBUG
 
 	  Say N unless you know you need this.
 
+config ROCKCHIP_IOMMU
+	bool "Rockchip IOMMU Support"
+	depends on ARCH_ROCKCHIP
+	select IOMMU_API
+	select ARM_DMA_USE_IOMMU
+	help
+	  Support for IOMMUs found on Rockchip rk32xx SOCs.
+	  These IOMMUs allow virtualization of the address space used by most
+	  cores within the multimedia subsystem.
+	  Say Y here if you are using a Rockchip SoC that includes an IOMMU
+	  device.
+
 config TEGRA_IOMMU_GART
 	bool "Tegra GART IOMMU Support"
 	depends on ARCH_TEGRA_2x_SOC
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 16edef74b8ee..3e47ef35a35f 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
+obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
new file mode 100644
index 000000000000..b2023af384b9
--- /dev/null
+++ b/drivers/iommu/rockchip-iommu.c
@@ -0,0 +1,1038 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 */
6
7#include <asm/cacheflush.h>
8#include <asm/pgtable.h>
9#include <linux/compiler.h>
10#include <linux/delay.h>
11#include <linux/device.h>
12#include <linux/errno.h>
13#include <linux/interrupt.h>
14#include <linux/io.h>
15#include <linux/iommu.h>
16#include <linux/jiffies.h>
17#include <linux/list.h>
18#include <linux/mm.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/spinlock.h>
25
26/** MMU register offsets */
27#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
28#define RK_MMU_STATUS 0x04
29#define RK_MMU_COMMAND 0x08
30#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */
31#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */
32#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */
33#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */
34#define RK_MMU_INT_MASK 0x1C /* IRQ enable */
35#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */
36#define RK_MMU_AUTO_GATING 0x24
37
38#define DTE_ADDR_DUMMY 0xCAFEBABE
39#define FORCE_RESET_TIMEOUT 100 /* ms */
40
41/* RK_MMU_STATUS fields */
42#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
43#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
44#define RK_MMU_STATUS_STALL_ACTIVE BIT(2)
45#define RK_MMU_STATUS_IDLE BIT(3)
46#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
47#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
48#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
49
50/* RK_MMU_COMMAND command values */
51#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */
52#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */
53#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */
54#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stalling; re-enables paging */
55#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */
56#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */
57#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */
58
59/* RK_MMU_INT_* register fields */
60#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */
61#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */
62#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
63
64#define NUM_DT_ENTRIES 1024
65#define NUM_PT_ENTRIES 1024
66
67#define SPAGE_ORDER 12
68#define SPAGE_SIZE (1 << SPAGE_ORDER)
69
70 /*
71 * Support mapping any size that fits in one page table:
72 * 4 KiB to 4 MiB
73 */
74#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
75
76#define IOMMU_REG_POLL_COUNT_FAST 1000
77
78struct rk_iommu_domain {
79 struct list_head iommus;
80 u32 *dt; /* page directory table */
81 spinlock_t iommus_lock; /* lock for iommus list */
82 spinlock_t dt_lock; /* lock for modifying page directory table */
83};
84
85struct rk_iommu {
86 struct device *dev;
87 void __iomem *base;
88 int irq;
89 struct list_head node; /* entry in rk_iommu_domain.iommus */
90 struct iommu_domain *domain; /* domain to which iommu is attached */
91};
92
93static inline void rk_table_flush(u32 *va, unsigned int count)
94{
95 phys_addr_t pa_start = virt_to_phys(va);
96 phys_addr_t pa_end = virt_to_phys(va + count);
97 size_t size = pa_end - pa_start;
98
99 __cpuc_flush_dcache_area(va, size);
100 outer_flush_range(pa_start, pa_end);
101}
102
103/**
104 * Inspired by _wait_for in intel_drv.h
105 * This is NOT safe for use in interrupt context.
106 *
107 * Note that it's important that we check the condition again after having
108 * timed out, since the timeout could be due to preemption or similar and
109 * we've never had a chance to check the condition before the timeout.
110 */
111#define rk_wait_for(COND, MS) ({ \
112 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
113 int ret__ = 0; \
114 while (!(COND)) { \
115 if (time_after(jiffies, timeout__)) { \
116 ret__ = (COND) ? 0 : -ETIMEDOUT; \
117 break; \
118 } \
119 usleep_range(50, 100); \
120 } \
121 ret__; \
122})
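/*
 * Typical use (illustrative; this pattern appears later in the driver):
 *
 *	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
 *	if (ret)
 *		dev_err(iommu->dev, "Stall request timed out\n");
 */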
123
124/*
125 * The Rockchip rk3288 iommu uses a 2-level page table.
126 * The first level is the "Directory Table" (DT).
127 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
128 * to a "Page Table".
129 * The second level is the 1024 Page Tables (PT).
130 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
131 * a 4 KB page of physical memory.
132 *
133 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
134 * Each iommu device has an MMU_DTE_ADDR register that contains the physical
135 * address of the start of the DT page.
136 *
137 * The structure of the page table is as follows:
138 *
139 * DT
140 * MMU_DTE_ADDR -> +-----+
141 * | |
142 * +-----+ PT
143 * | DTE | -> +-----+
144 * +-----+ | | Memory
145 * | | +-----+ Page
146 * | | | PTE | -> +-----+
147 * +-----+ +-----+ | |
148 * | | | |
149 * | | | |
150 * +-----+ | |
151 * | |
152 * | |
153 * +-----+
154 */
155
156/*
157 * Each DTE has a PT address and a valid bit:
158 * +---------------------+-----------+-+
159 * | PT address | Reserved |V|
160 * +---------------------+-----------+-+
161 * 31:12 - PT address (PTs always start on a 4 KB boundary)
162 * 11: 1 - Reserved
163 * 0 - 1 if PT @ PT address is valid
164 */
165#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
166#define RK_DTE_PT_VALID BIT(0)
167
168static inline phys_addr_t rk_dte_pt_address(u32 dte)
169{
170 return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
171}
172
173static inline bool rk_dte_is_pt_valid(u32 dte)
174{
175 return dte & RK_DTE_PT_VALID;
176}
177
178static u32 rk_mk_dte(u32 *pt)
179{
180 phys_addr_t pt_phys = virt_to_phys(pt);
181 return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
182}
183
184/*
185 * Each PTE has a Page address, some flags and a valid bit:
186 * +---------------------+---+-------+-+
187 * | Page address |Rsv| Flags |V|
188 * +---------------------+---+-------+-+
189 * 31:12 - Page address (Pages always start on a 4 KB boundary)
190 * 11: 9 - Reserved
191 * 8: 1 - Flags
192 * 8 - Read allocate - allocate cache space on read misses
193 * 7 - Read cache - enable cache & prefetch of data
194 * 6 - Write buffer - enable delaying writes on their way to memory
195 * 5 - Write allocate - allocate cache space on write misses
196 * 4 - Write cache - different writes can be merged together
197 * 3 - Override cache attributes
198 * if 1, bits 4-8 control cache attributes
199 * if 0, the system bus defaults are used
200 * 2 - Writable
201 * 1 - Readable
202 * 0 - 1 if Page @ Page address is valid
203 */
204#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000
205#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe
206#define RK_PTE_PAGE_WRITABLE BIT(2)
207#define RK_PTE_PAGE_READABLE BIT(1)
208#define RK_PTE_PAGE_VALID BIT(0)
209
210static inline phys_addr_t rk_pte_page_address(u32 pte)
211{
212 return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
213}
214
215static inline bool rk_pte_is_page_valid(u32 pte)
216{
217 return pte & RK_PTE_PAGE_VALID;
218}
219
220/* TODO: set cache flags per prot IOMMU_CACHE */
221static u32 rk_mk_pte(phys_addr_t page, int prot)
222{
223 u32 flags = 0;
224 flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
225 flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
226 page &= RK_PTE_PAGE_ADDRESS_MASK;
227 return page | flags | RK_PTE_PAGE_VALID;
228}
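/*
 * Worked example, assuming a physical page at 0x12345000:
 * rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE) == 0x12345007, i.e. the
 * page address with the readable (bit 1), writable (bit 2) and valid
 * (bit 0) bits set; the cache flag bits 3-8 are left at 0 for now.
 */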
229
230static u32 rk_mk_pte_invalid(u32 pte)
231{
232 return pte & ~RK_PTE_PAGE_VALID;
233}
234
235/*
236 * rk3288 iova (IOMMU Virtual Address) format
237 * 31 22.21 12.11 0
238 * +-----------+-----------+-------------+
239 * | DTE index | PTE index | Page offset |
240 * +-----------+-----------+-------------+
241 * 31:22 - DTE index - index of DTE in DT
242 * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address
243 * 11: 0 - Page offset - offset into page @ PTE.page_address
244 */
245#define RK_IOVA_DTE_MASK 0xffc00000
246#define RK_IOVA_DTE_SHIFT 22
247#define RK_IOVA_PTE_MASK 0x003ff000
248#define RK_IOVA_PTE_SHIFT 12
249#define RK_IOVA_PAGE_MASK 0x00000fff
250#define RK_IOVA_PAGE_SHIFT 0
251
252static u32 rk_iova_dte_index(dma_addr_t iova)
253{
254 return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
255}
256
257static u32 rk_iova_pte_index(dma_addr_t iova)
258{
259 return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
260}
261
262static u32 rk_iova_page_offset(dma_addr_t iova)
263{
264 return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
265}
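/*
 * Worked example, assuming iova 0x12345678:
 *	rk_iova_dte_index()   -> 0x048 (bits 31:22)
 *	rk_iova_pte_index()   -> 0x345 (bits 21:12)
 *	rk_iova_page_offset() -> 0x678 (bits 11:0)
 * so a walk reads DT[0x048], then PT[0x345], then adds 0x678 to the page
 * address found in the PTE.
 */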
266
267static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
268{
269 return readl(iommu->base + offset);
270}
271
272static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
273{
274 writel(value, iommu->base + offset);
275}
276
277static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
278{
279 writel(command, iommu->base + RK_MMU_COMMAND);
280}
281
282static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
283 size_t size)
284{
285 dma_addr_t iova_end = iova + size;
286 /*
287 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
288 * entire iotlb rather than iterate over individual iovas.
289 */
290 for (; iova < iova_end; iova += SPAGE_SIZE)
291 rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
292}
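/*
 * For scale: zapping a full page table's worth of mappings (4 MiB) this
 * way issues 4 MiB / SPAGE_SIZE = 1024 ZAP_ONE_LINE writes, which is the
 * trade-off the TODO above refers to.
 */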
293
294static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
295{
296 return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
297}
298
299static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
300{
301 return rk_iommu_read(iommu, RK_MMU_STATUS) &
302 RK_MMU_STATUS_PAGING_ENABLED;
303}
304
305static int rk_iommu_enable_stall(struct rk_iommu *iommu)
306{
307 int ret;
308
309 if (rk_iommu_is_stall_active(iommu))
310 return 0;
311
312 /* Stall can only be enabled if paging is enabled */
313 if (!rk_iommu_is_paging_enabled(iommu))
314 return 0;
315
316 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
317
318 ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
319 if (ret)
320 dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
321 rk_iommu_read(iommu, RK_MMU_STATUS));
322
323 return ret;
324}
325
326static int rk_iommu_disable_stall(struct rk_iommu *iommu)
327{
328 int ret;
329
330 if (!rk_iommu_is_stall_active(iommu))
331 return 0;
332
333 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
334
335 ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
336 if (ret)
337 dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
338 rk_iommu_read(iommu, RK_MMU_STATUS));
339
340 return ret;
341}
342
343static int rk_iommu_enable_paging(struct rk_iommu *iommu)
344{
345 int ret;
346
347 if (rk_iommu_is_paging_enabled(iommu))
348 return 0;
349
350 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
351
352 ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
353 if (ret)
354 dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
355 rk_iommu_read(iommu, RK_MMU_STATUS));
356
357 return ret;
358}
359
360static int rk_iommu_disable_paging(struct rk_iommu *iommu)
361{
362 int ret;
363
364 if (!rk_iommu_is_paging_enabled(iommu))
365 return 0;
366
367 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
368
369 ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
370 if (ret)
371 dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
372 rk_iommu_read(iommu, RK_MMU_STATUS));
373
374 return ret;
375}
376
377static int rk_iommu_force_reset(struct rk_iommu *iommu)
378{
379 int ret;
380 u32 dte_addr;
381
382 /*
383 * Check that the DTE_ADDR register is working by writing DTE_ADDR_DUMMY
384 * and verifying that the upper 5 nybbles are read back.
385 */
386 rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
387
388 dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
389 if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
390 dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
391 return -EFAULT;
392 }
393
394 rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
395
396 ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
397 FORCE_RESET_TIMEOUT);
398 if (ret)
399 dev_err(iommu->dev, "FORCE_RESET command timed out\n");
400
401 return ret;
402}
403
404static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
405{
406 u32 dte_index, pte_index, page_offset;
407 u32 mmu_dte_addr;
408 phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
409 u32 *dte_addr;
410 u32 dte;
411 phys_addr_t pte_addr_phys = 0;
412 u32 *pte_addr = NULL;
413 u32 pte = 0;
414 phys_addr_t page_addr_phys = 0;
415 u32 page_flags = 0;
416
417 dte_index = rk_iova_dte_index(iova);
418 pte_index = rk_iova_pte_index(iova);
419 page_offset = rk_iova_page_offset(iova);
420
421 mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
422 mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
423
424 dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
425 dte_addr = phys_to_virt(dte_addr_phys);
426 dte = *dte_addr;
427
428 if (!rk_dte_is_pt_valid(dte))
429 goto print_it;
430
431 pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
432 pte_addr = phys_to_virt(pte_addr_phys);
433 pte = *pte_addr;
434
435 if (!rk_pte_is_page_valid(pte))
436 goto print_it;
437
438 page_addr_phys = rk_pte_page_address(pte) + page_offset;
439 page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
440
441print_it:
442 dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
443 &iova, dte_index, pte_index, page_offset);
444 dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
445 &mmu_dte_addr_phys, &dte_addr_phys, dte,
446 rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
447 rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
448}
449
450static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
451{
452 struct rk_iommu *iommu = dev_id;
453 u32 status;
454 u32 int_status;
455 dma_addr_t iova;
456
457 int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
458 if (int_status == 0)
459 return IRQ_NONE;
460
461 iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
462
463 if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
464 int flags;
465
466 status = rk_iommu_read(iommu, RK_MMU_STATUS);
467 flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
468 IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
469
470 dev_err(iommu->dev, "Page fault at %pad of type %s\n",
471 &iova,
472 (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
473
474 log_iova(iommu, iova);
475
476 /*
477 * Report page fault to any installed handlers.
478 * Ignore the return code, though, since we always zap cache
479 * and clear the page fault anyway.
480 */
481 if (iommu->domain)
482 report_iommu_fault(iommu->domain, iommu->dev, iova,
483 flags);
484 else
485 dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
486
487 rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
488 rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
489 }
490
491 if (int_status & RK_MMU_IRQ_BUS_ERROR)
492 dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
493
494 if (int_status & ~RK_MMU_IRQ_MASK)
495 dev_err(iommu->dev, "unexpected int_status: %#08x\n",
496 int_status);
497
498 rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
499
500 return IRQ_HANDLED;
501}
502
503static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
504 dma_addr_t iova)
505{
506 struct rk_iommu_domain *rk_domain = domain->priv;
507 unsigned long flags;
508 phys_addr_t pt_phys, phys = 0;
509 u32 dte, pte;
510 u32 *page_table;
511
512 spin_lock_irqsave(&rk_domain->dt_lock, flags);
513
514 dte = rk_domain->dt[rk_iova_dte_index(iova)];
515 if (!rk_dte_is_pt_valid(dte))
516 goto out;
517
518 pt_phys = rk_dte_pt_address(dte);
519 page_table = (u32 *)phys_to_virt(pt_phys);
520 pte = page_table[rk_iova_pte_index(iova)];
521 if (!rk_pte_is_page_valid(pte))
522 goto out;
523
524 phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
525out:
526 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
527
528 return phys;
529}
530
531static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
532 dma_addr_t iova, size_t size)
533{
534 struct list_head *pos;
535 unsigned long flags;
536
537 /* shoot down this iova range from all iommus using this domain */
538 spin_lock_irqsave(&rk_domain->iommus_lock, flags);
539 list_for_each(pos, &rk_domain->iommus) {
540 struct rk_iommu *iommu;
541 iommu = list_entry(pos, struct rk_iommu, node);
542 rk_iommu_zap_lines(iommu, iova, size);
543 }
544 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
545}
546
547static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
548 dma_addr_t iova)
549{
550 u32 *page_table, *dte_addr;
551 u32 dte;
552 phys_addr_t pt_phys;
553
554 assert_spin_locked(&rk_domain->dt_lock);
555
556 dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)];
557 dte = *dte_addr;
558 if (rk_dte_is_pt_valid(dte))
559 goto done;
560
561 page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
562 if (!page_table)
563 return ERR_PTR(-ENOMEM);
564
565 dte = rk_mk_dte(page_table);
566 *dte_addr = dte;
567
568 rk_table_flush(page_table, NUM_PT_ENTRIES);
569 rk_table_flush(dte_addr, 1);
570
571 /*
572 * Zap the first iova of the newly allocated page table so the iommu
573 * evicts any stale value of the new dte from its iotlb.
574 */
575 rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
576
577done:
578 pt_phys = rk_dte_pt_address(dte);
579 return (u32 *)phys_to_virt(pt_phys);
580}
581
582static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
583 u32 *pte_addr, dma_addr_t iova, size_t size)
584{
585 unsigned int pte_count;
586 unsigned int pte_total = size / SPAGE_SIZE;
587
588 assert_spin_locked(&rk_domain->dt_lock);
589
590 for (pte_count = 0; pte_count < pte_total; pte_count++) {
591 u32 pte = pte_addr[pte_count];
592 if (!rk_pte_is_page_valid(pte))
593 break;
594
595 pte_addr[pte_count] = rk_mk_pte_invalid(pte);
596 }
597
598 rk_table_flush(pte_addr, pte_count);
599
600 return pte_count * SPAGE_SIZE;
601}
602
603static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
604 dma_addr_t iova, phys_addr_t paddr, size_t size,
605 int prot)
606{
607 unsigned int pte_count;
608 unsigned int pte_total = size / SPAGE_SIZE;
609 phys_addr_t page_phys;
610
611 assert_spin_locked(&rk_domain->dt_lock);
612
613 for (pte_count = 0; pte_count < pte_total; pte_count++) {
614 u32 pte = pte_addr[pte_count];
615
616 if (rk_pte_is_page_valid(pte))
617 goto unwind;
618
619 pte_addr[pte_count] = rk_mk_pte(paddr, prot);
620
621 paddr += SPAGE_SIZE;
622 }
623
624 rk_table_flush(pte_addr, pte_count);
625
626 return 0;
627unwind:
628 /* Unmap the range of iovas that we just mapped */
629 rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE);
630
631 iova += pte_count * SPAGE_SIZE;
632 page_phys = rk_pte_page_address(pte_addr[pte_count]);
633 pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
634 &iova, &page_phys, &paddr, prot);
635
636 return -EADDRINUSE;
637}
638
639static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
640 phys_addr_t paddr, size_t size, int prot)
641{
642 struct rk_iommu_domain *rk_domain = domain->priv;
643 unsigned long flags;
644 dma_addr_t iova = (dma_addr_t)_iova;
645 u32 *page_table, *pte_addr;
646 int ret;
647
648 spin_lock_irqsave(&rk_domain->dt_lock, flags);
649
650 /*
651 * pgsize_bitmap specifies iova sizes that fit in one page table
652 * (1024 4-KiB pages = 4 MiB).
653 * So, size will always be 4096 <= size <= 4194304.
654 * Since iommu_map() guarantees that both iova and size will be
655 * aligned, we will always only be mapping from a single dte here.
656 */
657 page_table = rk_dte_get_page_table(rk_domain, iova);
658 if (IS_ERR(page_table)) {
659 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
660 return PTR_ERR(page_table);
661 }
662
663 pte_addr = &page_table[rk_iova_pte_index(iova)];
664 ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot);
665 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
666
667 return ret;
668}
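/*
 * Caller-side sketch (core IOMMU API, not part of this driver): a master
 * attached to this domain maps a 4 KiB buffer with e.g.
 *
 *	err = iommu_map(domain, iova, paddr, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 *
 * the core splits larger, aligned requests into pgsize_bitmap-sized
 * chunks before calling rk_iommu_map() for each of them.
 */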
669
670static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
671 size_t size)
672{
673 struct rk_iommu_domain *rk_domain = domain->priv;
674 unsigned long flags;
675 dma_addr_t iova = (dma_addr_t)_iova;
676 phys_addr_t pt_phys;
677 u32 dte;
678 u32 *pte_addr;
679 size_t unmap_size;
680
681 spin_lock_irqsave(&rk_domain->dt_lock, flags);
682
683 /*
684 * pgsize_bitmap specifies iova sizes that fit in one page table
685 * (1024 4-KiB pages = 4 MiB).
686 * So, size will always be 4096 <= size <= 4194304.
687 * Since iommu_unmap() guarantees that both iova and size will be
688 * aligned, we will always only be unmapping from a single dte here.
689 */
690 dte = rk_domain->dt[rk_iova_dte_index(iova)];
691 /* Just return 0 if iova is unmapped */
692 if (!rk_dte_is_pt_valid(dte)) {
693 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
694 return 0;
695 }
696
697 pt_phys = rk_dte_pt_address(dte);
698 pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
699 unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size);
700
701 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
702
703 /* Shootdown iotlb entries for iova range that was just unmapped */
704 rk_iommu_zap_iova(rk_domain, iova, unmap_size);
705
706 return unmap_size;
707}
708
709static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
710{
711 struct iommu_group *group;
712 struct device *iommu_dev;
713 struct rk_iommu *rk_iommu;
714
715 group = iommu_group_get(dev);
716 if (!group)
717 return NULL;
718 iommu_dev = iommu_group_get_iommudata(group);
719 rk_iommu = dev_get_drvdata(iommu_dev);
720 iommu_group_put(group);
721
722 return rk_iommu;
723}
724
725static int rk_iommu_attach_device(struct iommu_domain *domain,
726 struct device *dev)
727{
728 struct rk_iommu *iommu;
729 struct rk_iommu_domain *rk_domain = domain->priv;
730 unsigned long flags;
731 int ret;
732 phys_addr_t dte_addr;
733
734 /*
735 * Allow 'virtual devices' (e.g., drm) to attach to domain.
736 * Such a device does not belong to an iommu group.
737 */
738 iommu = rk_iommu_from_dev(dev);
739 if (!iommu)
740 return 0;
741
742 ret = rk_iommu_enable_stall(iommu);
743 if (ret)
744 return ret;
745
746 ret = rk_iommu_force_reset(iommu);
747 if (ret)
748 return ret;
749
750 iommu->domain = domain;
751
752 ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
753 IRQF_SHARED, dev_name(dev), iommu);
754 if (ret)
755 return ret;
756
757 dte_addr = virt_to_phys(rk_domain->dt);
758 rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
759 rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
760 rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
761
762 ret = rk_iommu_enable_paging(iommu);
763 if (ret)
764 return ret;
765
766 spin_lock_irqsave(&rk_domain->iommus_lock, flags);
767 list_add_tail(&iommu->node, &rk_domain->iommus);
768 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
769
770 dev_info(dev, "Attached to iommu domain\n");
771
772 rk_iommu_disable_stall(iommu);
773
774 return 0;
775}
776
777static void rk_iommu_detach_device(struct iommu_domain *domain,
778 struct device *dev)
779{
780 struct rk_iommu *iommu;
781 struct rk_iommu_domain *rk_domain = domain->priv;
782 unsigned long flags;
783
784 /* Allow 'virtual devices' (e.g., drm) to detach from the domain */
785 iommu = rk_iommu_from_dev(dev);
786 if (!iommu)
787 return;
788
789 spin_lock_irqsave(&rk_domain->iommus_lock, flags);
790 list_del_init(&iommu->node);
791 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
792
793 /* Ignore error while disabling, just keep going */
794 rk_iommu_enable_stall(iommu);
795 rk_iommu_disable_paging(iommu);
796 rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
797 rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
798 rk_iommu_disable_stall(iommu);
799
800 devm_free_irq(dev, iommu->irq, iommu);
801
802 iommu->domain = NULL;
803
804 dev_info(dev, "Detached from iommu domain\n");
805}
806
807static int rk_iommu_domain_init(struct iommu_domain *domain)
808{
809 struct rk_iommu_domain *rk_domain;
810
811 rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
812 if (!rk_domain)
813 return -ENOMEM;
814
815 /*
816 * rk32xx iommus use a two-level page table.
817 * Each level 1 (dt) and level 2 (pt) table has 1024 4-byte entries.
818 * Allocate one 4 KiB page for each table.
819 */
820 rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
821 if (!rk_domain->dt)
822 goto err_dt;
823
824 rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES);
825
826 spin_lock_init(&rk_domain->iommus_lock);
827 spin_lock_init(&rk_domain->dt_lock);
828 INIT_LIST_HEAD(&rk_domain->iommus);
829
830 domain->priv = rk_domain;
831
832 return 0;
833err_dt:
834 kfree(rk_domain);
835 return -ENOMEM;
836}
837
838static void rk_iommu_domain_destroy(struct iommu_domain *domain)
839{
840 struct rk_iommu_domain *rk_domain = domain->priv;
841 int i;
842
843 WARN_ON(!list_empty(&rk_domain->iommus));
844
845 for (i = 0; i < NUM_DT_ENTRIES; i++) {
846 u32 dte = rk_domain->dt[i];
847 if (rk_dte_is_pt_valid(dte)) {
848 phys_addr_t pt_phys = rk_dte_pt_address(dte);
849 u32 *page_table = phys_to_virt(pt_phys);
850 free_page((unsigned long)page_table);
851 }
852 }
853
854 free_page((unsigned long)rk_domain->dt);
855 kfree(domain->priv);
856 domain->priv = NULL;
857}
858
859static bool rk_iommu_is_dev_iommu_master(struct device *dev)
860{
861 struct device_node *np = dev->of_node;
862 int ret;
863
864 /*
865 * An iommu master has an iommus property containing a list of phandles
866 * to iommu nodes, each with an #iommu-cells property with value 0.
867 */
868 ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
869 return (ret > 0);
870}
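/*
 * Illustrative device-tree fragment (node names and addresses are
 * examples only, not taken from a real board file):
 *
 *	vop_mmu: iommu@ff930300 {
 *		compatible = "rockchip,iommu";
 *		#iommu-cells = <0>;
 *	};
 *
 *	vop: vop@ff930000 {
 *		iommus = <&vop_mmu>;
 *	};
 *
 * of_count_phandle_with_args() returns 1 for the master node above.
 */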
871
872static int rk_iommu_group_set_iommudata(struct iommu_group *group,
873 struct device *dev)
874{
875 struct device_node *np = dev->of_node;
876 struct platform_device *pd;
877 int ret;
878 struct of_phandle_args args;
879
880 /*
881 * An iommu master has an iommus property containing a list of phandles
882 * to iommu nodes, each with an #iommu-cells property with value 0.
883 */
884 ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
885 &args);
886 if (ret) {
887 dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
888 np->full_name, ret);
889 return ret;
890 }
891 if (args.args_count != 0) {
892 dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
893 args.np->full_name, args.args_count);
894 return -EINVAL;
895 }
896
897 pd = of_find_device_by_node(args.np);
898 of_node_put(args.np);
899 if (!pd) {
900 dev_err(dev, "iommu %s not found\n", args.np->full_name);
901 return -EPROBE_DEFER;
902 }
903
904 /* TODO(djkurtz): handle multiple slave iommus for a single master */
905 iommu_group_set_iommudata(group, &pd->dev, NULL);
906
907 return 0;
908}
909
910static int rk_iommu_add_device(struct device *dev)
911{
912 struct iommu_group *group;
913 int ret;
914
915 if (!rk_iommu_is_dev_iommu_master(dev))
916 return -ENODEV;
917
918 group = iommu_group_get(dev);
919 if (!group) {
920 group = iommu_group_alloc();
921 if (IS_ERR(group)) {
922 dev_err(dev, "Failed to allocate IOMMU group\n");
923 return PTR_ERR(group);
924 }
925 }
926
927 ret = iommu_group_add_device(group, dev);
928 if (ret)
929 goto err_put_group;
930
931 ret = rk_iommu_group_set_iommudata(group, dev);
932 if (ret)
933 goto err_remove_device;
934
935 iommu_group_put(group);
936
937 return 0;
938
939err_remove_device:
940 iommu_group_remove_device(dev);
941err_put_group:
942 iommu_group_put(group);
943 return ret;
944}
945
946static void rk_iommu_remove_device(struct device *dev)
947{
948 if (!rk_iommu_is_dev_iommu_master(dev))
949 return;
950
951 iommu_group_remove_device(dev);
952}
953
954static const struct iommu_ops rk_iommu_ops = {
955 .domain_init = rk_iommu_domain_init,
956 .domain_destroy = rk_iommu_domain_destroy,
957 .attach_dev = rk_iommu_attach_device,
958 .detach_dev = rk_iommu_detach_device,
959 .map = rk_iommu_map,
960 .unmap = rk_iommu_unmap,
961 .add_device = rk_iommu_add_device,
962 .remove_device = rk_iommu_remove_device,
963 .iova_to_phys = rk_iommu_iova_to_phys,
964 .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
965};
966
967static int rk_iommu_probe(struct platform_device *pdev)
968{
969 struct device *dev = &pdev->dev;
970 struct rk_iommu *iommu;
971 struct resource *res;
972
973 iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
974 if (!iommu)
975 return -ENOMEM;
976
977 platform_set_drvdata(pdev, iommu);
978 iommu->dev = dev;
979
980 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
981 iommu->base = devm_ioremap_resource(&pdev->dev, res);
982 if (IS_ERR(iommu->base))
983 return PTR_ERR(iommu->base);
984
985 iommu->irq = platform_get_irq(pdev, 0);
986 if (iommu->irq < 0) {
987 dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
988 return -ENXIO;
989 }
990
991 return 0;
992}
993
994static int rk_iommu_remove(struct platform_device *pdev)
995{
996 return 0;
997}
998
999#ifdef CONFIG_OF
1000static const struct of_device_id rk_iommu_dt_ids[] = {
1001 { .compatible = "rockchip,iommu" },
1002 { /* sentinel */ }
1003};
1004MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
1005#endif
1006
1007static struct platform_driver rk_iommu_driver = {
1008 .probe = rk_iommu_probe,
1009 .remove = rk_iommu_remove,
1010 .driver = {
1011 .name = "rk_iommu",
1012 .owner = THIS_MODULE,
1013 .of_match_table = of_match_ptr(rk_iommu_dt_ids),
1014 },
1015};
1016
1017static int __init rk_iommu_init(void)
1018{
1019 int ret;
1020
1021 ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
1022 if (ret)
1023 return ret;
1024
1025 return platform_driver_register(&rk_iommu_driver);
1026}
1027static void __exit rk_iommu_exit(void)
1028{
1029 platform_driver_unregister(&rk_iommu_driver);
1030}
1031
1032subsys_initcall(rk_iommu_init);
1033module_exit(rk_iommu_exit);
1034
1035MODULE_DESCRIPTION("IOMMU API for Rockchip");
1036MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
1037MODULE_ALIAS("platform:rockchip-iommu");
1038MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 932ed9be9ff3..b10aaeda2bb4 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2190,7 +2190,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
 		ret = smiapp_set_compose(subdev, fh, sel);
 		break;
 	default:
-		BUG();
+		ret = -EINVAL;
 	}
 
 	mutex_unlock(&sensor->mutex);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 331eddac7222..3bd386c371f7 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1078,7 +1078,7 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
 	for (line = 0; line < lines; line++) {
 		while (offset && offset >= sg_dma_len(sg)) {
 			offset -= sg_dma_len(sg);
-			sg++;
+			sg = sg_next(sg);
 		}
 
 		if (lpi && line > 0 && !(line % lpi))
@@ -1101,14 +1101,14 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
 			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
 			todo -= (sg_dma_len(sg)-offset);
 			offset = 0;
-			sg++;
+			sg = sg_next(sg);
 			while (todo > sg_dma_len(sg)) {
 				*(rp++) = cpu_to_le32(RISC_WRITE|
 						      sg_dma_len(sg));
 				*(rp++) = cpu_to_le32(sg_dma_address(sg));
 				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
 				todo -= sg_dma_len(sg);
-				sg++;
+				sg = sg_next(sg);
 			}
 			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
 			*(rp++) = cpu_to_le32(sg_dma_address(sg));
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 172583d736fe..8cbe6b49f4c2 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -105,11 +105,8 @@ static irqreturn_t solo_isr(int irq, void *data)
 	if (!status)
 		return IRQ_NONE;
 
-	if (status & ~solo_dev->irq_mask) {
-		solo_reg_write(solo_dev, SOLO_IRQ_STAT,
-			       status & ~solo_dev->irq_mask);
-		status &= solo_dev->irq_mask;
-	}
+	/* Acknowledge all interrupts immediately */
+	solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
 
 	if (status & SOLO_IRQ_PCI_ERR)
 		solo_p2m_error_isr(solo_dev);
@@ -132,9 +129,6 @@ static irqreturn_t solo_isr(int irq, void *data)
 	if (status & SOLO_IRQ_G723)
 		solo_g723_isr(solo_dev);
 
-	/* Clear all interrupts handled */
-	solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
-
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f1f098e22f7e..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -259,8 +259,8 @@ again:
 	case 32:
 		if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
 			protocol = RC_TYPE_RC6_MCE;
-			scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
 			toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
+			scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
 		} else {
 			protocol = RC_BIT_RC6_6A_32;
 			toggle = 0;
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index ccc00099b261..1c0dbf428a3a 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -632,7 +632,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
 		break;
 	case V4L2_PIX_FMT_JPEG:
 	case V4L2_PIX_FMT_MJPEG:
-		buf->vb.v4l2_buf.length = jpgsize;
+		vb2_set_plane_payload(&buf->vb, 0, jpgsize);
 		memcpy(vbuf, tmpbuf, jpgsize);
 		break;
 	case V4L2_PIX_FMT_YUV422P:
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index c13d83e15ace..45f09a66e6c9 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -225,7 +225,12 @@ static int bond_changelink(struct net_device *bond_dev,
 
 		bond_option_arp_ip_targets_clear(bond);
 		nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
-			__be32 target = nla_get_be32(attr);
+			__be32 target;
+
+			if (nla_len(attr) < sizeof(target))
+				return -EINVAL;
+
+			target = nla_get_be32(attr);
 
 			bond_opt_initval(&newval, (__force u64)target);
 			err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8520d5529df8..279873cb6e3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2442,9 +2442,13 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
 			     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
 			     SUPPORTED_10000baseKX4_Full;
 	else if (type == FW_PORT_TYPE_FIBER_XFI ||
-		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
+		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
 		v |= SUPPORTED_FIBRE;
-	else if (type == FW_PORT_TYPE_BP40_BA)
+		if (caps & FW_PORT_CAP_SPEED_1G)
+			v |= SUPPORTED_1000baseT_Full;
+		if (caps & FW_PORT_CAP_SPEED_10G)
+			v |= SUPPORTED_10000baseT_Full;
+	} else if (type == FW_PORT_TYPE_BP40_BA)
 		v |= SUPPORTED_40000baseSR4_Full;
 
 	if (caps & FW_PORT_CAP_ANEG)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 60e9c2cd051e..b5db6b3f939f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -917,21 +917,13 @@ static int sh_eth_reset(struct net_device *ndev)
 	return ret;
 }
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 static void sh_eth_set_receive_align(struct sk_buff *skb)
 {
-	int reserve;
+	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
 
-	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
 	if (reserve)
-		skb_reserve(skb, reserve);
+		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
 }
-#else
-static void sh_eth_set_receive_align(struct sk_buff *skb)
-{
-	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
-}
-#endif
 
 
 /* CPU <-> EDMAC endian convert */
@@ -1119,6 +1111,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	struct sh_eth_txdesc *txdesc = NULL;
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
+	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
 
 	mdp->cur_rx = 0;
 	mdp->cur_tx = 0;
@@ -1131,21 +1124,21 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	for (i = 0; i < mdp->num_rx_ring; i++) {
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
-		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+		skb = netdev_alloc_skb(ndev, skbuff_size);
 		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-			       DMA_FROM_DEVICE);
 		sh_eth_set_receive_align(skb);
 
 		/* RX descriptor */
 		rxdesc = &mdp->rx_ring[i];
+		/* The size of the buffer is a multiple of 16 bytes. */
+		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
+			       DMA_FROM_DEVICE);
 		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
-		/* The size of the buffer is 16 byte boundary. */
-		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 		/* Rx descriptor address set */
 		if (i == 0) {
 			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
@@ -1397,6 +1390,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	struct sk_buff *skb;
 	u16 pkt_len = 0;
 	u32 desc_status;
+	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
 
 	rxdesc = &mdp->rx_ring[entry];
 	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
@@ -1448,7 +1442,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
 			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-						mdp->rx_buf_sz,
+						ALIGN(mdp->rx_buf_sz, 16),
 						DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
@@ -1468,13 +1462,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 
 		if (mdp->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+			skb = netdev_alloc_skb(ndev, skbuff_size);
 			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
-			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-				       DMA_FROM_DEVICE);
 			sh_eth_set_receive_align(skb);
+			dma_map_single(&ndev->dev, skb->data,
+				       rxdesc->buffer_length, DMA_FROM_DEVICE);
 
 			skb_checksum_none_assert(skb);
 			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
@@ -2042,6 +2036,8 @@ static int sh_eth_open(struct net_device *ndev)
 	if (ret)
 		goto out_free_irq;
 
+	mdp->is_opened = 1;
+
 	return ret;
 
 out_free_irq:
@@ -2131,6 +2127,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	if (sh_eth_is_rz_fast_ether(mdp))
+		return &ndev->stats;
+
+	if (!mdp->is_opened)
+		return &ndev->stats;
+
+	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
+	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
+	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
+	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
+
+	if (sh_eth_is_gether(mdp)) {
+		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
+		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
+	} else {
+		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
+	}
+
+	return &ndev->stats;
+}
+
 /* device close function */
 static int sh_eth_close(struct net_device *ndev)
 {
@@ -2145,6 +2171,7 @@ static int sh_eth_close(struct net_device *ndev)
 	sh_eth_write(ndev, 0, EDTRR);
 	sh_eth_write(ndev, 0, EDRRR);
 
+	sh_eth_get_stats(ndev);
 	/* PHY Disconnect */
 	if (mdp->phydev) {
 		phy_stop(mdp->phydev);
@@ -2163,36 +2190,9 @@ static int sh_eth_close(struct net_device *ndev)
 
 	pm_runtime_put_sync(&mdp->pdev->dev);
 
-	return 0;
-}
-
-static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
-{
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-
-	if (sh_eth_is_rz_fast_ether(mdp))
-		return &ndev->stats;
+	mdp->is_opened = 0;
 
-	pm_runtime_get_sync(&mdp->pdev->dev);
-
-	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
-	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
-	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
-	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
-	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
-	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
-	if (sh_eth_is_gether(mdp)) {
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
-		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
-		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
-	} else {
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
-		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
-	}
-	pm_runtime_put_sync(&mdp->pdev->dev);
-
-	return &ndev->stats;
+	return 0;
 }
 
 /* ioctl to device function */
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index b37c427144ee..22301bf9c21d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -162,9 +162,9 @@ enum {
 
 /* Driver's parameters */
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-#define SH4_SKB_RX_ALIGN	32
+#define SH_ETH_RX_ALIGN		32
 #else
-#define SH2_SH3_SKB_RX_ALIGN	2
+#define SH_ETH_RX_ALIGN		2
 #endif
 
 /* Register's bits
@@ -522,6 +522,7 @@ struct sh_eth_private {
 
 	unsigned no_ether_link:1;
 	unsigned ether_link_active_low:1;
+	unsigned is_opened:1;
 };
 
 static inline void sh_eth_soft_swap(char *src, int len)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5b0da3986216..58a1a0a423d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -265,6 +265,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
 
 	plat_dat = dev_get_platdata(&pdev->dev);
 
+	if (!plat_dat)
+		plat_dat = devm_kzalloc(&pdev->dev,
+					sizeof(struct plat_stmmacenet_data),
+					GFP_KERNEL);
+	if (!plat_dat) {
+		pr_err("%s: ERROR: no memory", __func__);
+		return -ENOMEM;
+	}
+
 	/* Set default value for multicast hash bins */
 	plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
 
@@ -272,15 +281,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
 	plat_dat->unicast_filter_entries = 1;
 
 	if (pdev->dev.of_node) {
-		if (!plat_dat)
-			plat_dat = devm_kzalloc(&pdev->dev,
-					sizeof(struct plat_stmmacenet_data),
-					GFP_KERNEL);
-		if (!plat_dat) {
-			pr_err("%s: ERROR: no memory", __func__);
-			return -ENOMEM;
-		}
-
 		ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
 		if (ret) {
 			pr_err("%s: main dt probe failed", __func__);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cca871346a0f..ece8d1804d13 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -496,9 +496,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
 		len = skb_frag_size(frag);
 		offset = frag->page_offset;
 
-		/* Data must not cross a page boundary. */
-		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
-
 		/* Skip unused frames from start of page */
 		page += offset >> PAGE_SHIFT;
 		offset &= ~PAGE_MASK;
@@ -506,8 +503,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
 		while (len > 0) {
 			unsigned long bytes;
 
-			BUG_ON(offset >= PAGE_SIZE);
-
 			bytes = PAGE_SIZE - offset;
 			if (bytes > len)
 				bytes = len;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 30e97bcc4f88..d134710de96d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -964,8 +964,6 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
 					phys_addr_t size, bool nomap)
 {
-	if (memblock_is_region_reserved(base, size))
-		return -EBUSY;
 	if (nomap)
 		return memblock_remove(base, size);
 	return memblock_reserve(base, size);
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 3d43874319be..19bb19c7db4a 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -276,6 +276,7 @@ struct tegra_pcie {
 
 	struct resource all;
 	struct resource io;
+	struct resource pio;
 	struct resource mem;
 	struct resource prefetch;
 	struct resource busn;
@@ -658,7 +659,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 {
 	struct tegra_pcie *pcie = sys_to_pcie(sys);
 	int err;
-	phys_addr_t io_start;
 
 	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
 	if (err < 0)
@@ -668,14 +668,12 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 	if (err)
 		return err;
 
-	io_start = pci_pio_to_address(pcie->io.start);
-
 	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
 	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
 				sys->mem_offset);
 	pci_add_resource(&sys->resources, &pcie->busn);
 
-	pci_ioremap_io(nr * SZ_64K, io_start);
+	pci_ioremap_io(pcie->pio.start, pcie->io.start);
 
 	return 1;
 }
@@ -786,7 +784,6 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
 static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 {
 	u32 fpci_bar, size, axi_address;
-	phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
 
 	/* Bar 0: type 1 extended configuration space */
 	fpci_bar = 0xfe100000;
@@ -799,7 +796,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 	/* Bar 1: downstream IO bar */
 	fpci_bar = 0xfdfc0000;
 	size = resource_size(&pcie->io);
-	axi_address = io_start;
+	axi_address = pcie->io.start;
 	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
 	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
 	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
@@ -1690,8 +1687,23 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 
 		switch (res.flags & IORESOURCE_TYPE_BITS) {
 		case IORESOURCE_IO:
-			memcpy(&pcie->io, &res, sizeof(res));
-			pcie->io.name = np->full_name;
+			memcpy(&pcie->pio, &res, sizeof(res));
+			pcie->pio.name = np->full_name;
+
+			/*
+			 * The Tegra PCIe host bridge uses this to program the
+			 * mapping of the I/O space to the physical address,
+			 * so we override the .start and .end fields here that
+			 * of_pci_range_to_resource() converted to I/O space.
+			 * We also set the IORESOURCE_MEM type to clarify that
+			 * the resource is in the physical memory space.
+			 */
+			pcie->io.start = range.cpu_addr;
+			pcie->io.end = range.cpu_addr + range.size - 1;
+			pcie->io.flags = IORESOURCE_MEM;
+			pcie->io.name = "I/O";
+
+			memcpy(&res, &pcie->io, sizeof(res));
 			break;
 
 		case IORESOURCE_MEM:
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 8532c3e2aea7..1626dc66e763 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -161,7 +161,7 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
 static const struct s3c2410_wdt_variant drv_data_exynos7 = {
 	.disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
 	.mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
-	.mask_bit = 0,
+	.mask_bit = 23,
 	.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
 	.rst_stat_bit = 23,	/* A57 WDTRESET */
 	.quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 6df8d3d885e5..b8b92c2f9683 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
 	}
 
 	alias = d_find_alias(inode);
-	if (alias && !vfat_d_anon_disconn(alias)) {
+	/*
+	 * Checking "alias->d_parent == dentry->d_parent" to make sure
+	 * FS is not corrupted (especially double linked dir).
+	 */
+	if (alias && alias->d_parent == dentry->d_parent &&
+	    !vfat_d_anon_disconn(alias)) {
 		/*
 		 * This inode has non anonymous-DCACHE_DISCONNECTED
 		 * dentry. This means, the user did ->lookup() by an
@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
 
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
-	dentry->d_time = dentry->d_parent->d_inode->i_version;
-	dentry = d_splice_alias(inode, dentry);
-	if (dentry)
-		dentry->d_time = dentry->d_parent->d_inode->i_version;
-	return dentry;
-
+	if (!inode)
+		dentry->d_time = dir->i_version;
+	return d_splice_alias(inode, dentry);
 error:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
 	return ERR_PTR(err);
@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
 	/* timestamp is already written, so mark_inode_dirty() is unneeded. */
 
-	dentry->d_time = dentry->d_parent->d_inode->i_version;
 	d_instantiate(dentry, inode);
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
 	clear_nlink(inode);
 	inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
 	fat_detach(inode);
+	dentry->d_time = dir->i_version;
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
 	clear_nlink(inode);
 	inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
 	fat_detach(inode);
+	dentry->d_time = dir->i_version;
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
 	/* timestamp is already written, so mark_inode_dirty() is unneeded. */
 
-	dentry->d_time = dentry->d_parent->d_inode->i_version;
 	d_instantiate(dentry, inode);
 
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e4dc74713a43..1df94fabe4eb 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1853,13 +1853,12 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
 			journal->j_chksum_driver = NULL;
 			return 0;
 		}
-	}
 
 		/* Precompute checksum seed for all metadata */
-	if (jbd2_journal_has_csum_v2or3(journal))
 		journal->j_csum_seed = jbd2_chksum(journal, ~0,
 						   sb->s_uuid,
 						   sizeof(sb->s_uuid));
+	}
 	}
 
 	/* If enabling v1 checksums, downgrade superblock */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index dd2c16e43333..b86329813ad3 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -137,6 +137,14 @@ struct drm_display_info {
 	u8 cea_rev;
 };
 
+/* data corresponds to displayid vend/prod/serial */
+struct drm_tile_group {
+	struct kref refcount;
+	struct drm_device *dev;
+	int id;
+	u8 group_data[8];
+};
+
 struct drm_framebuffer_funcs {
 	/* note: use drm_framebuffer_remove() */
 	void (*destroy)(struct drm_framebuffer *framebuffer);
@@ -599,6 +607,15 @@ struct drm_encoder {
  * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
  * @debugfs_entry: debugfs directory for this connector
  * @state: current atomic state for this connector
+ * @has_tile: is this connector connected to a tiled monitor
+ * @tile_group: tile group for the connected monitor
+ * @tile_is_single_monitor: whether the tile is one monitor housing
+ * @num_h_tile: number of horizontal tiles in the tile group
+ * @num_v_tile: number of vertical tiles in the tile group
+ * @tile_h_loc: horizontal location of this tile
+ * @tile_v_loc: vertical location of this tile
+ * @tile_h_size: horizontal size of this tile.
+ * @tile_v_size: vertical size of this tile.
  *
  * Each connector may be connected to one or more CRTCs, or may be clonable by
  * another connector if they can share a CRTC. Each connector also has a specific
@@ -634,6 +651,8 @@ struct drm_connector {
 
 	struct drm_property_blob *path_blob_ptr;
 
+	struct drm_property_blob *tile_blob_ptr;
+
 	uint8_t polled; /* DRM_CONNECTOR_POLL_* */
 
 	/* requested DPMS state */
@@ -661,6 +680,15 @@ struct drm_connector {
 	struct dentry *debugfs_entry;
 
 	struct drm_connector_state *state;
+
+	/* DisplayID bits */
+	bool has_tile;
+	struct drm_tile_group *tile_group;
+	bool tile_is_single_monitor;
+
+	uint8_t num_h_tile, num_v_tile;
+	uint8_t tile_h_loc, tile_v_loc;
+	uint16_t tile_h_size, tile_v_size;
 };
 
 /**
@@ -978,6 +1006,7 @@ struct drm_mode_config {
 	struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
 	struct mutex idr_mutex; /* for IDR management */
 	struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
+	struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
 	/* this is limited to one for now */
 
 	struct mutex fb_lock; /* proctects global and per-file fb lists */
@@ -1021,6 +1050,7 @@ struct drm_mode_config {
 	struct drm_property *edid_property;
 	struct drm_property *dpms_property;
 	struct drm_property *path_property;
+	struct drm_property *tile_property;
 	struct drm_property *plane_type_property;
 	struct drm_property *rotation_property;
 
@@ -1190,6 +1220,7 @@ extern void drm_mode_config_cleanup(struct drm_device *dev);
 
 extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
 						const char *path);
+int drm_mode_connector_set_tile_property(struct drm_connector *connector);
 extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 						   const struct edid *edid);
 
@@ -1326,6 +1357,13 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
 extern int drm_edid_header_is_valid(const u8 *raw_edid);
 extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
 extern bool drm_edid_is_valid(struct edid *edid);
+
+extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+							 char topology[8]);
+extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+						      char topology[8]);
+extern void drm_mode_put_tile_group(struct drm_device *dev,
+				    struct drm_tile_group *tg);
 struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
 					   int hsize, int vsize, int fresh,
 					   bool rb);
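Editor's note: the three tile-group helpers declared above manage kref-counted groups keyed by the 8-byte DisplayID topology id. As a minimal sketch, assuming both lookup and create return a referenced group (NULL on failure) and that the reference is later dropped with drm_mode_put_tile_group() — the function and variable names below are illustrative, not part of this patch — a connector probe path could bind itself to a group like this:

/* Hedged sketch: share one tile group among all connectors that
 * report the same DisplayID topology id. */
static int example_bind_tile_group(struct drm_connector *connector,
				   char topology_id[8])
{
	struct drm_device *dev = connector->dev;
	struct drm_tile_group *tg;

	tg = drm_mode_get_tile_group(dev, topology_id);
	if (!tg)
		tg = drm_mode_create_tile_group(dev, topology_id);
	if (!tg)
		return -ENOMEM;

	connector->has_tile = true;
	connector->tile_group = tg; /* holds a reference until teardown */
	return 0;
}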
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
new file mode 100644
index 000000000000..623b4e98e748
--- /dev/null
+++ b/include/drm/drm_displayid.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright © 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef DRM_DISPLAYID_H
+#define DRM_DISPLAYID_H
+
+#define DATA_BLOCK_PRODUCT_ID 0x00
+#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
+#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
+#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
+#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
+#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
+#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
+#define DATA_BLOCK_VESA_TIMING 0x07
+#define DATA_BLOCK_CEA_TIMING 0x08
+#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
+#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
+#define DATA_BLOCK_GP_ASCII_STRING 0x0b
+#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
+#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
+#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
+#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+#define DATA_BLOCK_TILED_DISPLAY 0x12
+
+#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+
+#define PRODUCT_TYPE_EXTENSION 0
+#define PRODUCT_TYPE_TEST 1
+#define PRODUCT_TYPE_PANEL 2
+#define PRODUCT_TYPE_MONITOR 3
+#define PRODUCT_TYPE_TV 4
+#define PRODUCT_TYPE_REPEATER 5
+#define PRODUCT_TYPE_DIRECT_DRIVE 6
+
+struct displayid_hdr {
+	u8 rev;
+	u8 bytes;
+	u8 prod_id;
+	u8 ext_count;
+} __packed;
+
+struct displayid_block {
+	u8 tag;
+	u8 rev;
+	u8 num_bytes;
+} __packed;
+
+struct displayid_tiled_block {
+	struct displayid_block base;
+	u8 tile_cap;
+	u8 topo[3];
+	u8 tile_size[4];
+	u8 tile_pixel_bezel[5];
+	u8 topology_id[8];
+} __packed;
+
+#endif
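Editor's note: DisplayID data blocks are packed back to back after the section header — each starts with the three-byte struct displayid_block and carries num_bytes of payload. A minimal sketch of a bounds-checked walk over one section that picks out the tiled-display block (the helper name and the length parameter are assumptions, not from this header):

/* Hedged sketch: iterate the data blocks of one DisplayID section and
 * return the tiled-display block, if any. */
static const struct displayid_tiled_block *
example_find_tiled_block(const u8 *section, int length)
{
	int idx = sizeof(struct displayid_hdr);

	while (idx + (int)sizeof(struct displayid_block) <= length) {
		const struct displayid_block *block =
			(const struct displayid_block *)(section + idx);

		/* payload must also fit inside the section */
		if (idx + (int)sizeof(*block) + block->num_bytes > length)
			break;
		if (block->tag == DATA_BLOCK_TILED_DISPLAY)
			return (const struct displayid_tiled_block *)block;
		idx += sizeof(*block) + block->num_bytes;
	}
	return NULL;
}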
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index cec6383bbdb8..00c1da927245 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -92,6 +92,8 @@ struct drm_dp_mst_port {
 	struct drm_dp_vcpi vcpi;
 	struct drm_connector *connector;
 	struct drm_dp_mst_topology_mgr *mgr;
+
+	struct edid *cached_edid; /* for DP logical ports - make tiling work */
 };
 
 /**
@@ -474,7 +476,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
 
 
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
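Editor's note: drm_dp_mst_detect_port() now takes the connector as well, which lets the helper hand back the cached_edid kept for DP logical ports. A driver's .detect hook would simply pass its own connector through; a sketch, with the container struct and accessor invented for illustration:

static enum drm_connector_status
example_mst_detect(struct drm_connector *connector, bool force)
{
	/* to_example_mst_connector() is a hypothetical container_of() wrapper */
	struct example_mst_connector *c = to_example_mst_connector(connector);

	return drm_dp_mst_detect_port(connector, c->mgr, c->port);
}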
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index d59240ffb1f7..87d85e81d3a7 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -27,12 +27,14 @@
 
 #define EDID_LENGTH 128
 #define DDC_ADDR 0x50
+#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
 
 #define CEA_EXT 0x02
 #define VTB_EXT 0x10
 #define DI_EXT 0x40
 #define LS_EXT 0x50
 #define MI_EXT 0x60
+#define DISPLAYID_EXT 0x70
 
 struct est_timings {
 	u8 t1;
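Editor's note: DDC_ADDR2 (0x52) is the E-DDC 1.2 slave address where a standalone DisplayID structure may sit alongside the usual EDID at 0x50. A hedged sketch of fetching one 128-byte block from it, modelled on a segment-less DDC read (the offset handling is an assumption about the sink, not something this patch defines):

#include <linux/i2c.h>

/* Hedged sketch: read EDID_LENGTH bytes of DisplayID from DDC_ADDR2. */
static int example_read_displayid(struct i2c_adapter *adapter, u8 *buf)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = DDC_ADDR2, .flags = 0,
		  .len = 1, .buf = &offset },
		{ .addr = DDC_ADDR2, .flags = I2C_M_RD,
		  .len = EDID_LENGTH, .buf = buf },
	};

	return i2c_transfer(adapter, msgs, 2) == 2 ? 0 : -EIO;
}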
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index f4ad254e3488..b597068103aa 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -34,9 +34,14 @@ struct drm_fb_helper;
 
 #include <linux/kgdb.h>
 
+struct drm_fb_offset {
+	int x, y;
+};
+
 struct drm_fb_helper_crtc {
 	struct drm_mode_set mode_set;
 	struct drm_display_mode *desired_mode;
+	int x, y;
 };
 
 struct drm_fb_helper_surface_size {
@@ -72,6 +77,7 @@ struct drm_fb_helper_funcs {
 	bool (*initial_config)(struct drm_fb_helper *fb_helper,
 			       struct drm_fb_helper_crtc **crtcs,
 			       struct drm_display_mode **modes,
+			       struct drm_fb_offset *offsets,
 			       bool *enabled, int width, int height);
 };
 
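Editor's note: the new offsets array lets .initial_config place each CRTC's scanout inside a framebuffer shared by several tiles instead of always scanning out from (0, 0). Assuming the tile fields added to struct drm_connector earlier in this series, one tile's offset falls out of its location and size; a sketch with an illustrative helper name, assuming the caller pre-zeroed the array:

/* Hedged sketch: position a tile's CRTC inside the shared framebuffer. */
static void example_tile_offset(struct drm_connector *connector,
				struct drm_fb_offset *offset)
{
	if (!connector->has_tile)
		return; /* leave the offset at (0, 0) */

	offset->x = connector->tile_h_loc * connector->tile_h_size;
	offset->y = connector->tile_v_loc * connector->tile_v_size;
}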
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 460441714413..b620c317c772 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -68,6 +68,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
68 * non-blocking reserves should be tried. 68 * non-blocking reserves should be tried.
69 * @list: thread private list of ttm_validate_buffer structs. 69 * @list: thread private list of ttm_validate_buffer structs.
70 * @intr: should the wait be interruptible 70 * @intr: should the wait be interruptible
71 * @dups: [out] optional list of duplicates.
71 * 72 *
72 * Tries to reserve bos pointed to by the list entries for validation. 73 * Tries to reserve bos pointed to by the list entries for validation.
73 * If the function returns 0, all buffers are marked as "unfenced", 74 * If the function returns 0, all buffers are marked as "unfenced",
@@ -83,6 +84,11 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  * calling process receives a signal while waiting. In that case, no
  * buffers on the list will be reserved upon return.
  *
+ * If dups is non NULL all buffers already reserved by the current thread
+ * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned
+ * on the first already reserved buffer and all buffers from the list are
+ * unreserved again.
+ *
  * Buffers reserved by this function should be unreserved by
  * a call to either ttm_eu_backoff_reservation() or
  * ttm_eu_fence_buffer_objects() when command submission is complete or
@@ -90,7 +96,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  */
 
 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-				  struct list_head *list, bool intr);
+				  struct list_head *list, bool intr,
+				  struct list_head *dups);
 
 /**
  * function ttm_eu_fence_buffer_objects.
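Editor's note: a caller whose list can legitimately name the same BO twice (say, a page-table BO that also shows up in a user-supplied buffer list) now passes a second list instead of failing with -EALREADY. A minimal sketch of the calling convention, with illustrative names:

/* Hedged sketch: reserve a validation list, tolerating duplicates. */
static int example_reserve(struct ww_acquire_ctx *ticket,
			   struct list_head *validate_list)
{
	LIST_HEAD(duplicates);
	int r;

	r = ttm_eu_reserve_buffers(ticket, validate_list, true, &duplicates);
	if (r)
		return r;

	/* Buffers already reserved by this thread end up on &duplicates;
	 * fence them too, but don't reserve or back them off twice. */
	return 0;
}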
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 4c94f31a8c99..8523f9bb72f2 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -427,7 +427,7 @@ header-y += virtio_net.h
 header-y += virtio_pci.h
 header-y += virtio_ring.h
 header-y += virtio_rng.h
-header=y += vm_sockets.h
+header-y += vm_sockets.h
 header-y += vt.h
 header-y += wait.h
 header-y += wanrouter.h
diff --git a/ipc/sem.c b/ipc/sem.c
index 454f6c6020a8..53c3310f41c6 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -507,13 +507,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 		return retval;
 	}
 
-	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
-	if (id < 0) {
-		ipc_rcu_putref(sma, sem_rcu_free);
-		return id;
-	}
-	ns->used_sems += nsems;
-
 	sma->sem_base = (struct sem *) &sma[1];
 
 	for (i = 0; i < nsems; i++) {
@@ -528,6 +521,14 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 	INIT_LIST_HEAD(&sma->list_id);
 	sma->sem_nsems = nsems;
 	sma->sem_ctime = get_seconds();
+
+	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+	if (id < 0) {
+		ipc_rcu_putref(sma, sem_rcu_free);
+		return id;
+	}
+	ns->used_sems += nsems;
+
 	sem_unlock(sma, -1);
 	rcu_read_unlock();
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 24beb9bb4c3e..89e7283015a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2874,10 +2874,14 @@ asmlinkage __visible void __sched schedule_user(void)
 	 * or we have been woken up remotely but the IPI has not yet arrived,
 	 * we haven't yet exited the RCU idle mode. Do it here manually until
 	 * we find a better solution.
+	 *
+	 * NB: There are buggy callers of this function. Ideally we
+	 * should warn if prev_state != IN_USER, but that will trigger
+	 * too frequently to make sense yet.
 	 */
-	user_exit();
+	enum ctx_state prev_state = exception_enter();
 	schedule();
-	user_enter();
+	exception_exit(prev_state);
 }
 #endif
 
diff --git a/lib/genalloc.c b/lib/genalloc.c
index cce4dd68c40d..2e65d206b01c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -598,6 +598,7 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
 
 	return pool;
 }
+EXPORT_SYMBOL(devm_gen_pool_create);
 
 /**
  * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 09225796991a..5e256271b47b 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -28,7 +28,7 @@ void show_mem(unsigned int filter)
 			continue;
 
 		total += zone->present_pages;
-		reserved = zone->present_pages - zone->managed_pages;
+		reserved += zone->present_pages - zone->managed_pages;
 
 		if (is_highmem_idx(zoneid))
 			highmem += zone->present_pages;
diff --git a/mm/frontswap.c b/mm/frontswap.c
index c30eec536f03..f2a3571c6e22 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
 		   the (older) page from frontswap
 		 */
 		inc_frontswap_failed_stores();
-		if (dup)
+		if (dup) {
 			__frontswap_clear(sis, offset);
+			frontswap_ops->invalidate_page(type, offset);
+		}
 	}
 	if (frontswap_writethrough_enabled)
 		/* report failure so swap also writes to swap device */
diff --git a/mm/memory.c b/mm/memory.c
index 655fd3d34bb0..d3cb2ef66ee2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -816,20 +816,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		if (!pte_file(pte)) {
 			swp_entry_t entry = pte_to_swp_entry(pte);
 
-			if (swap_duplicate(entry) < 0)
-				return entry.val;
-
-			/* make sure dst_mm is on swapoff's mmlist. */
-			if (unlikely(list_empty(&dst_mm->mmlist))) {
-				spin_lock(&mmlist_lock);
-				if (list_empty(&dst_mm->mmlist))
-					list_add(&dst_mm->mmlist,
-						 &src_mm->mmlist);
-				spin_unlock(&mmlist_lock);
-			}
-			if (likely(!non_swap_entry(entry)))
-				rss[MM_SWAPENTS]++;
-			else if (is_migration_entry(entry)) {
+			if (likely(!non_swap_entry(entry))) {
+				if (swap_duplicate(entry) < 0)
+					return entry.val;
+
+				/* make sure dst_mm is on swapoff's mmlist. */
+				if (unlikely(list_empty(&dst_mm->mmlist))) {
+					spin_lock(&mmlist_lock);
+					if (list_empty(&dst_mm->mmlist))
+						list_add(&dst_mm->mmlist,
+							 &src_mm->mmlist);
+					spin_unlock(&mmlist_lock);
+				}
+				rss[MM_SWAPENTS]++;
+			} else if (is_migration_entry(entry)) {
 				page = migration_entry_to_page(entry);
 
 				if (PageAnon(page))
diff --git a/mm/mmap.c b/mm/mmap.c
index 87e82b38453c..ae919891a087 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -776,8 +776,11 @@ again: remove_next = 1 + (end > next->vm_end);
 		 * shrinking vma had, to cover any anon pages imported.
 		 */
 		if (exporter && exporter->anon_vma && !importer->anon_vma) {
-			if (anon_vma_clone(importer, exporter))
-				return -ENOMEM;
+			int error;
+
+			error = anon_vma_clone(importer, exporter);
+			if (error)
+				return error;
 			importer->anon_vma = exporter->anon_vma;
 		}
 	}
@@ -2469,7 +2472,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (err)
 		goto out_free_vma;
 
-	if (anon_vma_clone(new, vma))
+	err = anon_vma_clone(new, vma);
+	if (err)
 		goto out_free_mpol;
 
 	if (new->vm_file)
diff --git a/mm/rmap.c b/mm/rmap.c
index d3eb1e02d1c6..a2a1eab077b0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 {
 	struct anon_vma_chain *avc;
 	struct anon_vma *anon_vma;
+	int error;
 
 	/* Don't bother if the parent process has no anon_vma here. */
 	if (!pvma->anon_vma)
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 	 * First, attach the new VMA to the parent VMA's anon_vmas,
 	 * so rmap can find non-COWed pages in child processes.
 	 */
-	if (anon_vma_clone(vma, pvma))
-		return -ENOMEM;
+	error = anon_vma_clone(vma, pvma);
+	if (error)
+		return error;
 
 	/* Then add our own anon_vma. */
 	anon_vma = anon_vma_alloc();
diff --git a/mm/slab.c b/mm/slab.c
index eb2b2ea30130..f34e053ec46e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3076,7 +3076,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	void *obj;
 	int x;
 
-	VM_BUG_ON(nodeid > num_online_nodes());
+	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
 	n = get_node(cachep, nodeid);
 	BUG_ON(!n);
 
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index d4042e75f7c7..c5afd573d7da 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
 	unsigned long scanned;
 	unsigned long reclaimed;
 
+	spin_lock(&vmpr->sr_lock);
 	/*
 	 * Several contexts might be calling vmpressure(), so it is
 	 * possible that the work was rescheduled again before the old
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
 	 * here. No need for any locks here since we don't care if
 	 * vmpr->reclaimed is in sync.
 	 */
-	if (!vmpr->scanned)
+	scanned = vmpr->scanned;
+	if (!scanned) {
+		spin_unlock(&vmpr->sr_lock);
 		return;
+	}
 
-	spin_lock(&vmpr->sr_lock);
-	scanned = vmpr->scanned;
 	reclaimed = vmpr->reclaimed;
 	vmpr->scanned = 0;
 	vmpr->reclaimed = 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b9b7dfaf202b..76321ea442c3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1498,6 +1498,7 @@ static int do_setlink(const struct sk_buff *skb,
 			goto errout;
 		}
 		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+			put_net(net);
 			err = -EPERM;
 			goto errout;
 		}
diff --git a/security/keys/internal.h b/security/keys/internal.h
index b8960c4959a5..200e37867336 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -117,6 +117,7 @@ struct keyring_search_context {
 #define KEYRING_SEARCH_NO_UPDATE_TIME	0x0004	/* Don't update times */
 #define KEYRING_SEARCH_NO_CHECK_PERM	0x0008	/* Don't check permissions */
 #define KEYRING_SEARCH_DETECT_TOO_DEEP	0x0010	/* Give an error on excessive depth */
+#define KEYRING_SEARCH_SKIP_EXPIRED	0x0020	/* Ignore expired keys (intention to replace) */
 
 	int (*iterator)(const void *object, void *iterator_data);
 
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index eff88a5f5d40..4743d71e4aa6 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -26,6 +26,8 @@
 #include <asm/uaccess.h>
 #include "internal.h"
 
+#define KEY_MAX_DESC_SIZE 4096
+
 static int key_get_type_from_user(char *type,
 				  const char __user *_type,
 				  unsigned len)
@@ -78,7 +80,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
 
 	description = NULL;
 	if (_description) {
-		description = strndup_user(_description, PAGE_SIZE);
+		description = strndup_user(_description, KEY_MAX_DESC_SIZE);
 		if (IS_ERR(description)) {
 			ret = PTR_ERR(description);
 			goto error;
@@ -177,7 +179,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
 		goto error;
 
 	/* pull the description into kernel space */
-	description = strndup_user(_description, PAGE_SIZE);
+	description = strndup_user(_description, KEY_MAX_DESC_SIZE);
 	if (IS_ERR(description)) {
 		ret = PTR_ERR(description);
 		goto error;
@@ -287,7 +289,7 @@ long keyctl_join_session_keyring(const char __user *_name)
 	/* fetch the name from userspace */
 	name = NULL;
 	if (_name) {
-		name = strndup_user(_name, PAGE_SIZE);
+		name = strndup_user(_name, KEY_MAX_DESC_SIZE);
 		if (IS_ERR(name)) {
 			ret = PTR_ERR(name);
 			goto error;
@@ -562,8 +564,9 @@ long keyctl_describe_key(key_serial_t keyid,
 {
 	struct key *key, *instkey;
 	key_ref_t key_ref;
-	char *tmpbuf;
+	char *infobuf;
 	long ret;
+	int desclen, infolen;
 
 	key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
 	if (IS_ERR(key_ref)) {
@@ -586,38 +589,31 @@ long keyctl_describe_key(key_serial_t keyid,
 	}
 
 okay:
-	/* calculate how much description we're going to return */
-	ret = -ENOMEM;
-	tmpbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!tmpbuf)
-		goto error2;
-
 	key = key_ref_to_ptr(key_ref);
+	desclen = strlen(key->description);
 
-	ret = snprintf(tmpbuf, PAGE_SIZE - 1,
-		       "%s;%d;%d;%08x;%s",
-		       key->type->name,
-		       from_kuid_munged(current_user_ns(), key->uid),
-		       from_kgid_munged(current_user_ns(), key->gid),
-		       key->perm,
-		       key->description ?: "");
-
-	/* include a NUL char at the end of the data */
-	if (ret > PAGE_SIZE - 1)
-		ret = PAGE_SIZE - 1;
-	tmpbuf[ret] = 0;
-	ret++;
+	/* calculate how much information we're going to return */
+	ret = -ENOMEM;
+	infobuf = kasprintf(GFP_KERNEL,
+			    "%s;%d;%d;%08x;",
+			    key->type->name,
+			    from_kuid_munged(current_user_ns(), key->uid),
+			    from_kgid_munged(current_user_ns(), key->gid),
+			    key->perm);
+	if (!infobuf)
+		goto error2;
+	infolen = strlen(infobuf);
+	ret = infolen + desclen + 1;
 
 	/* consider returning the data */
-	if (buffer && buflen > 0) {
-		if (buflen > ret)
-			buflen = ret;
-
-		if (copy_to_user(buffer, tmpbuf, buflen) != 0)
+	if (buffer && buflen >= ret) {
+		if (copy_to_user(buffer, infobuf, infolen) != 0 ||
+		    copy_to_user(buffer + infolen, key->description,
+				 desclen + 1) != 0)
 			ret = -EFAULT;
 	}
 
-	kfree(tmpbuf);
+	kfree(infobuf);
 error2:
 	key_ref_put(key_ref);
 error:
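Editor's note: userspace still sees the same "type;uid;gid;perm;description" string, but the kernel now refuses a partial copy — the data is only written when the whole string fits, and the return value is always the size needed. A hedged userspace sketch with the keyutils wrapper, sizing the buffer from a first call:

#include <keyutils.h>
#include <stdlib.h>

/* Hedged sketch: fetch a key description, sizing the buffer first. */
static char *example_describe(key_serial_t key)
{
	long len = keyctl_describe(key, NULL, 0); /* returns required size */
	char *buf;

	if (len < 0)
		return NULL;
	buf = malloc(len);
	if (buf && keyctl_describe(key, buf, len) < 0) {
		free(buf);
		buf = NULL;
	}
	return buf; /* "type;uid;gid;perm;description" */
}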
@@ -649,7 +645,7 @@ long keyctl_keyring_search(key_serial_t ringid,
 	if (ret < 0)
 		goto error;
 
-	description = strndup_user(_description, PAGE_SIZE);
+	description = strndup_user(_description, KEY_MAX_DESC_SIZE);
 	if (IS_ERR(description)) {
 		ret = PTR_ERR(description);
 		goto error;
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 8177010174f7..e72548b5897e 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -546,7 +546,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
 	}
 
 	if (key->expiry && ctx->now.tv_sec >= key->expiry) {
-		ctx->result = ERR_PTR(-EKEYEXPIRED);
+		if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
+			ctx->result = ERR_PTR(-EKEYEXPIRED);
 		kleave(" = %d [expire]", ctx->skipped_ret);
 		goto skipped;
 	}
@@ -628,6 +629,10 @@ static bool search_nested_keyrings(struct key *keyring,
 	       ctx->index_key.type->name,
 	       ctx->index_key.description);
 
+#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
+	BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
+	       (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+
 	if (ctx->index_key.description)
 		ctx->index_key.desc_len = strlen(ctx->index_key.description);
 
@@ -637,7 +642,6 @@ static bool search_nested_keyrings(struct key *keyring,
 	if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
 	    keyring_compare_object(keyring, &ctx->index_key)) {
 		ctx->skipped_ret = 2;
-		ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
 		switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
 		case 1:
 			goto found;
@@ -649,8 +653,6 @@ static bool search_nested_keyrings(struct key *keyring,
 	}
 
 	ctx->skipped_ret = 0;
-	if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
-		ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
 
 	/* Start processing a new keyring */
 descend_to_keyring:
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index bb4337c7ae1b..0c7aea4dea54 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -516,6 +516,8 @@ struct key *request_key_and_link(struct key_type *type,
 		.match_data.cmp = key_default_cmp,
 		.match_data.raw_data = description,
 		.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+		.flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+			  KEYRING_SEARCH_SKIP_EXPIRED),
 	};
 	struct key *key;
 	key_ref_t key_ref;
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 6639e2cb8853..5d672f7580dd 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -249,6 +249,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
249 .match_data.cmp = key_default_cmp, 249 .match_data.cmp = key_default_cmp,
250 .match_data.raw_data = description, 250 .match_data.raw_data = description,
251 .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, 251 .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
252 .flags = KEYRING_SEARCH_DO_STATE_CHECK,
252 }; 253 };
253 struct key *authkey; 254 struct key *authkey;
254 key_ref_t authkey_ref; 255 key_ref_t authkey_ref;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 14f16be3f374..b118a5be18df 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4790,6 +4790,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
 	SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),