aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/gpu/samsung-rotator.txt27
-rw-r--r--arch/x86/kernel/early-quirks.c154
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/exynos/Kconfig6
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c38
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c263
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c60
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c117
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c87
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c9
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c11
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c15
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c164
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c48
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c41
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c23
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h34
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c36
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c83
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c14
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c14
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c99
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h6
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c17
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c9
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c23
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c39
-rw-r--r--drivers/gpu/drm/radeon/cik.c36
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c12
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c164
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/kv_smc.c8
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h2
-rw-r--r--drivers/gpu/drm/radeon/r100.c7
-rw-r--r--drivers/gpu/drm/radeon/r420.c7
-rw-r--r--drivers/gpu/drm/radeon/r600.c19
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c38
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h82
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c69
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c81
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h27
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c12
-rw-r--r--drivers/gpu/drm/radeon/rs690.c7
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c112
-rw-r--r--drivers/gpu/drm/radeon/rv515.c8
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c16
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.c44
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.h2
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h2
-rw-r--r--drivers/gpu/drm/radeon/si.c21
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c43
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c17
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/trinity_smc.c8
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c1
-rw-r--r--drivers/gpu/vga/vgaarb.c51
-rw-r--r--include/drm/drm_pciids.h3
-rw-r--r--include/drm/exynos_drm.h3
-rw-r--r--include/drm/i915_drm.h34
-rw-r--r--include/drm/i915_pciids.h211
-rw-r--r--include/linux/vgaarb.h7
105 files changed, 1934 insertions, 966 deletions
diff --git a/Documentation/devicetree/bindings/gpu/samsung-rotator.txt b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt
new file mode 100644
index 000000000000..82cd1ed0be93
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt
@@ -0,0 +1,27 @@
1* Samsung Image Rotator
2
3Required properties:
4 - compatible : value should be one of the following:
5 (a) "samsung,exynos4210-rotator" for Rotator IP in Exynos4210
6 (b) "samsung,exynos4212-rotator" for Rotator IP in Exynos4212/4412
7 (c) "samsung,exynos5250-rotator" for Rotator IP in Exynos5250
8
9 - reg : Physical base address of the IP registers and length of memory
10 mapped region.
11
12 - interrupts : Interrupt specifier for rotator interrupt, according to format
13 specific to interrupt parent.
14
15 - clocks : Clock specifier for rotator clock, according to generic clock
16 bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt)
17
18 - clock-names : Names of clocks. For exynos rotator, it should be "rotator".
19
20Example:
21 rotator@12810000 {
22 compatible = "samsung,exynos4210-rotator";
23 reg = <0x12810000 0x1000>;
24 interrupts = <0 83 0>;
25 clocks = <&clock 278>;
26 clock-names = "rotator";
27 };
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 63bdb29b2549..b3cd3ebae077 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -12,6 +12,7 @@
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/acpi.h> 13#include <linux/acpi.h>
14#include <linux/pci_ids.h> 14#include <linux/pci_ids.h>
15#include <drm/i915_drm.h>
15#include <asm/pci-direct.h> 16#include <asm/pci-direct.h>
16#include <asm/dma.h> 17#include <asm/dma.h>
17#include <asm/io_apic.h> 18#include <asm/io_apic.h>
@@ -216,6 +217,157 @@ static void __init intel_remapping_check(int num, int slot, int func)
216 217
217} 218}
218 219
220/*
221 * Systems with Intel graphics controllers set aside memory exclusively
222 * for gfx driver use. This memory is not marked in the E820 as reserved
223 * or as RAM, and so is subject to overlap from E820 manipulation later
224 * in the boot process. On some systems, MMIO space is allocated on top,
225 * despite the efforts of the "RAM buffer" approach, which simply rounds
226 * memory boundaries up to 64M to try to catch space that may decode
227 * as RAM and so is not suitable for MMIO.
228 *
229 * And yes, so far on current devices the base addr is always under 4G.
230 */
231static u32 __init intel_stolen_base(int num, int slot, int func)
232{
233 u32 base;
234
235 /*
236 * For the PCI IDs in this quirk, the stolen base is always
237 * in 0x5c, aka the BDSM register (yes that's really what
238 * it's called).
239 */
240 base = read_pci_config(num, slot, func, 0x5c);
241 base &= ~((1<<20) - 1);
242
243 return base;
244}
245
246#define KB(x) ((x) * 1024)
247#define MB(x) (KB (KB (x)))
248#define GB(x) (MB (KB (x)))
249
250static size_t __init gen3_stolen_size(int num, int slot, int func)
251{
252 size_t stolen_size;
253 u16 gmch_ctrl;
254
255 gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
256
257 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
258 case I855_GMCH_GMS_STOLEN_1M:
259 stolen_size = MB(1);
260 break;
261 case I855_GMCH_GMS_STOLEN_4M:
262 stolen_size = MB(4);
263 break;
264 case I855_GMCH_GMS_STOLEN_8M:
265 stolen_size = MB(8);
266 break;
267 case I855_GMCH_GMS_STOLEN_16M:
268 stolen_size = MB(16);
269 break;
270 case I855_GMCH_GMS_STOLEN_32M:
271 stolen_size = MB(32);
272 break;
273 case I915_GMCH_GMS_STOLEN_48M:
274 stolen_size = MB(48);
275 break;
276 case I915_GMCH_GMS_STOLEN_64M:
277 stolen_size = MB(64);
278 break;
279 case G33_GMCH_GMS_STOLEN_128M:
280 stolen_size = MB(128);
281 break;
282 case G33_GMCH_GMS_STOLEN_256M:
283 stolen_size = MB(256);
284 break;
285 case INTEL_GMCH_GMS_STOLEN_96M:
286 stolen_size = MB(96);
287 break;
288 case INTEL_GMCH_GMS_STOLEN_160M:
289 stolen_size = MB(160);
290 break;
291 case INTEL_GMCH_GMS_STOLEN_224M:
292 stolen_size = MB(224);
293 break;
294 case INTEL_GMCH_GMS_STOLEN_352M:
295 stolen_size = MB(352);
296 break;
297 default:
298 stolen_size = 0;
299 break;
300 }
301
302 return stolen_size;
303}
304
305static size_t __init gen6_stolen_size(int num, int slot, int func)
306{
307 u16 gmch_ctrl;
308
309 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
310 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
311 gmch_ctrl &= SNB_GMCH_GMS_MASK;
312
313 return gmch_ctrl << 25; /* 32 MB units */
314}
315
316typedef size_t (*stolen_size_fn)(int num, int slot, int func);
317
318static struct pci_device_id intel_stolen_ids[] __initdata = {
319 INTEL_I915G_IDS(gen3_stolen_size),
320 INTEL_I915GM_IDS(gen3_stolen_size),
321 INTEL_I945G_IDS(gen3_stolen_size),
322 INTEL_I945GM_IDS(gen3_stolen_size),
323 INTEL_VLV_M_IDS(gen3_stolen_size),
324 INTEL_VLV_D_IDS(gen3_stolen_size),
325 INTEL_PINEVIEW_IDS(gen3_stolen_size),
326 INTEL_I965G_IDS(gen3_stolen_size),
327 INTEL_G33_IDS(gen3_stolen_size),
328 INTEL_I965GM_IDS(gen3_stolen_size),
329 INTEL_GM45_IDS(gen3_stolen_size),
330 INTEL_G45_IDS(gen3_stolen_size),
331 INTEL_IRONLAKE_D_IDS(gen3_stolen_size),
332 INTEL_IRONLAKE_M_IDS(gen3_stolen_size),
333 INTEL_SNB_D_IDS(gen6_stolen_size),
334 INTEL_SNB_M_IDS(gen6_stolen_size),
335 INTEL_IVB_M_IDS(gen6_stolen_size),
336 INTEL_IVB_D_IDS(gen6_stolen_size),
337 INTEL_HSW_D_IDS(gen6_stolen_size),
338 INTEL_HSW_M_IDS(gen6_stolen_size),
339};
340
341static void __init intel_graphics_stolen(int num, int slot, int func)
342{
343 size_t size;
344 int i;
345 u32 start;
346 u16 device, subvendor, subdevice;
347
348 device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
349 subvendor = read_pci_config_16(num, slot, func,
350 PCI_SUBSYSTEM_VENDOR_ID);
351 subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);
352
353 for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
354 if (intel_stolen_ids[i].device == device) {
355 stolen_size_fn stolen_size =
356 (stolen_size_fn)intel_stolen_ids[i].driver_data;
357 size = stolen_size(num, slot, func);
358 start = intel_stolen_base(num, slot, func);
359 if (size && start) {
360 /* Mark this space as reserved */
361 e820_add_region(start, size, E820_RESERVED);
362 sanitize_e820_map(e820.map,
363 ARRAY_SIZE(e820.map),
364 &e820.nr_map);
365 }
366 return;
367 }
368 }
369}
370
219#define QFLAG_APPLY_ONCE 0x1 371#define QFLAG_APPLY_ONCE 0x1
220#define QFLAG_APPLIED 0x2 372#define QFLAG_APPLIED 0x2
221#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) 373#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -251,6 +403,8 @@ static struct chipset early_qrk[] __initdata = {
251 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 403 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
252 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, 404 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
253 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 405 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
406 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
407 QFLAG_APPLY_ONCE, intel_graphics_stolen },
254 {} 408 {}
255}; 409};
256 410
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 796dbb212a41..8492b68e873c 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -177,7 +177,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
177 177
178static inline void ast_open_key(struct ast_private *ast) 178static inline void ast_open_key(struct ast_private *ast)
179{ 179{
180 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04); 180 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
181} 181}
182 182
183#define AST_VIDMEM_SIZE_8M 0x00800000 183#define AST_VIDMEM_SIZE_8M 0x00800000
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 772c62a6e2ac..4752f223e5b2 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,11 +1,12 @@
1config DRM_EXYNOS 1config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) 3 depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA 6 select FB_CFB_COPYAREA
7 select FB_CFB_IMAGEBLIT 7 select FB_CFB_IMAGEBLIT
8 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE 8 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
9 select VIDEOMODE_HELPERS
9 help 10 help
10 Choose this option if you have a Samsung SoC EXYNOS chipset. 11 Choose this option if you have a Samsung SoC EXYNOS chipset.
11 If M is selected the module will be called exynosdrm. 12 If M is selected the module will be called exynosdrm.
@@ -24,9 +25,8 @@ config DRM_EXYNOS_DMABUF
24 25
25config DRM_EXYNOS_FIMD 26config DRM_EXYNOS_FIMD
26 bool "Exynos DRM FIMD" 27 bool "Exynos DRM FIMD"
27 depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM 28 depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
28 select FB_MODE_HELPERS 29 select FB_MODE_HELPERS
29 select VIDEOMODE_HELPERS
30 help 30 help
31 Choose this option if you want to use Exynos FIMD for DRM. 31 Choose this option if you want to use Exynos FIMD for DRM.
32 32
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 30ef41bcd7b8..6a8c84e7c839 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,7 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/i2c.h> 17#include <linux/i2c.h>
18 18#include <linux/of.h>
19 19
20#include "exynos_drm_drv.h" 20#include "exynos_drm_drv.h"
21#include "exynos_hdmi.h" 21#include "exynos_hdmi.h"
@@ -41,13 +41,6 @@ static int s5p_ddc_remove(struct i2c_client *client)
41 return 0; 41 return 0;
42} 42}
43 43
44static struct i2c_device_id ddc_idtable[] = {
45 {"s5p_ddc", 0},
46 {"exynos5-hdmiddc", 0},
47 { },
48};
49
50#ifdef CONFIG_OF
51static struct of_device_id hdmiddc_match_types[] = { 44static struct of_device_id hdmiddc_match_types[] = {
52 { 45 {
53 .compatible = "samsung,exynos5-hdmiddc", 46 .compatible = "samsung,exynos5-hdmiddc",
@@ -57,15 +50,13 @@ static struct of_device_id hdmiddc_match_types[] = {
57 /* end node */ 50 /* end node */
58 } 51 }
59}; 52};
60#endif
61 53
62struct i2c_driver ddc_driver = { 54struct i2c_driver ddc_driver = {
63 .driver = { 55 .driver = {
64 .name = "exynos-hdmiddc", 56 .name = "exynos-hdmiddc",
65 .owner = THIS_MODULE, 57 .owner = THIS_MODULE,
66 .of_match_table = of_match_ptr(hdmiddc_match_types), 58 .of_match_table = hdmiddc_match_types,
67 }, 59 },
68 .id_table = ddc_idtable,
69 .probe = s5p_ddc_probe, 60 .probe = s5p_ddc_probe,
70 .remove = s5p_ddc_remove, 61 .remove = s5p_ddc_remove,
71 .command = NULL, 62 .command = NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index b8ac06d92fbf..3445a0f3a6b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -149,10 +149,8 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
149 DRM_DEBUG_KMS("desired size = 0x%x\n", size); 149 DRM_DEBUG_KMS("desired size = 0x%x\n", size);
150 150
151 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 151 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
152 if (!buffer) { 152 if (!buffer)
153 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
154 return NULL; 153 return NULL;
155 }
156 154
157 buffer->size = size; 155 buffer->size = size;
158 return buffer; 156 return buffer;
@@ -161,11 +159,6 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
161void exynos_drm_fini_buf(struct drm_device *dev, 159void exynos_drm_fini_buf(struct drm_device *dev,
162 struct exynos_drm_gem_buf *buffer) 160 struct exynos_drm_gem_buf *buffer)
163{ 161{
164 if (!buffer) {
165 DRM_DEBUG_KMS("buffer is null.\n");
166 return;
167 }
168
169 kfree(buffer); 162 kfree(buffer);
170 buffer = NULL; 163 buffer = NULL;
171} 164}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 02a8bc5226ca..e082efb2fece 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -17,6 +17,7 @@
17#include <drm/exynos_drm.h> 17#include <drm/exynos_drm.h>
18#include "exynos_drm_drv.h" 18#include "exynos_drm_drv.h"
19#include "exynos_drm_encoder.h" 19#include "exynos_drm_encoder.h"
20#include "exynos_drm_connector.h"
20 21
21#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ 22#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
22 drm_connector) 23 drm_connector)
@@ -28,35 +29,6 @@ struct exynos_drm_connector {
28 uint32_t dpms; 29 uint32_t dpms;
29}; 30};
30 31
31/* convert exynos_video_timings to drm_display_mode */
32static inline void
33convert_to_display_mode(struct drm_display_mode *mode,
34 struct exynos_drm_panel_info *panel)
35{
36 struct fb_videomode *timing = &panel->timing;
37
38 mode->clock = timing->pixclock / 1000;
39 mode->vrefresh = timing->refresh;
40
41 mode->hdisplay = timing->xres;
42 mode->hsync_start = mode->hdisplay + timing->right_margin;
43 mode->hsync_end = mode->hsync_start + timing->hsync_len;
44 mode->htotal = mode->hsync_end + timing->left_margin;
45
46 mode->vdisplay = timing->yres;
47 mode->vsync_start = mode->vdisplay + timing->lower_margin;
48 mode->vsync_end = mode->vsync_start + timing->vsync_len;
49 mode->vtotal = mode->vsync_end + timing->upper_margin;
50 mode->width_mm = panel->width_mm;
51 mode->height_mm = panel->height_mm;
52
53 if (timing->vmode & FB_VMODE_INTERLACED)
54 mode->flags |= DRM_MODE_FLAG_INTERLACE;
55
56 if (timing->vmode & FB_VMODE_DOUBLE)
57 mode->flags |= DRM_MODE_FLAG_DBLSCAN;
58}
59
60static int exynos_drm_connector_get_modes(struct drm_connector *connector) 32static int exynos_drm_connector_get_modes(struct drm_connector *connector)
61{ 33{
62 struct exynos_drm_connector *exynos_connector = 34 struct exynos_drm_connector *exynos_connector =
@@ -111,7 +83,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
111 return 0; 83 return 0;
112 } 84 }
113 85
114 convert_to_display_mode(mode, panel); 86 drm_display_mode_from_videomode(&panel->vm, mode);
87 mode->width_mm = panel->width_mm;
88 mode->height_mm = panel->height_mm;
115 connector->display_info.width_mm = mode->width_mm; 89 connector->display_info.width_mm = mode->width_mm;
116 connector->display_info.height_mm = mode->height_mm; 90 connector->display_info.height_mm = mode->height_mm;
117 91
@@ -278,10 +252,8 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
278 int err; 252 int err;
279 253
280 exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); 254 exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
281 if (!exynos_connector) { 255 if (!exynos_connector)
282 DRM_ERROR("failed to allocate connector\n");
283 return NULL; 256 return NULL;
284 }
285 257
286 connector = &exynos_connector->drm_connector; 258 connector = &exynos_connector->drm_connector;
287 259
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 14f5c1d34028..ebc01503d50e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -15,6 +15,7 @@
15#include <drm/drmP.h> 15#include <drm/drmP.h>
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include "exynos_drm_crtc.h"
18#include "exynos_drm_drv.h" 19#include "exynos_drm_drv.h"
19#include "exynos_drm_encoder.h" 20#include "exynos_drm_encoder.h"
20#include "exynos_drm_plane.h" 21#include "exynos_drm_plane.h"
@@ -324,10 +325,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
324 struct drm_crtc *crtc; 325 struct drm_crtc *crtc;
325 326
326 exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL); 327 exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
327 if (!exynos_crtc) { 328 if (!exynos_crtc)
328 DRM_ERROR("failed to allocate exynos crtc\n");
329 return -ENOMEM; 329 return -ENOMEM;
330 }
331 330
332 exynos_crtc->pipe = nr; 331 exynos_crtc->pipe = nr;
333 exynos_crtc->dpms = DRM_MODE_DPMS_OFF; 332 exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fd76449cf452..59827cc5e770 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -11,6 +11,7 @@
11 11
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13#include <drm/exynos_drm.h> 13#include <drm/exynos_drm.h>
14#include "exynos_drm_dmabuf.h"
14#include "exynos_drm_drv.h" 15#include "exynos_drm_drv.h"
15#include "exynos_drm_gem.h" 16#include "exynos_drm_gem.h"
16 17
@@ -230,7 +231,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
230 231
231 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 232 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
232 if (!buffer) { 233 if (!buffer) {
233 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
234 ret = -ENOMEM; 234 ret = -ENOMEM;
235 goto err_unmap_attach; 235 goto err_unmap_attach;
236 } 236 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index df81d3c959b4..bb82ef78ca85 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -47,10 +47,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
47 int nr; 47 int nr;
48 48
49 private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL); 49 private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
50 if (!private) { 50 if (!private)
51 DRM_ERROR("failed to allocate private\n");
52 return -ENOMEM; 51 return -ENOMEM;
53 }
54 52
55 INIT_LIST_HEAD(&private->pageflip_event_list); 53 INIT_LIST_HEAD(&private->pageflip_event_list);
56 dev->dev_private = (void *)private; 54 dev->dev_private = (void *)private;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index a99a033793bc..06f1b2a09da7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -324,10 +324,8 @@ exynos_drm_encoder_create(struct drm_device *dev,
324 return NULL; 324 return NULL;
325 325
326 exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL); 326 exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
327 if (!exynos_encoder) { 327 if (!exynos_encoder)
328 DRM_ERROR("failed to allocate encoder\n");
329 return NULL; 328 return NULL;
330 }
331 329
332 exynos_encoder->dpms = DRM_MODE_DPMS_OFF; 330 exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
333 exynos_encoder->manager = manager; 331 exynos_encoder->manager = manager;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c2d149f0408a..ea39e0ef2ae4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -156,10 +156,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
156 } 156 }
157 157
158 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 158 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
159 if (!exynos_fb) { 159 if (!exynos_fb)
160 DRM_ERROR("failed to allocate exynos drm framebuffer\n");
161 return ERR_PTR(-ENOMEM); 160 return ERR_PTR(-ENOMEM);
162 }
163 161
164 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); 162 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
165 exynos_fb->exynos_gem_obj[0] = exynos_gem_obj; 163 exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
@@ -220,10 +218,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
220 int i, ret; 218 int i, ret;
221 219
222 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 220 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
223 if (!exynos_fb) { 221 if (!exynos_fb)
224 DRM_ERROR("failed to allocate exynos drm framebuffer\n");
225 return ERR_PTR(-ENOMEM); 222 return ERR_PTR(-ENOMEM);
226 }
227 223
228 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); 224 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
229 if (!obj) { 225 if (!obj) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 8e60bd61137f..78e868bcf1ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -16,9 +16,11 @@
16#include <drm/drm_crtc.h> 16#include <drm/drm_crtc.h>
17#include <drm/drm_fb_helper.h> 17#include <drm/drm_fb_helper.h>
18#include <drm/drm_crtc_helper.h> 18#include <drm/drm_crtc_helper.h>
19#include <drm/exynos_drm.h>
19 20
20#include "exynos_drm_drv.h" 21#include "exynos_drm_drv.h"
21#include "exynos_drm_fb.h" 22#include "exynos_drm_fb.h"
23#include "exynos_drm_fbdev.h"
22#include "exynos_drm_gem.h" 24#include "exynos_drm_gem.h"
23#include "exynos_drm_iommu.h" 25#include "exynos_drm_iommu.h"
24 26
@@ -165,8 +167,18 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
165 167
166 size = mode_cmd.pitches[0] * mode_cmd.height; 168 size = mode_cmd.pitches[0] * mode_cmd.height;
167 169
168 /* 0 means to allocate physically continuous memory */ 170 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
169 exynos_gem_obj = exynos_drm_gem_create(dev, 0, size); 171 /*
172 * If physically contiguous memory allocation fails and if IOMMU is
173 * supported then try to get buffer from non physically contiguous
174 * memory area.
175 */
176 if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
177 dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
178 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
179 size);
180 }
181
170 if (IS_ERR(exynos_gem_obj)) { 182 if (IS_ERR(exynos_gem_obj)) {
171 ret = PTR_ERR(exynos_gem_obj); 183 ret = PTR_ERR(exynos_gem_obj);
172 goto err_release_framebuffer; 184 goto err_release_framebuffer;
@@ -236,10 +248,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
236 return 0; 248 return 0;
237 249
238 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); 250 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
239 if (!fbdev) { 251 if (!fbdev)
240 DRM_ERROR("failed to allocate drm fbdev.\n");
241 return -ENOMEM; 252 return -ENOMEM;
242 }
243 253
244 private->fb_helper = helper = &fbdev->drm_fb_helper; 254 private->fb_helper = helper = &fbdev->drm_fb_helper;
245 helper->funcs = &exynos_drm_fb_helper_funcs; 255 helper->funcs = &exynos_drm_fb_helper_funcs;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 6e047bd53e2f..8adfc8f1e08f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -17,10 +17,12 @@
17#include <linux/regmap.h> 17#include <linux/regmap.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
20#include <linux/of.h>
20 21
21#include <drm/drmP.h> 22#include <drm/drmP.h>
22#include <drm/exynos_drm.h> 23#include <drm/exynos_drm.h>
23#include "regs-fimc.h" 24#include "regs-fimc.h"
25#include "exynos_drm_drv.h"
24#include "exynos_drm_ipp.h" 26#include "exynos_drm_ipp.h"
25#include "exynos_drm_fimc.h" 27#include "exynos_drm_fimc.h"
26 28
@@ -1343,10 +1345,8 @@ static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1343 struct drm_exynos_ipp_prop_list *prop_list; 1345 struct drm_exynos_ipp_prop_list *prop_list;
1344 1346
1345 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); 1347 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1346 if (!prop_list) { 1348 if (!prop_list)
1347 DRM_ERROR("failed to alloc property list.\n");
1348 return -ENOMEM; 1349 return -ENOMEM;
1349 }
1350 1350
1351 prop_list->version = 1; 1351 prop_list->version = 1;
1352 prop_list->writeback = 1; 1352 prop_list->writeback = 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 1c263dac3c1c..868a14d52995 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -16,10 +16,12 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/of.h>
19#include <linux/of_device.h> 20#include <linux/of_device.h>
20#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
21 22
22#include <video/of_display_timing.h> 23#include <video/of_display_timing.h>
24#include <video/of_videomode.h>
23#include <video/samsung_fimd.h> 25#include <video/samsung_fimd.h>
24#include <drm/exynos_drm.h> 26#include <drm/exynos_drm.h>
25 27
@@ -35,6 +37,8 @@
35 * CPU Interface. 37 * CPU Interface.
36 */ 38 */
37 39
40#define FIMD_DEFAULT_FRAMERATE 60
41
38/* position control register for hardware window 0, 2 ~ 4.*/ 42/* position control register for hardware window 0, 2 ~ 4.*/
39#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16) 43#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
40#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16) 44#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
@@ -65,11 +69,13 @@ struct fimd_driver_data {
65 69
66 unsigned int has_shadowcon:1; 70 unsigned int has_shadowcon:1;
67 unsigned int has_clksel:1; 71 unsigned int has_clksel:1;
72 unsigned int has_limited_fmt:1;
68}; 73};
69 74
70static struct fimd_driver_data s3c64xx_fimd_driver_data = { 75static struct fimd_driver_data s3c64xx_fimd_driver_data = {
71 .timing_base = 0x0, 76 .timing_base = 0x0,
72 .has_clksel = 1, 77 .has_clksel = 1,
78 .has_limited_fmt = 1,
73}; 79};
74 80
75static struct fimd_driver_data exynos4_fimd_driver_data = { 81static struct fimd_driver_data exynos4_fimd_driver_data = {
@@ -90,6 +96,7 @@ struct fimd_win_data {
90 unsigned int fb_width; 96 unsigned int fb_width;
91 unsigned int fb_height; 97 unsigned int fb_height;
92 unsigned int bpp; 98 unsigned int bpp;
99 unsigned int pixel_format;
93 dma_addr_t dma_addr; 100 dma_addr_t dma_addr;
94 unsigned int buf_offsize; 101 unsigned int buf_offsize;
95 unsigned int line_size; /* bytes */ 102 unsigned int line_size; /* bytes */
@@ -115,11 +122,10 @@ struct fimd_context {
115 wait_queue_head_t wait_vsync_queue; 122 wait_queue_head_t wait_vsync_queue;
116 atomic_t wait_vsync_event; 123 atomic_t wait_vsync_event;
117 124
118 struct exynos_drm_panel_info *panel; 125 struct exynos_drm_panel_info panel;
119 struct fimd_driver_data *driver_data; 126 struct fimd_driver_data *driver_data;
120}; 127};
121 128
122#ifdef CONFIG_OF
123static const struct of_device_id fimd_driver_dt_match[] = { 129static const struct of_device_id fimd_driver_dt_match[] = {
124 { .compatible = "samsung,s3c6400-fimd", 130 { .compatible = "samsung,s3c6400-fimd",
125 .data = &s3c64xx_fimd_driver_data }, 131 .data = &s3c64xx_fimd_driver_data },
@@ -129,21 +135,14 @@ static const struct of_device_id fimd_driver_dt_match[] = {
129 .data = &exynos5_fimd_driver_data }, 135 .data = &exynos5_fimd_driver_data },
130 {}, 136 {},
131}; 137};
132#endif
133 138
134static inline struct fimd_driver_data *drm_fimd_get_driver_data( 139static inline struct fimd_driver_data *drm_fimd_get_driver_data(
135 struct platform_device *pdev) 140 struct platform_device *pdev)
136{ 141{
137#ifdef CONFIG_OF
138 const struct of_device_id *of_id = 142 const struct of_device_id *of_id =
139 of_match_device(fimd_driver_dt_match, &pdev->dev); 143 of_match_device(fimd_driver_dt_match, &pdev->dev);
140 144
141 if (of_id) 145 return (struct fimd_driver_data *)of_id->data;
142 return (struct fimd_driver_data *)of_id->data;
143#endif
144
145 return (struct fimd_driver_data *)
146 platform_get_device_id(pdev)->driver_data;
147} 146}
148 147
149static bool fimd_display_is_connected(struct device *dev) 148static bool fimd_display_is_connected(struct device *dev)
@@ -157,7 +156,7 @@ static void *fimd_get_panel(struct device *dev)
157{ 156{
158 struct fimd_context *ctx = get_fimd_context(dev); 157 struct fimd_context *ctx = get_fimd_context(dev);
159 158
160 return ctx->panel; 159 return &ctx->panel;
161} 160}
162 161
163static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode) 162static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode)
@@ -237,8 +236,8 @@ static void fimd_apply(struct device *subdrv_dev)
237static void fimd_commit(struct device *dev) 236static void fimd_commit(struct device *dev)
238{ 237{
239 struct fimd_context *ctx = get_fimd_context(dev); 238 struct fimd_context *ctx = get_fimd_context(dev);
240 struct exynos_drm_panel_info *panel = ctx->panel; 239 struct exynos_drm_panel_info *panel = &ctx->panel;
241 struct fb_videomode *timing = &panel->timing; 240 struct videomode *vm = &panel->vm;
242 struct fimd_driver_data *driver_data; 241 struct fimd_driver_data *driver_data;
243 u32 val; 242 u32 val;
244 243
@@ -250,22 +249,22 @@ static void fimd_commit(struct device *dev)
250 writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1); 249 writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
251 250
252 /* setup vertical timing values. */ 251 /* setup vertical timing values. */
253 val = VIDTCON0_VBPD(timing->upper_margin - 1) | 252 val = VIDTCON0_VBPD(vm->vback_porch - 1) |
254 VIDTCON0_VFPD(timing->lower_margin - 1) | 253 VIDTCON0_VFPD(vm->vfront_porch - 1) |
255 VIDTCON0_VSPW(timing->vsync_len - 1); 254 VIDTCON0_VSPW(vm->vsync_len - 1);
256 writel(val, ctx->regs + driver_data->timing_base + VIDTCON0); 255 writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
257 256
258 /* setup horizontal timing values. */ 257 /* setup horizontal timing values. */
259 val = VIDTCON1_HBPD(timing->left_margin - 1) | 258 val = VIDTCON1_HBPD(vm->hback_porch - 1) |
260 VIDTCON1_HFPD(timing->right_margin - 1) | 259 VIDTCON1_HFPD(vm->hfront_porch - 1) |
261 VIDTCON1_HSPW(timing->hsync_len - 1); 260 VIDTCON1_HSPW(vm->hsync_len - 1);
262 writel(val, ctx->regs + driver_data->timing_base + VIDTCON1); 261 writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
263 262
264 /* setup horizontal and vertical display size. */ 263 /* setup horizontal and vertical display size. */
265 val = VIDTCON2_LINEVAL(timing->yres - 1) | 264 val = VIDTCON2_LINEVAL(vm->vactive - 1) |
266 VIDTCON2_HOZVAL(timing->xres - 1) | 265 VIDTCON2_HOZVAL(vm->hactive - 1) |
267 VIDTCON2_LINEVAL_E(timing->yres - 1) | 266 VIDTCON2_LINEVAL_E(vm->vactive - 1) |
268 VIDTCON2_HOZVAL_E(timing->xres - 1); 267 VIDTCON2_HOZVAL_E(vm->hactive - 1);
269 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2); 268 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
270 269
271 /* setup clock source, clock divider, enable dma. */ 270 /* setup clock source, clock divider, enable dma. */
@@ -396,6 +395,7 @@ static void fimd_win_mode_set(struct device *dev,
396 win_data->fb_height = overlay->fb_height; 395 win_data->fb_height = overlay->fb_height;
397 win_data->dma_addr = overlay->dma_addr[0] + offset; 396 win_data->dma_addr = overlay->dma_addr[0] + offset;
398 win_data->bpp = overlay->bpp; 397 win_data->bpp = overlay->bpp;
398 win_data->pixel_format = overlay->pixel_format;
399 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 399 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
400 (overlay->bpp >> 3); 400 (overlay->bpp >> 3);
401 win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3); 401 win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
@@ -417,39 +417,38 @@ static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
417 417
418 val = WINCONx_ENWIN; 418 val = WINCONx_ENWIN;
419 419
420 switch (win_data->bpp) { 420 /*
421 case 1: 421 * In case of s3c64xx, window 0 doesn't support alpha channel.
422 val |= WINCON0_BPPMODE_1BPP; 422 * So the request format is ARGB8888 then change it to XRGB8888.
423 val |= WINCONx_BITSWP; 423 */
424 val |= WINCONx_BURSTLEN_4WORD; 424 if (ctx->driver_data->has_limited_fmt && !win) {
425 break; 425 if (win_data->pixel_format == DRM_FORMAT_ARGB8888)
426 case 2: 426 win_data->pixel_format = DRM_FORMAT_XRGB8888;
427 val |= WINCON0_BPPMODE_2BPP; 427 }
428 val |= WINCONx_BITSWP; 428
429 val |= WINCONx_BURSTLEN_8WORD; 429 switch (win_data->pixel_format) {
430 break; 430 case DRM_FORMAT_C8:
431 case 4:
432 val |= WINCON0_BPPMODE_4BPP;
433 val |= WINCONx_BITSWP;
434 val |= WINCONx_BURSTLEN_8WORD;
435 break;
436 case 8:
437 val |= WINCON0_BPPMODE_8BPP_PALETTE; 431 val |= WINCON0_BPPMODE_8BPP_PALETTE;
438 val |= WINCONx_BURSTLEN_8WORD; 432 val |= WINCONx_BURSTLEN_8WORD;
439 val |= WINCONx_BYTSWP; 433 val |= WINCONx_BYTSWP;
440 break; 434 break;
441 case 16: 435 case DRM_FORMAT_XRGB1555:
436 val |= WINCON0_BPPMODE_16BPP_1555;
437 val |= WINCONx_HAWSWP;
438 val |= WINCONx_BURSTLEN_16WORD;
439 break;
440 case DRM_FORMAT_RGB565:
442 val |= WINCON0_BPPMODE_16BPP_565; 441 val |= WINCON0_BPPMODE_16BPP_565;
443 val |= WINCONx_HAWSWP; 442 val |= WINCONx_HAWSWP;
444 val |= WINCONx_BURSTLEN_16WORD; 443 val |= WINCONx_BURSTLEN_16WORD;
445 break; 444 break;
446 case 24: 445 case DRM_FORMAT_XRGB8888:
447 val |= WINCON0_BPPMODE_24BPP_888; 446 val |= WINCON0_BPPMODE_24BPP_888;
448 val |= WINCONx_WSWP; 447 val |= WINCONx_WSWP;
449 val |= WINCONx_BURSTLEN_16WORD; 448 val |= WINCONx_BURSTLEN_16WORD;
450 break; 449 break;
451 case 32: 450 case DRM_FORMAT_ARGB8888:
452 val |= WINCON1_BPPMODE_28BPP_A4888 451 val |= WINCON1_BPPMODE_25BPP_A1888
453 | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL; 452 | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
454 val |= WINCONx_WSWP; 453 val |= WINCONx_WSWP;
455 val |= WINCONx_BURSTLEN_16WORD; 454 val |= WINCONx_BURSTLEN_16WORD;
@@ -746,45 +745,54 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
746 drm_iommu_detach_device(drm_dev, dev); 745 drm_iommu_detach_device(drm_dev, dev);
747} 746}
748 747
749static int fimd_calc_clkdiv(struct fimd_context *ctx, 748static int fimd_configure_clocks(struct fimd_context *ctx, struct device *dev)
750 struct fb_videomode *timing)
751{ 749{
752 unsigned long clk = clk_get_rate(ctx->lcd_clk); 750 struct videomode *vm = &ctx->panel.vm;
753 u32 retrace; 751 unsigned long clk;
754 u32 clkdiv; 752
755 u32 best_framerate = 0; 753 ctx->bus_clk = devm_clk_get(dev, "fimd");
756 u32 framerate; 754 if (IS_ERR(ctx->bus_clk)) {
757 755 dev_err(dev, "failed to get bus clock\n");
758 retrace = timing->left_margin + timing->hsync_len + 756 return PTR_ERR(ctx->bus_clk);
759 timing->right_margin + timing->xres; 757 }
760 retrace *= timing->upper_margin + timing->vsync_len + 758
761 timing->lower_margin + timing->yres; 759 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
762 760 if (IS_ERR(ctx->lcd_clk)) {
763 /* default framerate is 60Hz */ 761 dev_err(dev, "failed to get lcd clock\n");
764 if (!timing->refresh) 762 return PTR_ERR(ctx->lcd_clk);
765 timing->refresh = 60; 763 }
766 764
767 clk /= retrace; 765 clk = clk_get_rate(ctx->lcd_clk);
768 766 if (clk == 0) {
769 for (clkdiv = 1; clkdiv < 0x100; clkdiv++) { 767 dev_err(dev, "error getting sclk_fimd clock rate\n");
770 int tmp; 768 return -EINVAL;
771 769 }
772 /* get best framerate */ 770
773 framerate = clk / clkdiv; 771 if (vm->pixelclock == 0) {
774 tmp = timing->refresh - framerate; 772 unsigned long c;
775 if (tmp < 0) { 773 c = vm->hactive + vm->hback_porch + vm->hfront_porch +
776 best_framerate = framerate; 774 vm->hsync_len;
777 continue; 775 c *= vm->vactive + vm->vback_porch + vm->vfront_porch +
778 } else { 776 vm->vsync_len;
779 if (!best_framerate) 777 vm->pixelclock = c * FIMD_DEFAULT_FRAMERATE;
780 best_framerate = framerate; 778 if (vm->pixelclock == 0) {
781 else if (tmp < (best_framerate - framerate)) 779 dev_err(dev, "incorrect display timings\n");
782 best_framerate = framerate; 780 return -EINVAL;
783 break;
784 } 781 }
782 dev_warn(dev, "pixel clock recalculated to %luHz (%dHz frame rate)\n",
783 vm->pixelclock, FIMD_DEFAULT_FRAMERATE);
785 } 784 }
785 ctx->clkdiv = DIV_ROUND_UP(clk, vm->pixelclock);
786 if (ctx->clkdiv > 256) {
787 dev_warn(dev, "calculated pixel clock divider too high (%u), lowered to 256\n",
788 ctx->clkdiv);
789 ctx->clkdiv = 256;
790 }
791 vm->pixelclock = clk / ctx->clkdiv;
792 DRM_DEBUG_KMS("pixel clock = %lu, clkdiv = %d\n", vm->pixelclock,
793 ctx->clkdiv);
786 794
787 return clkdiv; 795 return 0;
788} 796}
789 797
790static void fimd_clear_win(struct fimd_context *ctx, int win) 798static void fimd_clear_win(struct fimd_context *ctx, int win)
@@ -876,59 +884,53 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
876 return 0; 884 return 0;
877} 885}
878 886
887static int fimd_get_platform_data(struct fimd_context *ctx, struct device *dev)
888{
889 struct videomode *vm;
890 int ret;
891
892 vm = &ctx->panel.vm;
893 ret = of_get_videomode(dev->of_node, vm, OF_USE_NATIVE_MODE);
894 if (ret) {
895 DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
896 return ret;
897 }
898
899 if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
900 ctx->vidcon1 |= VIDCON1_INV_VSYNC;
901 if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
902 ctx->vidcon1 |= VIDCON1_INV_HSYNC;
903 if (vm->flags & DISPLAY_FLAGS_DE_LOW)
904 ctx->vidcon1 |= VIDCON1_INV_VDEN;
905 if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
906 ctx->vidcon1 |= VIDCON1_INV_VCLK;
907
908 return 0;
909}
910
879static int fimd_probe(struct platform_device *pdev) 911static int fimd_probe(struct platform_device *pdev)
880{ 912{
881 struct device *dev = &pdev->dev; 913 struct device *dev = &pdev->dev;
882 struct fimd_context *ctx; 914 struct fimd_context *ctx;
883 struct exynos_drm_subdrv *subdrv; 915 struct exynos_drm_subdrv *subdrv;
884 struct exynos_drm_fimd_pdata *pdata;
885 struct exynos_drm_panel_info *panel;
886 struct resource *res; 916 struct resource *res;
887 int win; 917 int win;
888 int ret = -EINVAL; 918 int ret = -EINVAL;
889 919
890 if (dev->of_node) { 920 if (!dev->of_node)
891 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 921 return -ENODEV;
892 if (!pdata) {
893 DRM_ERROR("memory allocation for pdata failed\n");
894 return -ENOMEM;
895 }
896
897 ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing,
898 OF_USE_NATIVE_MODE);
899 if (ret) {
900 DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret);
901 return ret;
902 }
903 } else {
904 pdata = dev->platform_data;
905 if (!pdata) {
906 DRM_ERROR("no platform data specified\n");
907 return -EINVAL;
908 }
909 }
910
911 panel = &pdata->panel;
912 if (!panel) {
913 dev_err(dev, "panel is null.\n");
914 return -EINVAL;
915 }
916 922
917 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 923 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
918 if (!ctx) 924 if (!ctx)
919 return -ENOMEM; 925 return -ENOMEM;
920 926
921 ctx->bus_clk = devm_clk_get(dev, "fimd"); 927 ret = fimd_get_platform_data(ctx, dev);
922 if (IS_ERR(ctx->bus_clk)) { 928 if (ret)
923 dev_err(dev, "failed to get bus clock\n"); 929 return ret;
924 return PTR_ERR(ctx->bus_clk);
925 }
926 930
927 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd"); 931 ret = fimd_configure_clocks(ctx, dev);
928 if (IS_ERR(ctx->lcd_clk)) { 932 if (ret)
929 dev_err(dev, "failed to get lcd clock\n"); 933 return ret;
930 return PTR_ERR(ctx->lcd_clk);
931 }
932 934
933 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 935 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
934 936
@@ -952,10 +954,6 @@ static int fimd_probe(struct platform_device *pdev)
952 } 954 }
953 955
954 ctx->driver_data = drm_fimd_get_driver_data(pdev); 956 ctx->driver_data = drm_fimd_get_driver_data(pdev);
955 ctx->vidcon0 = pdata->vidcon0;
956 ctx->vidcon1 = pdata->vidcon1;
957 ctx->default_win = pdata->default_win;
958 ctx->panel = panel;
959 DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue); 957 DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
960 atomic_set(&ctx->wait_vsync_event, 0); 958 atomic_set(&ctx->wait_vsync_event, 0);
961 959
@@ -973,12 +971,6 @@ static int fimd_probe(struct platform_device *pdev)
973 pm_runtime_enable(dev); 971 pm_runtime_enable(dev);
974 pm_runtime_get_sync(dev); 972 pm_runtime_get_sync(dev);
975 973
976 ctx->clkdiv = fimd_calc_clkdiv(ctx, &panel->timing);
977 panel->timing.pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
978
979 DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
980 panel->timing.pixclock, ctx->clkdiv);
981
982 for (win = 0; win < WINDOWS_NR; win++) 974 for (win = 0; win < WINDOWS_NR; win++)
983 fimd_clear_win(ctx, win); 975 fimd_clear_win(ctx, win);
984 976
@@ -1067,20 +1059,6 @@ static int fimd_runtime_resume(struct device *dev)
1067} 1059}
1068#endif 1060#endif
1069 1061
1070static struct platform_device_id fimd_driver_ids[] = {
1071 {
1072 .name = "s3c64xx-fb",
1073 .driver_data = (unsigned long)&s3c64xx_fimd_driver_data,
1074 }, {
1075 .name = "exynos4-fb",
1076 .driver_data = (unsigned long)&exynos4_fimd_driver_data,
1077 }, {
1078 .name = "exynos5-fb",
1079 .driver_data = (unsigned long)&exynos5_fimd_driver_data,
1080 },
1081 {},
1082};
1083
1084static const struct dev_pm_ops fimd_pm_ops = { 1062static const struct dev_pm_ops fimd_pm_ops = {
1085 SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume) 1063 SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
1086 SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL) 1064 SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
@@ -1089,11 +1067,10 @@ static const struct dev_pm_ops fimd_pm_ops = {
1089struct platform_driver fimd_driver = { 1067struct platform_driver fimd_driver = {
1090 .probe = fimd_probe, 1068 .probe = fimd_probe,
1091 .remove = fimd_remove, 1069 .remove = fimd_remove,
1092 .id_table = fimd_driver_ids,
1093 .driver = { 1070 .driver = {
1094 .name = "exynos4-fb", 1071 .name = "exynos4-fb",
1095 .owner = THIS_MODULE, 1072 .owner = THIS_MODULE,
1096 .pm = &fimd_pm_ops, 1073 .pm = &fimd_pm_ops,
1097 .of_match_table = of_match_ptr(fimd_driver_dt_match), 1074 .of_match_table = fimd_driver_dt_match,
1098 }, 1075 },
1099}; 1076};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index eddea4941483..3271fd4b1724 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -23,6 +23,7 @@
23#include <drm/drmP.h> 23#include <drm/drmP.h>
24#include <drm/exynos_drm.h> 24#include <drm/exynos_drm.h>
25#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
26#include "exynos_drm_g2d.h"
26#include "exynos_drm_gem.h" 27#include "exynos_drm_gem.h"
27#include "exynos_drm_iommu.h" 28#include "exynos_drm_iommu.h"
28 29
@@ -446,10 +447,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
446 } 447 }
447 448
448 g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL); 449 g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
449 if (!g2d_userptr) { 450 if (!g2d_userptr)
450 DRM_ERROR("failed to allocate g2d_userptr.\n");
451 return ERR_PTR(-ENOMEM); 451 return ERR_PTR(-ENOMEM);
452 }
453 452
454 atomic_set(&g2d_userptr->refcount, 1); 453 atomic_set(&g2d_userptr->refcount, 1);
455 454
@@ -499,7 +498,6 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
499 498
500 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 499 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
501 if (!sgt) { 500 if (!sgt) {
502 DRM_ERROR("failed to allocate sg table.\n");
503 ret = -ENOMEM; 501 ret = -ENOMEM;
504 goto err_free_userptr; 502 goto err_free_userptr;
505 } 503 }
@@ -808,17 +806,8 @@ static void g2d_dma_start(struct g2d_data *g2d,
808 int ret; 806 int ret;
809 807
810 ret = pm_runtime_get_sync(g2d->dev); 808 ret = pm_runtime_get_sync(g2d->dev);
811 if (ret < 0) { 809 if (ret < 0)
812 dev_warn(g2d->dev, "failed pm power on.\n");
813 return;
814 }
815
816 ret = clk_prepare_enable(g2d->gate_clk);
817 if (ret < 0) {
818 dev_warn(g2d->dev, "failed to enable clock.\n");
819 pm_runtime_put_sync(g2d->dev);
820 return; 810 return;
821 }
822 811
823 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); 812 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
824 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); 813 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -871,7 +860,6 @@ static void g2d_runqueue_worker(struct work_struct *work)
871 runqueue_work); 860 runqueue_work);
872 861
873 mutex_lock(&g2d->runqueue_mutex); 862 mutex_lock(&g2d->runqueue_mutex);
874 clk_disable_unprepare(g2d->gate_clk);
875 pm_runtime_put_sync(g2d->dev); 863 pm_runtime_put_sync(g2d->dev);
876 864
877 complete(&g2d->runqueue_node->complete); 865 complete(&g2d->runqueue_node->complete);
@@ -1096,8 +1084,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1096 1084
1097 e = kzalloc(sizeof(*node->event), GFP_KERNEL); 1085 e = kzalloc(sizeof(*node->event), GFP_KERNEL);
1098 if (!e) { 1086 if (!e) {
1099 dev_err(dev, "failed to allocate event\n");
1100
1101 spin_lock_irqsave(&drm_dev->event_lock, flags); 1087 spin_lock_irqsave(&drm_dev->event_lock, flags);
1102 file->event_space += sizeof(e->event); 1088 file->event_space += sizeof(e->event);
1103 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 1089 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -1327,10 +1313,8 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
1327 struct exynos_drm_g2d_private *g2d_priv; 1313 struct exynos_drm_g2d_private *g2d_priv;
1328 1314
1329 g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL); 1315 g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
1330 if (!g2d_priv) { 1316 if (!g2d_priv)
1331 dev_err(dev, "failed to allocate g2d private data\n");
1332 return -ENOMEM; 1317 return -ENOMEM;
1333 }
1334 1318
1335 g2d_priv->dev = dev; 1319 g2d_priv->dev = dev;
1336 file_priv->g2d_priv = g2d_priv; 1320 file_priv->g2d_priv = g2d_priv;
@@ -1386,10 +1370,8 @@ static int g2d_probe(struct platform_device *pdev)
1386 int ret; 1370 int ret;
1387 1371
1388 g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL); 1372 g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
1389 if (!g2d) { 1373 if (!g2d)
1390 dev_err(dev, "failed to allocate driver data\n");
1391 return -ENOMEM; 1374 return -ENOMEM;
1392 }
1393 1375
1394 g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab", 1376 g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
1395 sizeof(struct g2d_runqueue_node), 0, 0, NULL); 1377 sizeof(struct g2d_runqueue_node), 0, 0, NULL);
@@ -1524,14 +1506,38 @@ static int g2d_resume(struct device *dev)
1524} 1506}
1525#endif 1507#endif
1526 1508
1527static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume); 1509#ifdef CONFIG_PM_RUNTIME
1510static int g2d_runtime_suspend(struct device *dev)
1511{
1512 struct g2d_data *g2d = dev_get_drvdata(dev);
1513
1514 clk_disable_unprepare(g2d->gate_clk);
1515
1516 return 0;
1517}
1518
1519static int g2d_runtime_resume(struct device *dev)
1520{
1521 struct g2d_data *g2d = dev_get_drvdata(dev);
1522 int ret;
1523
1524 ret = clk_prepare_enable(g2d->gate_clk);
1525 if (ret < 0)
1526 dev_warn(dev, "failed to enable clock.\n");
1527
1528 return ret;
1529}
1530#endif
1531
1532static const struct dev_pm_ops g2d_pm_ops = {
1533 SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
1534 SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
1535};
1528 1536
1529#ifdef CONFIG_OF
1530static const struct of_device_id exynos_g2d_match[] = { 1537static const struct of_device_id exynos_g2d_match[] = {
1531 { .compatible = "samsung,exynos5250-g2d" }, 1538 { .compatible = "samsung,exynos5250-g2d" },
1532 {}, 1539 {},
1533}; 1540};
1534#endif
1535 1541
1536struct platform_driver g2d_driver = { 1542struct platform_driver g2d_driver = {
1537 .probe = g2d_probe, 1543 .probe = g2d_probe,
@@ -1540,6 +1546,6 @@ struct platform_driver g2d_driver = {
1540 .name = "s5p-g2d", 1546 .name = "s5p-g2d",
1541 .owner = THIS_MODULE, 1547 .owner = THIS_MODULE,
1542 .pm = &g2d_pm_ops, 1548 .pm = &g2d_pm_ops,
1543 .of_match_table = of_match_ptr(exynos_g2d_match), 1549 .of_match_table = exynos_g2d_match,
1544 }, 1550 },
1545}; 1551};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f3c6f40666e1..49f9cd232757 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -18,6 +18,7 @@
18#include "exynos_drm_drv.h" 18#include "exynos_drm_drv.h"
19#include "exynos_drm_gem.h" 19#include "exynos_drm_gem.h"
20#include "exynos_drm_buf.h" 20#include "exynos_drm_buf.h"
21#include "exynos_drm_iommu.h"
21 22
22static unsigned int convert_to_vm_err_msg(int msg) 23static unsigned int convert_to_vm_err_msg(int msg)
23{ 24{
@@ -191,10 +192,8 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
191 int ret; 192 int ret;
192 193
193 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); 194 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
194 if (!exynos_gem_obj) { 195 if (!exynos_gem_obj)
195 DRM_ERROR("failed to allocate exynos gem object\n");
196 return NULL; 196 return NULL;
197 }
198 197
199 exynos_gem_obj->size = size; 198 exynos_gem_obj->size = size;
200 obj = &exynos_gem_obj->base; 199 obj = &exynos_gem_obj->base;
@@ -668,6 +667,18 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
668 667
669 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG | 668 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
670 EXYNOS_BO_WC, args->size); 669 EXYNOS_BO_WC, args->size);
670 /*
671 * If physically contiguous memory allocation fails and if IOMMU is
672 * supported then try to get buffer from non physically contiguous
673 * memory area.
674 */
675 if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
676 dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
677 exynos_gem_obj = exynos_drm_gem_create(dev,
678 EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
679 args->size);
680 }
681
671 if (IS_ERR(exynos_gem_obj)) 682 if (IS_ERR(exynos_gem_obj))
672 return PTR_ERR(exynos_gem_obj); 683 return PTR_ERR(exynos_gem_obj);
673 684
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 90b8a1a5344c..cd6aebd53bd0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -20,6 +20,7 @@
20#include <drm/drmP.h> 20#include <drm/drmP.h>
21#include <drm/exynos_drm.h> 21#include <drm/exynos_drm.h>
22#include "regs-gsc.h" 22#include "regs-gsc.h"
23#include "exynos_drm_drv.h"
23#include "exynos_drm_ipp.h" 24#include "exynos_drm_ipp.h"
24#include "exynos_drm_gsc.h" 25#include "exynos_drm_gsc.h"
25 26
@@ -1337,10 +1338,8 @@ static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1337 struct drm_exynos_ipp_prop_list *prop_list; 1338 struct drm_exynos_ipp_prop_list *prop_list;
1338 1339
1339 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); 1340 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1340 if (!prop_list) { 1341 if (!prop_list)
1341 DRM_ERROR("failed to alloc property list.\n");
1342 return -ENOMEM; 1342 return -ENOMEM;
1343 }
1344 1343
1345 prop_list->version = 1; 1344 prop_list->version = 1;
1346 prop_list->writeback = 1; 1345 prop_list->writeback = 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 8d3bc01d6834..8548b974bd59 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -403,10 +403,8 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev)
403 struct drm_hdmi_context *ctx; 403 struct drm_hdmi_context *ctx;
404 404
405 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 405 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
406 if (!ctx) { 406 if (!ctx)
407 DRM_LOG_KMS("failed to alloc common hdmi context.\n");
408 return -ENOMEM; 407 return -ENOMEM;
409 }
410 408
411 subdrv = &ctx->subdrv; 409 subdrv = &ctx->subdrv;
412 410
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index 3799d5c2b5df..fb8db0378274 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -47,10 +47,16 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
47 47
48 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), 48 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
49 GFP_KERNEL); 49 GFP_KERNEL);
50 if (!dev->dma_parms)
51 goto error;
52
50 dma_set_max_seg_size(dev, 0xffffffffu); 53 dma_set_max_seg_size(dev, 0xffffffffu);
51 dev->archdata.mapping = mapping; 54 dev->archdata.mapping = mapping;
52 55
53 return 0; 56 return 0;
57error:
58 arm_iommu_release_mapping(mapping);
59 return -ENOMEM;
54} 60}
55 61
56/* 62/*
@@ -91,6 +97,9 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
91 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, 97 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
92 sizeof(*subdrv_dev->dma_parms), 98 sizeof(*subdrv_dev->dma_parms),
93 GFP_KERNEL); 99 GFP_KERNEL);
100 if (!subdrv_dev->dma_parms)
101 return -ENOMEM;
102
94 dma_set_max_seg_size(subdrv_dev, 0xffffffffu); 103 dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
95 104
96 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); 105 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d2b6ab4def93..824e0705c8d3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -408,10 +408,8 @@ static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
408 struct drm_exynos_ipp_cmd_work *cmd_work; 408 struct drm_exynos_ipp_cmd_work *cmd_work;
409 409
410 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL); 410 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
411 if (!cmd_work) { 411 if (!cmd_work)
412 DRM_ERROR("failed to alloc cmd_work.\n");
413 return ERR_PTR(-ENOMEM); 412 return ERR_PTR(-ENOMEM);
414 }
415 413
416 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd); 414 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
417 415
@@ -423,10 +421,8 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
423 struct drm_exynos_ipp_event_work *event_work; 421 struct drm_exynos_ipp_event_work *event_work;
424 422
425 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL); 423 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
426 if (!event_work) { 424 if (!event_work)
427 DRM_ERROR("failed to alloc event_work.\n");
428 return ERR_PTR(-ENOMEM); 425 return ERR_PTR(-ENOMEM);
429 }
430 426
431 INIT_WORK((struct work_struct *)event_work, ipp_sched_event); 427 INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
432 428
@@ -482,10 +478,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
482 478
483 /* allocate command node */ 479 /* allocate command node */
484 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL); 480 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
485 if (!c_node) { 481 if (!c_node)
486 DRM_ERROR("failed to allocate map node.\n");
487 return -ENOMEM; 482 return -ENOMEM;
488 }
489 483
490 /* create property id */ 484 /* create property id */
491 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node, 485 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
@@ -694,10 +688,8 @@ static struct drm_exynos_ipp_mem_node
694 mutex_lock(&c_node->mem_lock); 688 mutex_lock(&c_node->mem_lock);
695 689
696 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); 690 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
697 if (!m_node) { 691 if (!m_node)
698 DRM_ERROR("failed to allocate queue node.\n");
699 goto err_unlock; 692 goto err_unlock;
700 }
701 693
702 /* clear base address for error handling */ 694 /* clear base address for error handling */
703 memset(&buf_info, 0x0, sizeof(buf_info)); 695 memset(&buf_info, 0x0, sizeof(buf_info));
@@ -798,9 +790,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
798 DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id); 790 DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
799 791
800 e = kzalloc(sizeof(*e), GFP_KERNEL); 792 e = kzalloc(sizeof(*e), GFP_KERNEL);
801
802 if (!e) { 793 if (!e) {
803 DRM_ERROR("failed to allocate event.\n");
804 spin_lock_irqsave(&drm_dev->event_lock, flags); 794 spin_lock_irqsave(&drm_dev->event_lock, flags);
805 file->event_space += sizeof(e->event); 795 file->event_space += sizeof(e->event);
806 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 796 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -1780,10 +1770,8 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1780 struct exynos_drm_ipp_private *priv; 1770 struct exynos_drm_ipp_private *priv;
1781 1771
1782 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 1772 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1783 if (!priv) { 1773 if (!priv)
1784 DRM_ERROR("failed to allocate priv.\n");
1785 return -ENOMEM; 1774 return -ENOMEM;
1786 }
1787 priv->dev = dev; 1775 priv->dev = dev;
1788 file_priv->ipp_priv = priv; 1776 file_priv->ipp_priv = priv;
1789 1777
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 6ee55e68e0a2..fcb0652e77d0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -16,6 +16,7 @@
16#include "exynos_drm_encoder.h" 16#include "exynos_drm_encoder.h"
17#include "exynos_drm_fb.h" 17#include "exynos_drm_fb.h"
18#include "exynos_drm_gem.h" 18#include "exynos_drm_gem.h"
19#include "exynos_drm_plane.h"
19 20
20#define to_exynos_plane(x) container_of(x, struct exynos_plane, base) 21#define to_exynos_plane(x) container_of(x, struct exynos_plane, base)
21 22
@@ -264,10 +265,8 @@ struct drm_plane *exynos_plane_init(struct drm_device *dev,
264 int err; 265 int err;
265 266
266 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); 267 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
267 if (!exynos_plane) { 268 if (!exynos_plane)
268 DRM_ERROR("failed to allocate plane\n");
269 return NULL; 269 return NULL;
270 }
271 270
272 err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs, 271 err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
273 &exynos_plane_funcs, formats, ARRAY_SIZE(formats), 272 &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 49669aa24c45..7b901688defa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -21,6 +21,7 @@
21#include <drm/exynos_drm.h> 21#include <drm/exynos_drm.h>
22#include "regs-rotator.h" 22#include "regs-rotator.h"
23#include "exynos_drm.h" 23#include "exynos_drm.h"
24#include "exynos_drm_drv.h"
24#include "exynos_drm_ipp.h" 25#include "exynos_drm_ipp.h"
25 26
26/* 27/*
@@ -471,10 +472,8 @@ static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
471 struct drm_exynos_ipp_prop_list *prop_list; 472 struct drm_exynos_ipp_prop_list *prop_list;
472 473
473 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); 474 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
474 if (!prop_list) { 475 if (!prop_list)
475 DRM_ERROR("failed to alloc property list.\n");
476 return -ENOMEM; 476 return -ENOMEM;
477 }
478 477
479 prop_list->version = 1; 478 prop_list->version = 1;
480 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) | 479 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
@@ -631,21 +630,96 @@ static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
631 return 0; 630 return 0;
632} 631}
633 632
633static struct rot_limit_table rot_limit_tbl_4210 = {
634 .ycbcr420_2p = {
635 .min_w = 32,
636 .min_h = 32,
637 .max_w = SZ_64K,
638 .max_h = SZ_64K,
639 .align = 3,
640 },
641 .rgb888 = {
642 .min_w = 8,
643 .min_h = 8,
644 .max_w = SZ_16K,
645 .max_h = SZ_16K,
646 .align = 2,
647 },
648};
649
650static struct rot_limit_table rot_limit_tbl_4x12 = {
651 .ycbcr420_2p = {
652 .min_w = 32,
653 .min_h = 32,
654 .max_w = SZ_32K,
655 .max_h = SZ_32K,
656 .align = 3,
657 },
658 .rgb888 = {
659 .min_w = 8,
660 .min_h = 8,
661 .max_w = SZ_8K,
662 .max_h = SZ_8K,
663 .align = 2,
664 },
665};
666
667static struct rot_limit_table rot_limit_tbl_5250 = {
668 .ycbcr420_2p = {
669 .min_w = 32,
670 .min_h = 32,
671 .max_w = SZ_32K,
672 .max_h = SZ_32K,
673 .align = 3,
674 },
675 .rgb888 = {
676 .min_w = 8,
677 .min_h = 8,
678 .max_w = SZ_8K,
679 .max_h = SZ_8K,
680 .align = 1,
681 },
682};
683
684static const struct of_device_id exynos_rotator_match[] = {
685 {
686 .compatible = "samsung,exynos4210-rotator",
687 .data = &rot_limit_tbl_4210,
688 },
689 {
690 .compatible = "samsung,exynos4212-rotator",
691 .data = &rot_limit_tbl_4x12,
692 },
693 {
694 .compatible = "samsung,exynos5250-rotator",
695 .data = &rot_limit_tbl_5250,
696 },
697 {},
698};
699
634static int rotator_probe(struct platform_device *pdev) 700static int rotator_probe(struct platform_device *pdev)
635{ 701{
636 struct device *dev = &pdev->dev; 702 struct device *dev = &pdev->dev;
637 struct rot_context *rot; 703 struct rot_context *rot;
638 struct exynos_drm_ippdrv *ippdrv; 704 struct exynos_drm_ippdrv *ippdrv;
705 const struct of_device_id *match;
639 int ret; 706 int ret;
640 707
708 if (!dev->of_node) {
709 dev_err(dev, "cannot find of_node.\n");
710 return -ENODEV;
711 }
712
641 rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL); 713 rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
642 if (!rot) { 714 if (!rot)
643 dev_err(dev, "failed to allocate rot\n");
644 return -ENOMEM; 715 return -ENOMEM;
645 }
646 716
647 rot->limit_tbl = (struct rot_limit_table *) 717 match = of_match_node(exynos_rotator_match, dev->of_node);
648 platform_get_device_id(pdev)->driver_data; 718 if (!match) {
719 dev_err(dev, "failed to match node\n");
720 return -ENODEV;
721 }
722 rot->limit_tbl = (struct rot_limit_table *)match->data;
649 723
650 rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 724 rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
651 rot->regs = devm_ioremap_resource(dev, rot->regs_res); 725 rot->regs = devm_ioremap_resource(dev, rot->regs_res);
@@ -717,31 +791,6 @@ static int rotator_remove(struct platform_device *pdev)
717 return 0; 791 return 0;
718} 792}
719 793
720static struct rot_limit_table rot_limit_tbl = {
721 .ycbcr420_2p = {
722 .min_w = 32,
723 .min_h = 32,
724 .max_w = SZ_32K,
725 .max_h = SZ_32K,
726 .align = 3,
727 },
728 .rgb888 = {
729 .min_w = 8,
730 .min_h = 8,
731 .max_w = SZ_8K,
732 .max_h = SZ_8K,
733 .align = 2,
734 },
735};
736
737static struct platform_device_id rotator_driver_ids[] = {
738 {
739 .name = "exynos-rot",
740 .driver_data = (unsigned long)&rot_limit_tbl,
741 },
742 {},
743};
744
745static int rotator_clk_crtl(struct rot_context *rot, bool enable) 794static int rotator_clk_crtl(struct rot_context *rot, bool enable)
746{ 795{
747 if (enable) { 796 if (enable) {
@@ -803,10 +852,10 @@ static const struct dev_pm_ops rotator_pm_ops = {
803struct platform_driver rotator_driver = { 852struct platform_driver rotator_driver = {
804 .probe = rotator_probe, 853 .probe = rotator_probe,
805 .remove = rotator_remove, 854 .remove = rotator_remove,
806 .id_table = rotator_driver_ids,
807 .driver = { 855 .driver = {
808 .name = "exynos-rot", 856 .name = "exynos-rot",
809 .owner = THIS_MODULE, 857 .owner = THIS_MODULE,
810 .pm = &rotator_pm_ops, 858 .pm = &rotator_pm_ops,
859 .of_match_table = exynos_rotator_match,
811 }, 860 },
812}; 861};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index c57c56519add..4400330e4449 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -23,6 +23,7 @@
23#include "exynos_drm_drv.h" 23#include "exynos_drm_drv.h"
24#include "exynos_drm_crtc.h" 24#include "exynos_drm_crtc.h"
25#include "exynos_drm_encoder.h" 25#include "exynos_drm_encoder.h"
26#include "exynos_drm_vidi.h"
26 27
27/* vidi has totally three virtual windows. */ 28/* vidi has totally three virtual windows. */
28#define WINDOWS_NR 3 29#define WINDOWS_NR 3
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2f5c6942c968..a0e10aeb0e67 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -32,6 +32,7 @@
32#include <linux/clk.h> 32#include <linux/clk.h>
33#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/of.h>
35#include <linux/of_gpio.h> 36#include <linux/of_gpio.h>
36 37
37#include <drm/exynos_drm.h> 38#include <drm/exynos_drm.h>
@@ -1824,10 +1825,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
1824 1825
1825 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) * 1826 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
1826 sizeof(res->regul_bulk[0]), GFP_KERNEL); 1827 sizeof(res->regul_bulk[0]), GFP_KERNEL);
1827 if (!res->regul_bulk) { 1828 if (!res->regul_bulk)
1828 DRM_ERROR("failed to get memory for regulators\n");
1829 goto fail; 1829 goto fail;
1830 }
1831 for (i = 0; i < ARRAY_SIZE(supply); ++i) { 1830 for (i = 0; i < ARRAY_SIZE(supply); ++i) {
1832 res->regul_bulk[i].supply = supply[i]; 1831 res->regul_bulk[i].supply = supply[i];
1833 res->regul_bulk[i].consumer = NULL; 1832 res->regul_bulk[i].consumer = NULL;
@@ -1859,7 +1858,6 @@ void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
1859 hdmi_hdmiphy = hdmiphy; 1858 hdmi_hdmiphy = hdmiphy;
1860} 1859}
1861 1860
1862#ifdef CONFIG_OF
1863static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata 1861static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
1864 (struct device *dev) 1862 (struct device *dev)
1865{ 1863{
@@ -1868,10 +1866,8 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
1868 u32 value; 1866 u32 value;
1869 1867
1870 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); 1868 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1871 if (!pd) { 1869 if (!pd)
1872 DRM_ERROR("memory allocation for pdata failed\n");
1873 goto err_data; 1870 goto err_data;
1874 }
1875 1871
1876 if (!of_find_property(np, "hpd-gpio", &value)) { 1872 if (!of_find_property(np, "hpd-gpio", &value)) {
1877 DRM_ERROR("no hpd gpio property found\n"); 1873 DRM_ERROR("no hpd gpio property found\n");
@@ -1885,33 +1881,7 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
1885err_data: 1881err_data:
1886 return NULL; 1882 return NULL;
1887} 1883}
1888#else
1889static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
1890 (struct device *dev)
1891{
1892 return NULL;
1893}
1894#endif
1895
1896static struct platform_device_id hdmi_driver_types[] = {
1897 {
1898 .name = "s5pv210-hdmi",
1899 .driver_data = HDMI_TYPE13,
1900 }, {
1901 .name = "exynos4-hdmi",
1902 .driver_data = HDMI_TYPE13,
1903 }, {
1904 .name = "exynos4-hdmi14",
1905 .driver_data = HDMI_TYPE14,
1906 }, {
1907 .name = "exynos5-hdmi",
1908 .driver_data = HDMI_TYPE14,
1909 }, {
1910 /* end node */
1911 }
1912};
1913 1884
1914#ifdef CONFIG_OF
1915static struct of_device_id hdmi_match_types[] = { 1885static struct of_device_id hdmi_match_types[] = {
1916 { 1886 {
1917 .compatible = "samsung,exynos5-hdmi", 1887 .compatible = "samsung,exynos5-hdmi",
@@ -1923,7 +1893,6 @@ static struct of_device_id hdmi_match_types[] = {
1923 /* end node */ 1893 /* end node */
1924 } 1894 }
1925}; 1895};
1926#endif
1927 1896
1928static int hdmi_probe(struct platform_device *pdev) 1897static int hdmi_probe(struct platform_device *pdev)
1929{ 1898{
@@ -1932,36 +1901,23 @@ static int hdmi_probe(struct platform_device *pdev)
1932 struct hdmi_context *hdata; 1901 struct hdmi_context *hdata;
1933 struct s5p_hdmi_platform_data *pdata; 1902 struct s5p_hdmi_platform_data *pdata;
1934 struct resource *res; 1903 struct resource *res;
1904 const struct of_device_id *match;
1935 int ret; 1905 int ret;
1936 1906
1937 if (dev->of_node) { 1907 if (!dev->of_node)
1938 pdata = drm_hdmi_dt_parse_pdata(dev); 1908 return -ENODEV;
1939 if (IS_ERR(pdata)) {
1940 DRM_ERROR("failed to parse dt\n");
1941 return PTR_ERR(pdata);
1942 }
1943 } else {
1944 pdata = dev->platform_data;
1945 }
1946 1909
1947 if (!pdata) { 1910 pdata = drm_hdmi_dt_parse_pdata(dev);
1948 DRM_ERROR("no platform data specified\n"); 1911 if (!pdata)
1949 return -EINVAL; 1912 return -EINVAL;
1950 }
1951 1913
1952 drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), 1914 drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL);
1953 GFP_KERNEL); 1915 if (!drm_hdmi_ctx)
1954 if (!drm_hdmi_ctx) {
1955 DRM_ERROR("failed to allocate common hdmi context.\n");
1956 return -ENOMEM; 1916 return -ENOMEM;
1957 }
1958 1917
1959 hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), 1918 hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
1960 GFP_KERNEL); 1919 if (!hdata)
1961 if (!hdata) {
1962 DRM_ERROR("out of memory\n");
1963 return -ENOMEM; 1920 return -ENOMEM;
1964 }
1965 1921
1966 mutex_init(&hdata->hdmi_mutex); 1922 mutex_init(&hdata->hdmi_mutex);
1967 1923
@@ -1970,23 +1926,15 @@ static int hdmi_probe(struct platform_device *pdev)
1970 1926
1971 platform_set_drvdata(pdev, drm_hdmi_ctx); 1927 platform_set_drvdata(pdev, drm_hdmi_ctx);
1972 1928
1973 if (dev->of_node) { 1929 match = of_match_node(hdmi_match_types, dev->of_node);
1974 const struct of_device_id *match; 1930 if (!match)
1975 match = of_match_node(of_match_ptr(hdmi_match_types), 1931 return -ENODEV;
1976 dev->of_node); 1932 hdata->type = (enum hdmi_type)match->data;
1977 if (match == NULL)
1978 return -ENODEV;
1979 hdata->type = (enum hdmi_type)match->data;
1980 } else {
1981 hdata->type = (enum hdmi_type)platform_get_device_id
1982 (pdev)->driver_data;
1983 }
1984 1933
1985 hdata->hpd_gpio = pdata->hpd_gpio; 1934 hdata->hpd_gpio = pdata->hpd_gpio;
1986 hdata->dev = dev; 1935 hdata->dev = dev;
1987 1936
1988 ret = hdmi_resources_init(hdata); 1937 ret = hdmi_resources_init(hdata);
1989
1990 if (ret) { 1938 if (ret) {
1991 DRM_ERROR("hdmi_resources_init failed\n"); 1939 DRM_ERROR("hdmi_resources_init failed\n");
1992 return -EINVAL; 1940 return -EINVAL;
@@ -2141,11 +2089,10 @@ static const struct dev_pm_ops hdmi_pm_ops = {
2141struct platform_driver hdmi_driver = { 2089struct platform_driver hdmi_driver = {
2142 .probe = hdmi_probe, 2090 .probe = hdmi_probe,
2143 .remove = hdmi_remove, 2091 .remove = hdmi_remove,
2144 .id_table = hdmi_driver_types,
2145 .driver = { 2092 .driver = {
2146 .name = "exynos-hdmi", 2093 .name = "exynos-hdmi",
2147 .owner = THIS_MODULE, 2094 .owner = THIS_MODULE,
2148 .pm = &hdmi_pm_ops, 2095 .pm = &hdmi_pm_ops,
2149 .of_match_table = of_match_ptr(hdmi_match_types), 2096 .of_match_table = hdmi_match_types,
2150 }, 2097 },
2151}; 2098};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 6e320ae9afed..59abb1494ceb 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/i2c.h> 17#include <linux/i2c.h>
18#include <linux/of.h>
18 19
19#include "exynos_drm_drv.h" 20#include "exynos_drm_drv.h"
20#include "exynos_hdmi.h" 21#include "exynos_hdmi.h"
@@ -39,13 +40,6 @@ static int hdmiphy_remove(struct i2c_client *client)
39 return 0; 40 return 0;
40} 41}
41 42
42static const struct i2c_device_id hdmiphy_id[] = {
43 { "s5p_hdmiphy", 0 },
44 { "exynos5-hdmiphy", 0 },
45 { },
46};
47
48#ifdef CONFIG_OF
49static struct of_device_id hdmiphy_match_types[] = { 43static struct of_device_id hdmiphy_match_types[] = {
50 { 44 {
51 .compatible = "samsung,exynos5-hdmiphy", 45 .compatible = "samsung,exynos5-hdmiphy",
@@ -57,15 +51,13 @@ static struct of_device_id hdmiphy_match_types[] = {
57 /* end node */ 51 /* end node */
58 } 52 }
59}; 53};
60#endif
61 54
62struct i2c_driver hdmiphy_driver = { 55struct i2c_driver hdmiphy_driver = {
63 .driver = { 56 .driver = {
64 .name = "exynos-hdmiphy", 57 .name = "exynos-hdmiphy",
65 .owner = THIS_MODULE, 58 .owner = THIS_MODULE,
66 .of_match_table = of_match_ptr(hdmiphy_match_types), 59 .of_match_table = hdmiphy_match_types,
67 }, 60 },
68 .id_table = hdmiphy_id,
69 .probe = hdmiphy_probe, 61 .probe = hdmiphy_probe,
70 .remove = hdmiphy_remove, 62 .remove = hdmiphy_remove,
71 .command = NULL, 63 .command = NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index c9a137caea41..63bc5f92fbb3 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -30,6 +30,7 @@
30#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
31#include <linux/clk.h> 31#include <linux/clk.h>
32#include <linux/regulator/consumer.h> 32#include <linux/regulator/consumer.h>
33#include <linux/of.h>
33 34
34#include <drm/exynos_drm.h> 35#include <drm/exynos_drm.h>
35 36
@@ -1185,16 +1186,12 @@ static int mixer_probe(struct platform_device *pdev)
1185 1186
1186 drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), 1187 drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
1187 GFP_KERNEL); 1188 GFP_KERNEL);
1188 if (!drm_hdmi_ctx) { 1189 if (!drm_hdmi_ctx)
1189 DRM_ERROR("failed to allocate common hdmi context.\n");
1190 return -ENOMEM; 1190 return -ENOMEM;
1191 }
1192 1191
1193 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 1192 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1194 if (!ctx) { 1193 if (!ctx)
1195 DRM_ERROR("failed to alloc mixer context.\n");
1196 return -ENOMEM; 1194 return -ENOMEM;
1197 }
1198 1195
1199 mutex_init(&ctx->mixer_mutex); 1196 mutex_init(&ctx->mixer_mutex);
1200 1197
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index c2bd711e86e9..b1f8fc69023f 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -596,6 +596,10 @@ tda998x_configure_audio(struct drm_encoder *encoder,
596 cts_n = CTS_N_M(3) | CTS_N_K(3); 596 cts_n = CTS_N_M(3) | CTS_N_K(3);
597 ca_i2s = CA_I2S_CA_I2S(0); 597 ca_i2s = CA_I2S_CA_I2S(0);
598 break; 598 break;
599
600 default:
601 BUG();
602 return;
599 } 603 }
600 604
601 reg_write(encoder, REG_AIP_CLKSEL, clksel_aip); 605 reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 55ab9246e1b9..a6f4cb5af185 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -857,7 +857,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
857 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 857 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
858 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 858 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
859 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 859 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
860 u32 rpstat, cagf; 860 u32 rpstat, cagf, reqf;
861 u32 rpupei, rpcurup, rpprevup; 861 u32 rpupei, rpcurup, rpprevup;
862 u32 rpdownei, rpcurdown, rpprevdown; 862 u32 rpdownei, rpcurdown, rpprevdown;
863 int max_freq; 863 int max_freq;
@@ -869,6 +869,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
869 869
870 gen6_gt_force_wake_get(dev_priv); 870 gen6_gt_force_wake_get(dev_priv);
871 871
872 reqf = I915_READ(GEN6_RPNSWREQ);
873 reqf &= ~GEN6_TURBO_DISABLE;
874 if (IS_HASWELL(dev))
875 reqf >>= 24;
876 else
877 reqf >>= 25;
878 reqf *= GT_FREQUENCY_MULTIPLIER;
879
872 rpstat = I915_READ(GEN6_RPSTAT1); 880 rpstat = I915_READ(GEN6_RPSTAT1);
873 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 881 rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
874 rpcurup = I915_READ(GEN6_RP_CUR_UP); 882 rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -893,6 +901,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
893 gt_perf_status & 0xff); 901 gt_perf_status & 0xff);
894 seq_printf(m, "Render p-state limit: %d\n", 902 seq_printf(m, "Render p-state limit: %d\n",
895 rp_state_limits & 0xff); 903 rp_state_limits & 0xff);
904 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
896 seq_printf(m, "CAGF: %dMHz\n", cagf); 905 seq_printf(m, "CAGF: %dMHz\n", cagf);
897 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 906 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
898 GEN6_CURICONT_MASK); 907 GEN6_CURICONT_MASK);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index fdaa0915ce56..9b265a4c6a3d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,9 +1290,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
1290 * then we do not take part in VGA arbitration and the 1290 * then we do not take part in VGA arbitration and the
1291 * vga_client_register() fails with -ENODEV. 1291 * vga_client_register() fails with -ENODEV.
1292 */ 1292 */
1293 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); 1293 if (!HAS_PCH_SPLIT(dev)) {
1294 if (ret && ret != -ENODEV) 1294 ret = vga_client_register(dev->pdev, dev, NULL,
1295 goto out; 1295 i915_vga_set_decode);
1296 if (ret && ret != -ENODEV)
1297 goto out;
1298 }
1296 1299
1297 intel_register_dsm_handler(); 1300 intel_register_dsm_handler();
1298 1301
@@ -1348,6 +1351,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
1348 */ 1351 */
1349 intel_fbdev_initial_config(dev); 1352 intel_fbdev_initial_config(dev);
1350 1353
1354 /*
1355 * Must do this after fbcon init so that
1356 * vgacon_save_screen() works during the handover.
1357 */
1358 i915_disable_vga_mem(dev);
1359
1351 /* Only enable hotplug handling once the fbdev is fully set up. */ 1360 /* Only enable hotplug handling once the fbdev is fully set up. */
1352 dev_priv->enable_hotplug_processing = true; 1361 dev_priv->enable_hotplug_processing = true;
1353 1362
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ccb28ead3501..69d8ed5416c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -157,25 +157,6 @@ MODULE_PARM_DESC(prefault_disable,
157static struct drm_driver driver; 157static struct drm_driver driver;
158extern int intel_agp_enabled; 158extern int intel_agp_enabled;
159 159
160#define INTEL_VGA_DEVICE(id, info) { \
161 .class = PCI_BASE_CLASS_DISPLAY << 16, \
162 .class_mask = 0xff0000, \
163 .vendor = 0x8086, \
164 .device = id, \
165 .subvendor = PCI_ANY_ID, \
166 .subdevice = PCI_ANY_ID, \
167 .driver_data = (unsigned long) info }
168
169#define INTEL_QUANTA_VGA_DEVICE(info) { \
170 .class = PCI_BASE_CLASS_DISPLAY << 16, \
171 .class_mask = 0xff0000, \
172 .vendor = 0x8086, \
173 .device = 0x16a, \
174 .subvendor = 0x152d, \
175 .subdevice = 0x8990, \
176 .driver_data = (unsigned long) info }
177
178
179static const struct intel_device_info intel_i830_info = { 160static const struct intel_device_info intel_i830_info = {
180 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, 161 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
181 .has_overlay = 1, .overlay_needs_physical = 1, 162 .has_overlay = 1, .overlay_needs_physical = 1,
@@ -350,118 +331,41 @@ static const struct intel_device_info intel_haswell_m_info = {
350 .has_vebox_ring = 1, 331 .has_vebox_ring = 1,
351}; 332};
352 333
334/*
335 * Make sure any device matches here are from most specific to most
336 * general. For example, since the Quanta match is based on the subsystem
337 * and subvendor IDs, we need it to come before the more general IVB
338 * PCI ID matches, otherwise we'll use the wrong info struct above.
339 */
340#define INTEL_PCI_IDS \
341 INTEL_I830_IDS(&intel_i830_info), \
342 INTEL_I845G_IDS(&intel_845g_info), \
343 INTEL_I85X_IDS(&intel_i85x_info), \
344 INTEL_I865G_IDS(&intel_i865g_info), \
345 INTEL_I915G_IDS(&intel_i915g_info), \
346 INTEL_I915GM_IDS(&intel_i915gm_info), \
347 INTEL_I945G_IDS(&intel_i945g_info), \
348 INTEL_I945GM_IDS(&intel_i945gm_info), \
349 INTEL_I965G_IDS(&intel_i965g_info), \
350 INTEL_G33_IDS(&intel_g33_info), \
351 INTEL_I965GM_IDS(&intel_i965gm_info), \
352 INTEL_GM45_IDS(&intel_gm45_info), \
353 INTEL_G45_IDS(&intel_g45_info), \
354 INTEL_PINEVIEW_IDS(&intel_pineview_info), \
355 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
356 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
357 INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
358 INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
359 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
360 INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
361 INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
362 INTEL_HSW_D_IDS(&intel_haswell_d_info), \
363 INTEL_HSW_M_IDS(&intel_haswell_m_info), \
364 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
365 INTEL_VLV_D_IDS(&intel_valleyview_d_info)
366
353static const struct pci_device_id pciidlist[] = { /* aka */ 367static const struct pci_device_id pciidlist[] = { /* aka */
354 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ 368 INTEL_PCI_IDS,
355 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */
356 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */
357 INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
358 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */
359 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
360 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
361 INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */
362 INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */
363 INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */
364 INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */
365 INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */
366 INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */
367 INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */
368 INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */
369 INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */
370 INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */
371 INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */
372 INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */
373 INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */
374 INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */
375 INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */
376 INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */
377 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
378 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
379 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
380 INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
381 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
382 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
383 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
384 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
385 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
386 INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
387 INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
388 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
389 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
390 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
391 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
392 INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
393 INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
394 INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
395 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
396 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
397 INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
398 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
399 INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
400 INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
401 INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
402 INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
403 INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
404 INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
405 INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
406 INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
407 INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
408 INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
409 INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
410 INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
411 INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
412 INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
413 INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
414 INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
415 INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
416 INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
417 INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
418 INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
419 INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
420 INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
421 INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
422 INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
423 INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
424 INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
425 INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
426 INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
427 INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
428 INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
429 INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
430 INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
431 INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
432 INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
433 INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
434 INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
435 INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
436 INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
437 INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
438 INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
439 INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
440 INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
441 INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
442 INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
443 INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
444 INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
445 INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
446 INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
447 INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
448 INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
449 INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
450 INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
451 INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
452 INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
453 INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
454 INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
455 INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
456 INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
457 INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
458 INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
459 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
460 INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
461 INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
462 INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
463 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
464 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
465 {0, 0, 0} 369 {0, 0, 0}
466}; 370};
467 371
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52a3785a3fdf..35874b3a86dc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1236,6 +1236,13 @@ typedef struct drm_i915_private {
1236 1236
1237 unsigned int fsb_freq, mem_freq, is_ddr3; 1237 unsigned int fsb_freq, mem_freq, is_ddr3;
1238 1238
1239 /**
1240 * wq - Driver workqueue for GEM.
1241 *
1242 * NOTE: Work items scheduled here are not allowed to grab any modeset
1243 * locks, for otherwise the flushing done in the pageflip code will
1244 * result in deadlocks.
1245 */
1239 struct workqueue_struct *wq; 1246 struct workqueue_struct *wq;
1240 1247
1241 /* Display functions */ 1248 /* Display functions */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d1cb10d846f..d9e337feef14 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -212,7 +212,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
212void *i915_gem_object_alloc(struct drm_device *dev) 212void *i915_gem_object_alloc(struct drm_device *dev)
213{ 213{
214 struct drm_i915_private *dev_priv = dev->dev_private; 214 struct drm_i915_private *dev_priv = dev->dev_private;
215 return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO); 215 return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
216} 216}
217 217
218void i915_gem_object_free(struct drm_i915_gem_object *obj) 218void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -1695,6 +1695,7 @@ static long
1695__i915_gem_shrink(struct drm_i915_private *dev_priv, long target, 1695__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1696 bool purgeable_only) 1696 bool purgeable_only)
1697{ 1697{
1698 struct list_head still_bound_list;
1698 struct drm_i915_gem_object *obj, *next; 1699 struct drm_i915_gem_object *obj, *next;
1699 long count = 0; 1700 long count = 0;
1700 1701
@@ -1709,23 +1710,55 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1709 } 1710 }
1710 } 1711 }
1711 1712
1712 list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list, 1713 /*
1713 global_list) { 1714 * As we may completely rewrite the bound list whilst unbinding
1715 * (due to retiring requests) we have to strictly process only
 1716 * one element of the list at a time, and recheck the list
1717 * on every iteration.
1718 */
1719 INIT_LIST_HEAD(&still_bound_list);
1720 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
1714 struct i915_vma *vma, *v; 1721 struct i915_vma *vma, *v;
1715 1722
1723 obj = list_first_entry(&dev_priv->mm.bound_list,
1724 typeof(*obj), global_list);
1725 list_move_tail(&obj->global_list, &still_bound_list);
1726
1716 if (!i915_gem_object_is_purgeable(obj) && purgeable_only) 1727 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1717 continue; 1728 continue;
1718 1729
1730 /*
1731 * Hold a reference whilst we unbind this object, as we may
1732 * end up waiting for and retiring requests. This might
1733 * release the final reference (held by the active list)
 1734 * and result in the object being freed from under us,
 1735 * i.e. in this object being freed.
1736 *
1737 * Note 1: Shrinking the bound list is special since only active
1738 * (and hence bound objects) can contain such limbo objects, so
1739 * we don't need special tricks for shrinking the unbound list.
1740 * The only other place where we have to be careful with active
1741 * objects suddenly disappearing due to retiring requests is the
1742 * eviction code.
1743 *
1744 * Note 2: Even though the bound list doesn't hold a reference
1745 * to the object we can safely grab one here: The final object
1746 * unreferencing and the bound_list are both protected by the
1747 * dev->struct_mutex and so we won't ever be able to observe an
 1748 * object on the bound_list with a reference count equal to 0.
1749 */
1750 drm_gem_object_reference(&obj->base);
1751
1719 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) 1752 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1720 if (i915_vma_unbind(vma)) 1753 if (i915_vma_unbind(vma))
1721 break; 1754 break;
1722 1755
1723 if (!i915_gem_object_put_pages(obj)) { 1756 if (i915_gem_object_put_pages(obj) == 0)
1724 count += obj->base.size >> PAGE_SHIFT; 1757 count += obj->base.size >> PAGE_SHIFT;
1725 if (count >= target) 1758
1726 return count; 1759 drm_gem_object_unreference(&obj->base);
1727 }
1728 } 1760 }
1761 list_splice(&still_bound_list, &dev_priv->mm.bound_list);
1729 1762
1730 return count; 1763 return count;
1731} 1764}
@@ -1774,7 +1807,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1774 1807
1775 page_count = obj->base.size / PAGE_SIZE; 1808 page_count = obj->base.size / PAGE_SIZE;
1776 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 1809 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1777 sg_free_table(st);
1778 kfree(st); 1810 kfree(st);
1779 return -ENOMEM; 1811 return -ENOMEM;
1780 } 1812 }
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e918b05fcbdd..7d5752fda5f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -42,27 +42,24 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
42 42
43 ret = i915_mutex_lock_interruptible(obj->base.dev); 43 ret = i915_mutex_lock_interruptible(obj->base.dev);
44 if (ret) 44 if (ret)
45 return ERR_PTR(ret); 45 goto err;
46 46
47 ret = i915_gem_object_get_pages(obj); 47 ret = i915_gem_object_get_pages(obj);
48 if (ret) { 48 if (ret)
49 st = ERR_PTR(ret); 49 goto err_unlock;
50 goto out; 50
51 } 51 i915_gem_object_pin_pages(obj);
52 52
53 /* Copy sg so that we make an independent mapping */ 53 /* Copy sg so that we make an independent mapping */
54 st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 54 st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
55 if (st == NULL) { 55 if (st == NULL) {
56 st = ERR_PTR(-ENOMEM); 56 ret = -ENOMEM;
57 goto out; 57 goto err_unpin;
58 } 58 }
59 59
60 ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL); 60 ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
61 if (ret) { 61 if (ret)
62 kfree(st); 62 goto err_free;
63 st = ERR_PTR(ret);
64 goto out;
65 }
66 63
67 src = obj->pages->sgl; 64 src = obj->pages->sgl;
68 dst = st->sgl; 65 dst = st->sgl;
@@ -73,17 +70,23 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
73 } 70 }
74 71
75 if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { 72 if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
 76 sg_free_table(st); 73 ret = -ENOMEM;
77 kfree(st); 74 goto err_free_sg;
78 st = ERR_PTR(-ENOMEM);
79 goto out;
80 } 75 }
81 76
82 i915_gem_object_pin_pages(obj);
83
84out:
85 mutex_unlock(&obj->base.dev->struct_mutex); 77 mutex_unlock(&obj->base.dev->struct_mutex);
86 return st; 78 return st;
79
80err_free_sg:
81 sg_free_table(st);
82err_free:
83 kfree(st);
84err_unpin:
85 i915_gem_object_unpin_pages(obj);
86err_unlock:
87 mutex_unlock(&obj->base.dev->struct_mutex);
88err:
89 return ERR_PTR(ret);
87} 90}
88 91
89static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, 92static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 792c52a235ee..bf345777ae9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -310,6 +310,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
310 else 310 else
311 ret = relocate_entry_gtt(obj, reloc); 311 ret = relocate_entry_gtt(obj, reloc);
312 312
313 if (ret)
314 return ret;
315
313 /* and update the user's relocation entry */ 316 /* and update the user's relocation entry */
314 reloc->presumed_offset = target_offset; 317 reloc->presumed_offset = target_offset;
315 318
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9969d10b80f5..e15a1d90037d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -201,6 +201,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
201 struct drm_i915_private *dev_priv = dev->dev_private; 201 struct drm_i915_private *dev_priv = dev->dev_private;
202 int bios_reserved = 0; 202 int bios_reserved = 0;
203 203
204 if (dev_priv->gtt.stolen_size == 0)
205 return 0;
206
204 dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); 207 dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
205 if (dev_priv->mm.stolen_base == 0) 208 if (dev_priv->mm.stolen_base == 0)
206 return 0; 209 return 0;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 558e568d5b45..aba9d7498996 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -641,7 +641,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
641 if (WARN_ON(ring->id != RCS)) 641 if (WARN_ON(ring->id != RCS))
642 return NULL; 642 return NULL;
643 643
644 obj = ring->private; 644 obj = ring->scratch.obj;
645 if (acthd >= i915_gem_obj_ggtt_offset(obj) && 645 if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
646 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) 646 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
647 return i915_error_object_create(dev_priv, obj); 647 return i915_error_object_create(dev_priv, obj);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a03b445ceb5f..83cce0cdb769 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1027,8 +1027,13 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1027 dev_priv->display.hpd_irq_setup(dev); 1027 dev_priv->display.hpd_irq_setup(dev);
1028 spin_unlock(&dev_priv->irq_lock); 1028 spin_unlock(&dev_priv->irq_lock);
1029 1029
1030 queue_work(dev_priv->wq, 1030 /*
1031 &dev_priv->hotplug_work); 1031 * Our hotplug handler can grab modeset locks (by calling down into the
 1032 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1033 * queue for otherwise the flush_work in the pageflip code will
1034 * deadlock.
1035 */
1036 schedule_work(&dev_priv->hotplug_work);
1032} 1037}
1033 1038
1034static void gmbus_irq_handler(struct drm_device *dev) 1039static void gmbus_irq_handler(struct drm_device *dev)
@@ -1655,7 +1660,13 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
1655 wake_up_all(&ring->irq_queue); 1660 wake_up_all(&ring->irq_queue);
1656 } 1661 }
1657 1662
1658 queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 1663 /*
1664 * Our reset work can grab modeset locks (since it needs to reset the
 1665 * state of outstanding pageflips). Hence it must not be run on our own
 1666 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
1667 * code will deadlock.
1668 */
1669 schedule_work(&dev_priv->gpu_error.work);
1659} 1670}
1660 1671
1661static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 1672static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -2027,9 +2038,9 @@ static void i915_hangcheck_elapsed(unsigned long data)
2027 2038
2028 for_each_ring(ring, dev_priv, i) { 2039 for_each_ring(ring, dev_priv, i) {
2029 if (ring->hangcheck.score > FIRE) { 2040 if (ring->hangcheck.score > FIRE) {
2030 DRM_ERROR("%s on %s\n", 2041 DRM_INFO("%s on %s\n",
2031 stuck[i] ? "stuck" : "no progress", 2042 stuck[i] ? "stuck" : "no progress",
2032 ring->name); 2043 ring->name);
2033 rings_hung++; 2044 rings_hung++;
2034 } 2045 }
2035 } 2046 }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b6a58f720f9a..c159e1a6810f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -33,21 +33,6 @@
33#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) 33#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
34#define _MASKED_BIT_DISABLE(a) ((a) << 16) 34#define _MASKED_BIT_DISABLE(a) ((a) << 16)
35 35
36/*
37 * The Bridge device's PCI config space has information about the
38 * fb aperture size and the amount of pre-reserved memory.
39 * This is all handled in the intel-gtt.ko module. i915.ko only
40 * cares about the vga bit for the vga rbiter.
41 */
42#define INTEL_GMCH_CTRL 0x52
43#define INTEL_GMCH_VGA_DISABLE (1 << 1)
44#define SNB_GMCH_CTRL 0x50
45#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
46#define SNB_GMCH_GGMS_MASK 0x3
47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
48#define SNB_GMCH_GMS_MASK 0x1f
49
50
51/* PCI config space */ 36/* PCI config space */
52 37
53#define HPLLCC 0xc0 /* 855 only */ 38#define HPLLCC 0xc0 /* 855 only */
@@ -245,6 +230,7 @@
 245 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! 230
246 */ 231 */
247#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 232#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
233#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
248#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 234#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
249#define MI_FLUSH_DW_STORE_INDEX (1<<21) 235#define MI_FLUSH_DW_STORE_INDEX (1<<21)
250#define MI_INVALIDATE_TLB (1<<18) 236#define MI_INVALIDATE_TLB (1<<18)
@@ -693,6 +679,23 @@
693#define FPGA_DBG_RM_NOCLAIM (1<<31) 679#define FPGA_DBG_RM_NOCLAIM (1<<31)
694 680
695#define DERRMR 0x44050 681#define DERRMR 0x44050
682#define DERRMR_PIPEA_SCANLINE (1<<0)
683#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
684#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
685#define DERRMR_PIPEA_VBLANK (1<<3)
686#define DERRMR_PIPEA_HBLANK (1<<5)
687#define DERRMR_PIPEB_SCANLINE (1<<8)
688#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9)
689#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10)
690#define DERRMR_PIPEB_VBLANK (1<<11)
691#define DERRMR_PIPEB_HBLANK (1<<13)
692/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
693#define DERRMR_PIPEC_SCANLINE (1<<14)
694#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15)
695#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20)
696#define DERRMR_PIPEC_VBLANK (1<<21)
697#define DERRMR_PIPEC_HBLANK (1<<22)
698
696 699
697/* GM45+ chicken bits -- debug workaround bits that may be required 700/* GM45+ chicken bits -- debug workaround bits that may be required
698 * for various sorts of correct behavior. The top 16 bits of each are 701 * for various sorts of correct behavior. The top 16 bits of each are
@@ -3310,6 +3313,7 @@
3310#define MCURSOR_PIPE_A 0x00 3313#define MCURSOR_PIPE_A 0x00
3311#define MCURSOR_PIPE_B (1 << 28) 3314#define MCURSOR_PIPE_B (1 << 28)
3312#define MCURSOR_GAMMA_ENABLE (1 << 26) 3315#define MCURSOR_GAMMA_ENABLE (1 << 26)
3316#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
3313#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) 3317#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
3314#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) 3318#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
3315#define CURSOR_POS_MASK 0x007FF 3319#define CURSOR_POS_MASK 0x007FF
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a777e7f3b0df..c8c4112de110 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -224,6 +224,18 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
224 return snprintf(buf, PAGE_SIZE, "%d\n", ret); 224 return snprintf(buf, PAGE_SIZE, "%d\n", ret);
225} 225}
226 226
227static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
228 struct device_attribute *attr, char *buf)
229{
230 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
231 struct drm_device *dev = minor->dev;
232 struct drm_i915_private *dev_priv = dev->dev_private;
233
234 return snprintf(buf, PAGE_SIZE, "%d\n",
235 vlv_gpu_freq(dev_priv->mem_freq,
236 dev_priv->rps.rpe_delay));
237}
238
227static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 239static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
228{ 240{
229 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 241 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
@@ -366,6 +378,7 @@ static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
366static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); 378static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
367static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); 379static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
368 380
381static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
369 382
370static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf); 383static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
371static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); 384static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
@@ -409,6 +422,14 @@ static const struct attribute *gen6_attrs[] = {
409 NULL, 422 NULL,
410}; 423};
411 424
425static const struct attribute *vlv_attrs[] = {
426 &dev_attr_gt_cur_freq_mhz.attr,
427 &dev_attr_gt_max_freq_mhz.attr,
428 &dev_attr_gt_min_freq_mhz.attr,
429 &dev_attr_vlv_rpe_freq_mhz.attr,
430 NULL,
431};
432
412static ssize_t error_state_read(struct file *filp, struct kobject *kobj, 433static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
413 struct bin_attribute *attr, char *buf, 434 struct bin_attribute *attr, char *buf,
414 loff_t off, size_t count) 435 loff_t off, size_t count)
@@ -492,11 +513,13 @@ void i915_setup_sysfs(struct drm_device *dev)
492 DRM_ERROR("l3 parity sysfs setup failed\n"); 513 DRM_ERROR("l3 parity sysfs setup failed\n");
493 } 514 }
494 515
495 if (INTEL_INFO(dev)->gen >= 6) { 516 ret = 0;
517 if (IS_VALLEYVIEW(dev))
518 ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
519 else if (INTEL_INFO(dev)->gen >= 6)
496 ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs); 520 ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
497 if (ret) 521 if (ret)
498 DRM_ERROR("gen6 sysfs setup failed\n"); 522 DRM_ERROR("RPS sysfs setup failed\n");
499 }
500 523
501 ret = sysfs_create_bin_file(&dev->primary->kdev.kobj, 524 ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
502 &error_state_attr); 525 &error_state_attr);
@@ -507,7 +530,10 @@ void i915_setup_sysfs(struct drm_device *dev)
507void i915_teardown_sysfs(struct drm_device *dev) 530void i915_teardown_sysfs(struct drm_device *dev)
508{ 531{
509 sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr); 532 sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
510 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); 533 if (IS_VALLEYVIEW(dev))
534 sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
535 else
536 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
511 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 537 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
512#ifdef CONFIG_PM 538#ifdef CONFIG_PM
513 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); 539 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b5a3875f22c7..ea9022ef15d5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -688,7 +688,7 @@ static void intel_crt_reset(struct drm_connector *connector)
688 struct drm_i915_private *dev_priv = dev->dev_private; 688 struct drm_i915_private *dev_priv = dev->dev_private;
689 struct intel_crt *crt = intel_attached_crt(connector); 689 struct intel_crt *crt = intel_attached_crt(connector);
690 690
691 if (HAS_PCH_SPLIT(dev)) { 691 if (INTEL_INFO(dev)->gen >= 5) {
692 u32 adpa; 692 u32 adpa;
693 693
694 adpa = I915_READ(crt->adpa_reg); 694 adpa = I915_READ(crt->adpa_reg);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 38452d82ac7d..2489d0b4c7d2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2077,8 +2077,10 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2077 else 2077 else
2078 dspcntr &= ~DISPPLANE_TILED; 2078 dspcntr &= ~DISPPLANE_TILED;
2079 2079
2080 /* must disable */ 2080 if (IS_HASWELL(dev))
2081 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2081 dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2082 else
2083 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2082 2084
2083 I915_WRITE(reg, dspcntr); 2085 I915_WRITE(reg, dspcntr);
2084 2086
@@ -6762,8 +6764,10 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6762 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 6764 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6763 cntl |= CURSOR_MODE_DISABLE; 6765 cntl |= CURSOR_MODE_DISABLE;
6764 } 6766 }
6765 if (IS_HASWELL(dev)) 6767 if (IS_HASWELL(dev)) {
6766 cntl |= CURSOR_PIPE_CSC_ENABLE; 6768 cntl |= CURSOR_PIPE_CSC_ENABLE;
6769 cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
6770 }
6767 I915_WRITE(CURCNTR_IVB(pipe), cntl); 6771 I915_WRITE(CURCNTR_IVB(pipe), cntl);
6768 6772
6769 intel_crtc->cursor_visible = visible; 6773 intel_crtc->cursor_visible = visible;
@@ -7309,8 +7313,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7309 } 7313 }
7310 } 7314 }
7311 7315
7312 pipe_config->adjusted_mode.clock = clock.dot * 7316 pipe_config->adjusted_mode.clock = clock.dot;
7313 pipe_config->pixel_multiplier;
7314} 7317}
7315 7318
7316static void ironlake_crtc_clock_get(struct intel_crtc *crtc, 7319static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
@@ -7828,12 +7831,6 @@ err:
7828 return ret; 7831 return ret;
7829} 7832}
7830 7833
7831/*
7832 * On gen7 we currently use the blit ring because (in early silicon at least)
7833 * the render ring doesn't give us interrpts for page flip completion, which
7834 * means clients will hang after the first flip is queued. Fortunately the
7835 * blit ring generates interrupts properly, so use it instead.
7836 */
7837static int intel_gen7_queue_flip(struct drm_device *dev, 7834static int intel_gen7_queue_flip(struct drm_device *dev,
7838 struct drm_crtc *crtc, 7835 struct drm_crtc *crtc,
7839 struct drm_framebuffer *fb, 7836 struct drm_framebuffer *fb,
@@ -7842,9 +7839,13 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7842{ 7839{
7843 struct drm_i915_private *dev_priv = dev->dev_private; 7840 struct drm_i915_private *dev_priv = dev->dev_private;
7844 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7841 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7845 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 7842 struct intel_ring_buffer *ring;
7846 uint32_t plane_bit = 0; 7843 uint32_t plane_bit = 0;
7847 int ret; 7844 int len, ret;
7845
7846 ring = obj->ring;
7847 if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
7848 ring = &dev_priv->ring[BCS];
7848 7849
7849 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 7850 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7850 if (ret) 7851 if (ret)
@@ -7866,10 +7867,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7866 goto err_unpin; 7867 goto err_unpin;
7867 } 7868 }
7868 7869
7869 ret = intel_ring_begin(ring, 4); 7870 len = 4;
7871 if (ring->id == RCS)
7872 len += 6;
7873
7874 ret = intel_ring_begin(ring, len);
7870 if (ret) 7875 if (ret)
7871 goto err_unpin; 7876 goto err_unpin;
7872 7877
7878 /* Unmask the flip-done completion message. Note that the bspec says that
7879 * we should do this for both the BCS and RCS, and that we must not unmask
7880 * more than one flip event at any time (or ensure that one flip message
7881 * can be sent by waiting for flip-done prior to queueing new flips).
7882 * Experimentation says that BCS works despite DERRMR masking all
7883 * flip-done completion events and that unmasking all planes at once
7884 * for the RCS also doesn't appear to drop events. Setting the DERRMR
7885 * to zero does lead to lockups within MI_DISPLAY_FLIP.
7886 */
7887 if (ring->id == RCS) {
7888 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
7889 intel_ring_emit(ring, DERRMR);
7890 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
7891 DERRMR_PIPEB_PRI_FLIP_DONE |
7892 DERRMR_PIPEC_PRI_FLIP_DONE));
7893 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
7894 intel_ring_emit(ring, DERRMR);
7895 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
7896 }
7897
7873 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 7898 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
7874 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 7899 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7875 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); 7900 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
@@ -10022,6 +10047,33 @@ static void i915_disable_vga(struct drm_device *dev)
10022 POSTING_READ(vga_reg); 10047 POSTING_READ(vga_reg);
10023} 10048}
10024 10049
10050static void i915_enable_vga_mem(struct drm_device *dev)
10051{
10052 /* Enable VGA memory on Intel HD */
10053 if (HAS_PCH_SPLIT(dev)) {
10054 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10055 outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
10056 vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
10057 VGA_RSRC_LEGACY_MEM |
10058 VGA_RSRC_NORMAL_IO |
10059 VGA_RSRC_NORMAL_MEM);
10060 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10061 }
10062}
10063
10064void i915_disable_vga_mem(struct drm_device *dev)
10065{
10066 /* Disable VGA memory on Intel HD */
10067 if (HAS_PCH_SPLIT(dev)) {
10068 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10069 outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
10070 vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
10071 VGA_RSRC_NORMAL_IO |
10072 VGA_RSRC_NORMAL_MEM);
10073 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10074 }
10075}
10076
10025void intel_modeset_init_hw(struct drm_device *dev) 10077void intel_modeset_init_hw(struct drm_device *dev)
10026{ 10078{
10027 intel_init_power_well(dev); 10079 intel_init_power_well(dev);
@@ -10300,6 +10352,7 @@ void i915_redisable_vga(struct drm_device *dev)
10300 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 10352 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
10301 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 10353 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
10302 i915_disable_vga(dev); 10354 i915_disable_vga(dev);
10355 i915_disable_vga_mem(dev);
10303 } 10356 }
10304} 10357}
10305 10358
@@ -10513,6 +10566,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
10513 10566
10514 intel_disable_fbc(dev); 10567 intel_disable_fbc(dev);
10515 10568
10569 i915_enable_vga_mem(dev);
10570
10516 intel_disable_gt_powersave(dev); 10571 intel_disable_gt_powersave(dev);
10517 10572
10518 ironlake_teardown_rc6(dev); 10573 ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 176080822a74..a47799e832c6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -551,7 +551,7 @@ extern int intel_panel_init(struct intel_panel *panel,
551 struct drm_display_mode *fixed_mode); 551 struct drm_display_mode *fixed_mode);
552extern void intel_panel_fini(struct intel_panel *panel); 552extern void intel_panel_fini(struct intel_panel *panel);
553 553
554extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 554extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
555 struct drm_display_mode *adjusted_mode); 555 struct drm_display_mode *adjusted_mode);
556extern void intel_pch_panel_fitting(struct intel_crtc *crtc, 556extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
557 struct intel_crtc_config *pipe_config, 557 struct intel_crtc_config *pipe_config,
@@ -792,5 +792,6 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
792extern void hsw_pc8_restore_interrupts(struct drm_device *dev); 792extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
793extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); 793extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
794extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); 794extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
795extern void i915_disable_vga_mem(struct drm_device *dev);
795 796
796#endif /* __INTEL_DRV_H__ */ 797#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4d33278e31fb..831a5c021c4b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -128,8 +128,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
128 struct drm_device *dev = encoder->base.dev; 128 struct drm_device *dev = encoder->base.dev;
129 struct drm_i915_private *dev_priv = dev->dev_private; 129 struct drm_i915_private *dev_priv = dev->dev_private;
130 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 130 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
131 struct drm_display_mode *fixed_mode = 131 const struct drm_display_mode *adjusted_mode =
132 lvds_encoder->attached_connector->base.panel.fixed_mode; 132 &crtc->config.adjusted_mode;
133 int pipe = crtc->pipe; 133 int pipe = crtc->pipe;
134 u32 temp; 134 u32 temp;
135 135
@@ -183,9 +183,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
183 temp &= ~LVDS_ENABLE_DITHER; 183 temp &= ~LVDS_ENABLE_DITHER;
184 } 184 }
185 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 185 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
186 if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC) 186 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
187 temp |= LVDS_HSYNC_POLARITY; 187 temp |= LVDS_HSYNC_POLARITY;
188 if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC) 188 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
189 temp |= LVDS_VSYNC_POLARITY; 189 temp |= LVDS_VSYNC_POLARITY;
190 190
191 I915_WRITE(lvds_encoder->reg, temp); 191 I915_WRITE(lvds_encoder->reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index cfb8fb68f09c..119771ff46ab 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -173,7 +173,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
173 return ASLE_BACKLIGHT_FAILED; 173 return ASLE_BACKLIGHT_FAILED;
174 174
175 intel_panel_set_backlight(dev, bclp, 255); 175 intel_panel_set_backlight(dev, bclp, 255);
176 iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv); 176 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
177 177
178 return 0; 178 return 0;
179} 179}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a43c33bc4a35..42114ecbae0e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -36,20 +36,12 @@
36#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ 36#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
37 37
38void 38void
39intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 39intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
40 struct drm_display_mode *adjusted_mode) 40 struct drm_display_mode *adjusted_mode)
41{ 41{
42 adjusted_mode->hdisplay = fixed_mode->hdisplay; 42 drm_mode_copy(adjusted_mode, fixed_mode);
43 adjusted_mode->hsync_start = fixed_mode->hsync_start;
44 adjusted_mode->hsync_end = fixed_mode->hsync_end;
45 adjusted_mode->htotal = fixed_mode->htotal;
46 43
47 adjusted_mode->vdisplay = fixed_mode->vdisplay; 44 drm_mode_set_crtcinfo(adjusted_mode, 0);
48 adjusted_mode->vsync_start = fixed_mode->vsync_start;
49 adjusted_mode->vsync_end = fixed_mode->vsync_end;
50 adjusted_mode->vtotal = fixed_mode->vtotal;
51
52 adjusted_mode->clock = fixed_mode->clock;
53} 45}
54 46
55/* adjusted_mode has been preset to be the panel's fixed mode */ 47/* adjusted_mode has been preset to be the panel's fixed mode */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 46056820d1d2..0c115cc4899f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3447,14 +3447,24 @@ int intel_enable_rc6(const struct drm_device *dev)
3447static void gen6_enable_rps_interrupts(struct drm_device *dev) 3447static void gen6_enable_rps_interrupts(struct drm_device *dev)
3448{ 3448{
3449 struct drm_i915_private *dev_priv = dev->dev_private; 3449 struct drm_i915_private *dev_priv = dev->dev_private;
3450 u32 enabled_intrs;
3450 3451
3451 spin_lock_irq(&dev_priv->irq_lock); 3452 spin_lock_irq(&dev_priv->irq_lock);
3452 WARN_ON(dev_priv->rps.pm_iir); 3453 WARN_ON(dev_priv->rps.pm_iir);
3453 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 3454 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
3454 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); 3455 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3455 spin_unlock_irq(&dev_priv->irq_lock); 3456 spin_unlock_irq(&dev_priv->irq_lock);
3457
3456 /* only unmask PM interrupts we need. Mask all others. */ 3458 /* only unmask PM interrupts we need. Mask all others. */
3457 I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS); 3459 enabled_intrs = GEN6_PM_RPS_EVENTS;
3460
3461 /* IVB and SNB hard hangs on looping batchbuffer
3462 * if GEN6_PM_UP_EI_EXPIRED is masked.
3463 */
3464 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
3465 enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
3466
3467 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
3458} 3468}
3459 3469
3460static void gen6_enable_rps(struct drm_device *dev) 3470static void gen6_enable_rps(struct drm_device *dev)
@@ -4950,8 +4960,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
4950 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 4960 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4951 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 4961 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4952 4962
4953 g4x_disable_trickle_feed(dev);
4954
4955 /* WaVSRefCountFullforceMissDisable:hsw */ 4963 /* WaVSRefCountFullforceMissDisable:hsw */
4956 gen7_setup_fixed_func_scheduler(dev_priv); 4964 gen7_setup_fixed_func_scheduler(dev_priv);
4957 4965
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f05cceac5a52..460ee1026fca 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,16 +33,6 @@
33#include "i915_trace.h" 33#include "i915_trace.h"
34#include "intel_drv.h" 34#include "intel_drv.h"
35 35
36/*
37 * 965+ support PIPE_CONTROL commands, which provide finer grained control
38 * over cache flushing.
39 */
40struct pipe_control {
41 struct drm_i915_gem_object *obj;
42 volatile u32 *cpu_page;
43 u32 gtt_offset;
44};
45
46static inline int ring_space(struct intel_ring_buffer *ring) 36static inline int ring_space(struct intel_ring_buffer *ring)
47{ 37{
48 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); 38 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -175,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
175static int 165static int
176intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) 166intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
177{ 167{
178 struct pipe_control *pc = ring->private; 168 u32 scratch_addr = ring->scratch.gtt_offset + 128;
179 u32 scratch_addr = pc->gtt_offset + 128;
180 int ret; 169 int ret;
181 170
182 171
@@ -213,8 +202,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
213 u32 invalidate_domains, u32 flush_domains) 202 u32 invalidate_domains, u32 flush_domains)
214{ 203{
215 u32 flags = 0; 204 u32 flags = 0;
216 struct pipe_control *pc = ring->private; 205 u32 scratch_addr = ring->scratch.gtt_offset + 128;
217 u32 scratch_addr = pc->gtt_offset + 128;
218 int ret; 206 int ret;
219 207
220 /* Force SNB workarounds for PIPE_CONTROL flushes */ 208 /* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +294,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
306 u32 invalidate_domains, u32 flush_domains) 294 u32 invalidate_domains, u32 flush_domains)
307{ 295{
308 u32 flags = 0; 296 u32 flags = 0;
309 struct pipe_control *pc = ring->private; 297 u32 scratch_addr = ring->scratch.gtt_offset + 128;
310 u32 scratch_addr = pc->gtt_offset + 128;
311 int ret; 298 int ret;
312 299
313 /* 300 /*
@@ -481,68 +468,43 @@ out:
481static int 468static int
482init_pipe_control(struct intel_ring_buffer *ring) 469init_pipe_control(struct intel_ring_buffer *ring)
483{ 470{
484 struct pipe_control *pc;
485 struct drm_i915_gem_object *obj;
486 int ret; 471 int ret;
487 472
488 if (ring->private) 473 if (ring->scratch.obj)
489 return 0; 474 return 0;
490 475
491 pc = kmalloc(sizeof(*pc), GFP_KERNEL); 476 ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
492 if (!pc) 477 if (ring->scratch.obj == NULL) {
493 return -ENOMEM;
494
495 obj = i915_gem_alloc_object(ring->dev, 4096);
496 if (obj == NULL) {
497 DRM_ERROR("Failed to allocate seqno page\n"); 478 DRM_ERROR("Failed to allocate seqno page\n");
498 ret = -ENOMEM; 479 ret = -ENOMEM;
499 goto err; 480 goto err;
500 } 481 }
501 482
502 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 483 i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
503 484
504 ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); 485 ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
505 if (ret) 486 if (ret)
506 goto err_unref; 487 goto err_unref;
507 488
508 pc->gtt_offset = i915_gem_obj_ggtt_offset(obj); 489 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
509 pc->cpu_page = kmap(sg_page(obj->pages->sgl)); 490 ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
510 if (pc->cpu_page == NULL) { 491 if (ring->scratch.cpu_page == NULL) {
511 ret = -ENOMEM; 492 ret = -ENOMEM;
512 goto err_unpin; 493 goto err_unpin;
513 } 494 }
514 495
515 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 496 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
516 ring->name, pc->gtt_offset); 497 ring->name, ring->scratch.gtt_offset);
517
518 pc->obj = obj;
519 ring->private = pc;
520 return 0; 498 return 0;
521 499
522err_unpin: 500err_unpin:
523 i915_gem_object_unpin(obj); 501 i915_gem_object_unpin(ring->scratch.obj);
524err_unref: 502err_unref:
525 drm_gem_object_unreference(&obj->base); 503 drm_gem_object_unreference(&ring->scratch.obj->base);
526err: 504err:
527 kfree(pc);
528 return ret; 505 return ret;
529} 506}
530 507
531static void
532cleanup_pipe_control(struct intel_ring_buffer *ring)
533{
534 struct pipe_control *pc = ring->private;
535 struct drm_i915_gem_object *obj;
536
537 obj = pc->obj;
538
539 kunmap(sg_page(obj->pages->sgl));
540 i915_gem_object_unpin(obj);
541 drm_gem_object_unreference(&obj->base);
542
543 kfree(pc);
544}
545
546static int init_render_ring(struct intel_ring_buffer *ring) 508static int init_render_ring(struct intel_ring_buffer *ring)
547{ 509{
548 struct drm_device *dev = ring->dev; 510 struct drm_device *dev = ring->dev;
@@ -607,16 +569,16 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
607{ 569{
608 struct drm_device *dev = ring->dev; 570 struct drm_device *dev = ring->dev;
609 571
610 if (!ring->private) 572 if (ring->scratch.obj == NULL)
611 return; 573 return;
612 574
613 if (HAS_BROKEN_CS_TLB(dev)) 575 if (INTEL_INFO(dev)->gen >= 5) {
614 drm_gem_object_unreference(to_gem_object(ring->private)); 576 kunmap(sg_page(ring->scratch.obj->pages->sgl));
615 577 i915_gem_object_unpin(ring->scratch.obj);
616 if (INTEL_INFO(dev)->gen >= 5) 578 }
617 cleanup_pipe_control(ring);
618 579
619 ring->private = NULL; 580 drm_gem_object_unreference(&ring->scratch.obj->base);
581 ring->scratch.obj = NULL;
620} 582}
621 583
622static void 584static void
@@ -742,8 +704,7 @@ do { \
742static int 704static int
743pc_render_add_request(struct intel_ring_buffer *ring) 705pc_render_add_request(struct intel_ring_buffer *ring)
744{ 706{
745 struct pipe_control *pc = ring->private; 707 u32 scratch_addr = ring->scratch.gtt_offset + 128;
746 u32 scratch_addr = pc->gtt_offset + 128;
747 int ret; 708 int ret;
748 709
749 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently 710 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,7 +722,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
761 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 722 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
762 PIPE_CONTROL_WRITE_FLUSH | 723 PIPE_CONTROL_WRITE_FLUSH |
763 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 724 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
764 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 725 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
765 intel_ring_emit(ring, ring->outstanding_lazy_request); 726 intel_ring_emit(ring, ring->outstanding_lazy_request);
766 intel_ring_emit(ring, 0); 727 intel_ring_emit(ring, 0);
767 PIPE_CONTROL_FLUSH(ring, scratch_addr); 728 PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -780,7 +741,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
780 PIPE_CONTROL_WRITE_FLUSH | 741 PIPE_CONTROL_WRITE_FLUSH |
781 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 742 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
782 PIPE_CONTROL_NOTIFY); 743 PIPE_CONTROL_NOTIFY);
783 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 744 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
784 intel_ring_emit(ring, ring->outstanding_lazy_request); 745 intel_ring_emit(ring, ring->outstanding_lazy_request);
785 intel_ring_emit(ring, 0); 746 intel_ring_emit(ring, 0);
786 intel_ring_advance(ring); 747 intel_ring_advance(ring);
@@ -814,15 +775,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
814static u32 775static u32
815pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 776pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
816{ 777{
817 struct pipe_control *pc = ring->private; 778 return ring->scratch.cpu_page[0];
818 return pc->cpu_page[0];
819} 779}
820 780
821static void 781static void
822pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) 782pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
823{ 783{
824 struct pipe_control *pc = ring->private; 784 ring->scratch.cpu_page[0] = seqno;
825 pc->cpu_page[0] = seqno;
826} 785}
827 786
828static bool 787static bool
@@ -1141,8 +1100,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
1141 intel_ring_emit(ring, MI_NOOP); 1100 intel_ring_emit(ring, MI_NOOP);
1142 intel_ring_advance(ring); 1101 intel_ring_advance(ring);
1143 } else { 1102 } else {
1144 struct drm_i915_gem_object *obj = ring->private; 1103 u32 cs_offset = ring->scratch.gtt_offset;
1145 u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
1146 1104
1147 if (len > I830_BATCH_LIMIT) 1105 if (len > I830_BATCH_LIMIT)
1148 return -ENOSPC; 1106 return -ENOSPC;
@@ -1835,7 +1793,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1835 return ret; 1793 return ret;
1836 } 1794 }
1837 1795
1838 ring->private = obj; 1796 ring->scratch.obj = obj;
1797 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
1839 } 1798 }
1840 1799
1841 return intel_init_ring_buffer(dev, ring); 1800 return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 432ad5311ba6..68b1ca974d59 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -155,7 +155,11 @@ struct intel_ring_buffer {
155 155
156 struct intel_ring_hangcheck hangcheck; 156 struct intel_ring_hangcheck hangcheck;
157 157
158 void *private; 158 struct {
159 struct drm_i915_gem_object *obj;
160 u32 gtt_offset;
161 volatile u32 *cpu_page;
162 } scratch;
159}; 163};
160 164
161static inline bool 165static inline bool
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 317e058fb3cf..85037b9d4934 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1151,11 +1151,10 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1151{ 1151{
1152 struct drm_device *dev = intel_encoder->base.dev; 1152 struct drm_device *dev = intel_encoder->base.dev;
1153 struct drm_i915_private *dev_priv = dev->dev_private; 1153 struct drm_i915_private *dev_priv = dev->dev_private;
1154 struct drm_crtc *crtc = intel_encoder->base.crtc; 1154 struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
1155 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1156 struct drm_display_mode *adjusted_mode = 1155 struct drm_display_mode *adjusted_mode =
1157 &intel_crtc->config.adjusted_mode; 1156 &crtc->config.adjusted_mode;
1158 struct drm_display_mode *mode = &intel_crtc->config.requested_mode; 1157 struct drm_display_mode *mode = &crtc->config.requested_mode;
1159 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); 1158 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
1160 u32 sdvox; 1159 u32 sdvox;
1161 struct intel_sdvo_in_out_map in_out; 1160 struct intel_sdvo_in_out_map in_out;
@@ -1213,13 +1212,15 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1213 * adjusted_mode. 1212 * adjusted_mode.
1214 */ 1213 */
1215 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1214 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1215 input_dtd.part1.clock /= crtc->config.pixel_multiplier;
1216
1216 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) 1217 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1217 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; 1218 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
1218 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) 1219 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
1219 DRM_INFO("Setting input timings on %s failed\n", 1220 DRM_INFO("Setting input timings on %s failed\n",
1220 SDVO_NAME(intel_sdvo)); 1221 SDVO_NAME(intel_sdvo));
1221 1222
1222 switch (intel_crtc->config.pixel_multiplier) { 1223 switch (crtc->config.pixel_multiplier) {
1223 default: 1224 default:
1224 WARN(1, "unknown pixel mutlipler specified\n"); 1225 WARN(1, "unknown pixel mutlipler specified\n");
1225 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; 1226 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1252,9 +1253,9 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1252 } 1253 }
1253 1254
1254 if (INTEL_PCH_TYPE(dev) >= PCH_CPT) 1255 if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
1255 sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe); 1256 sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
1256 else 1257 else
1257 sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe); 1258 sdvox |= SDVO_PIPE_SEL(crtc->pipe);
1258 1259
1259 if (intel_sdvo->has_hdmi_audio) 1260 if (intel_sdvo->has_hdmi_audio)
1260 sdvox |= SDVO_AUDIO_ENABLE; 1261 sdvox |= SDVO_AUDIO_ENABLE;
@@ -1264,7 +1265,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1264 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 1265 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
1265 /* done in crtc_mode_set as it lives inside the dpll register */ 1266 /* done in crtc_mode_set as it lives inside the dpll register */
1266 } else { 1267 } else {
1267 sdvox |= (intel_crtc->config.pixel_multiplier - 1) 1268 sdvox |= (crtc->config.pixel_multiplier - 1)
1268 << SDVO_PORT_MULTIPLY_SHIFT; 1269 << SDVO_PORT_MULTIPLY_SHIFT;
1269 } 1270 }
1270 1271
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 78b621cdd108..ad6ec4b39005 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,8 +260,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
260 if (obj->tiling_mode != I915_TILING_NONE) 260 if (obj->tiling_mode != I915_TILING_NONE)
261 sprctl |= SPRITE_TILED; 261 sprctl |= SPRITE_TILED;
262 262
263 /* must disable */ 263 if (IS_HASWELL(dev))
264 sprctl |= SPRITE_TRICKLE_FEED_DISABLE; 264 sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
265 else
266 sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
267
265 sprctl |= SPRITE_ENABLE; 268 sprctl |= SPRITE_ENABLE;
266 269
267 if (IS_HASWELL(dev)) 270 if (IS_HASWELL(dev))
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8f5bc869c023..8649f1c36b00 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -261,7 +261,7 @@ void intel_uncore_init(struct drm_device *dev)
261 } 261 }
262} 262}
263 263
264void intel_uncore_sanitize(struct drm_device *dev) 264static void intel_uncore_forcewake_reset(struct drm_device *dev)
265{ 265{
266 struct drm_i915_private *dev_priv = dev->dev_private; 266 struct drm_i915_private *dev_priv = dev->dev_private;
267 267
@@ -272,6 +272,11 @@ void intel_uncore_sanitize(struct drm_device *dev)
272 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 272 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
273 __gen6_gt_force_wake_mt_reset(dev_priv); 273 __gen6_gt_force_wake_mt_reset(dev_priv);
274 } 274 }
275}
276
277void intel_uncore_sanitize(struct drm_device *dev)
278{
279 intel_uncore_forcewake_reset(dev);
275 280
276 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 281 /* BIOS often leaves RC6 enabled, but disable it for hw init */
277 intel_disable_gt_powersave(dev); 282 intel_disable_gt_powersave(dev);
@@ -549,6 +554,8 @@ static int gen6_do_reset(struct drm_device *dev)
549 /* Spin waiting for the device to ack the reset request */ 554 /* Spin waiting for the device to ack the reset request */
550 ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); 555 ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
551 556
557 intel_uncore_forcewake_reset(dev);
558
552 /* If reset with a user forcewake, try to restore, otherwise turn it off */ 559 /* If reset with a user forcewake, try to restore, otherwise turn it off */
553 if (dev_priv->uncore.forcewake_count) 560 if (dev_priv->uncore.forcewake_count)
554 dev_priv->uncore.funcs.force_wake_get(dev_priv); 561 dev_priv->uncore.funcs.force_wake_get(dev_priv);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8863644024b7..e893c5362402 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -636,7 +636,8 @@ int nouveau_pmops_resume(struct device *dev)
636 nouveau_fbcon_set_suspend(drm_dev, 0); 636 nouveau_fbcon_set_suspend(drm_dev, 0);
637 637
638 nouveau_fbcon_zfill_all(drm_dev); 638 nouveau_fbcon_zfill_all(drm_dev);
639 nouveau_display_resume(drm_dev); 639 if (drm_dev->mode_config.num_crtc)
640 nouveau_display_resume(drm_dev);
640 nv_suspend_set_printk_level(NV_DBG_DEBUG); 641 nv_suspend_set_printk_level(NV_DBG_DEBUG);
641 return 0; 642 return 0;
642} 643}
@@ -671,7 +672,8 @@ static int nouveau_pmops_thaw(struct device *dev)
671 if (drm_dev->mode_config.num_crtc) 672 if (drm_dev->mode_config.num_crtc)
672 nouveau_fbcon_set_suspend(drm_dev, 0); 673 nouveau_fbcon_set_suspend(drm_dev, 0);
673 nouveau_fbcon_zfill_all(drm_dev); 674 nouveau_fbcon_zfill_all(drm_dev);
674 nouveau_display_resume(drm_dev); 675 if (drm_dev->mode_config.num_crtc)
676 nouveau_display_resume(drm_dev);
675 nv_suspend_set_printk_level(NV_DBG_DEBUG); 677 nv_suspend_set_printk_level(NV_DBG_DEBUG);
676 return 0; 678 return 0;
677} 679}
@@ -906,7 +908,8 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
906 pci_set_master(pdev); 908 pci_set_master(pdev);
907 909
908 ret = nouveau_do_resume(drm_dev); 910 ret = nouveau_do_resume(drm_dev);
909 nouveau_display_resume(drm_dev); 911 if (drm_dev->mode_config.num_crtc)
912 nouveau_display_resume(drm_dev);
910 drm_kms_helper_poll_enable(drm_dev); 913 drm_kms_helper_poll_enable(drm_dev);
911 /* do magic */ 914 /* do magic */
912 nv_mask(device, 0x88488, (1 << 25), (1 << 25)); 915 nv_mask(device, 0x88488, (1 << 25), (1 << 25));
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dfac7965ea28..32923d2f6002 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -707,8 +707,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
707 switch (connector->connector_type) { 707 switch (connector->connector_type) {
708 case DRM_MODE_CONNECTOR_DVII: 708 case DRM_MODE_CONNECTOR_DVII:
709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
710 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 710 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
711 radeon_audio) 711 (drm_detect_hdmi_monitor(radeon_connector->edid) &&
712 (radeon_connector->audio == RADEON_AUDIO_AUTO)))
712 return ATOM_ENCODER_MODE_HDMI; 713 return ATOM_ENCODER_MODE_HDMI;
713 else if (radeon_connector->use_digital) 714 else if (radeon_connector->use_digital)
714 return ATOM_ENCODER_MODE_DVI; 715 return ATOM_ENCODER_MODE_DVI;
@@ -718,8 +719,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
718 case DRM_MODE_CONNECTOR_DVID: 719 case DRM_MODE_CONNECTOR_DVID:
719 case DRM_MODE_CONNECTOR_HDMIA: 720 case DRM_MODE_CONNECTOR_HDMIA:
720 default: 721 default:
721 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 722 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
722 radeon_audio) 723 (drm_detect_hdmi_monitor(radeon_connector->edid) &&
724 (radeon_connector->audio == RADEON_AUDIO_AUTO)))
723 return ATOM_ENCODER_MODE_HDMI; 725 return ATOM_ENCODER_MODE_HDMI;
724 else 726 else
725 return ATOM_ENCODER_MODE_DVI; 727 return ATOM_ENCODER_MODE_DVI;
@@ -732,8 +734,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
732 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 734 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
733 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 735 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
734 return ATOM_ENCODER_MODE_DP; 736 return ATOM_ENCODER_MODE_DP;
735 else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 737 else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
736 radeon_audio) 738 (drm_detect_hdmi_monitor(radeon_connector->edid) &&
739 (radeon_connector->audio == RADEON_AUDIO_AUTO)))
737 return ATOM_ENCODER_MODE_HDMI; 740 return ATOM_ENCODER_MODE_HDMI;
738 else 741 else
739 return ATOM_ENCODER_MODE_DVI; 742 return ATOM_ENCODER_MODE_DVI;
@@ -1647,8 +1650,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1647 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); 1650 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1648 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1651 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1649 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1652 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1650 /* some early dce3.2 boards have a bug in their transmitter control table */ 1653 /* some dce3.x boards have a bug in their transmitter control table.
1651 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) 1654 * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
1655 * does the same thing and more.
1656 */
1657 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
1658 (rdev->family != CHIP_RS880))
1652 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1659 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1653 } 1660 }
1654 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1661 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 084e69414fd1..05ff315e8e9e 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2340,12 +2340,6 @@ int btc_dpm_set_power_state(struct radeon_device *rdev)
2340 return ret; 2340 return ret;
2341 } 2341 }
2342 2342
2343 ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
2344 if (ret) {
2345 DRM_ERROR("rv770_dpm_force_performance_level failed\n");
2346 return ret;
2347 }
2348
2349 return 0; 2343 return 0;
2350} 2344}
2351 2345
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 916630fdc796..899627443030 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -4208,6 +4208,7 @@ static void ci_request_link_speed_change_before_state_change(struct radeon_devic
4208 pi->pspp_notify_required = false; 4208 pi->pspp_notify_required = false;
4209 if (target_link_speed > current_link_speed) { 4209 if (target_link_speed > current_link_speed) {
4210 switch (target_link_speed) { 4210 switch (target_link_speed) {
4211#ifdef CONFIG_ACPI
4211 case RADEON_PCIE_GEN3: 4212 case RADEON_PCIE_GEN3:
4212 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) 4213 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4213 break; 4214 break;
@@ -4217,6 +4218,7 @@ static void ci_request_link_speed_change_before_state_change(struct radeon_devic
4217 case RADEON_PCIE_GEN2: 4218 case RADEON_PCIE_GEN2:
4218 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) 4219 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4219 break; 4220 break;
4221#endif
4220 default: 4222 default:
4221 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev); 4223 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4222 break; 4224 break;
@@ -4248,7 +4250,9 @@ static void ci_notify_link_speed_change_after_state_change(struct radeon_device
4248 (ci_get_current_pcie_speed(rdev) > 0)) 4250 (ci_get_current_pcie_speed(rdev) > 0))
4249 return; 4251 return;
4250 4252
4253#ifdef CONFIG_ACPI
4251 radeon_acpi_pcie_performance_request(rdev, request, false); 4254 radeon_acpi_pcie_performance_request(rdev, request, false);
4255#endif
4252 } 4256 }
4253} 4257}
4254 4258
@@ -4744,12 +4748,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
4744 if (pi->pcie_performance_request) 4748 if (pi->pcie_performance_request)
4745 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); 4749 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4746 4750
4747 ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
4748 if (ret) {
4749 DRM_ERROR("ci_dpm_force_performance_level failed\n");
4750 return ret;
4751 }
4752
4753 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | 4751 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4754 RADEON_CG_BLOCK_MC | 4752 RADEON_CG_BLOCK_MC |
4755 RADEON_CG_BLOCK_SDMA | 4753 RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 53b43dd3cf1e..252e10a41cf5 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -47,10 +47,11 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
47 u32 smc_start_address, 47 u32 smc_start_address,
48 const u8 *src, u32 byte_count, u32 limit) 48 const u8 *src, u32 byte_count, u32 limit)
49{ 49{
50 unsigned long flags;
50 u32 data, original_data; 51 u32 data, original_data;
51 u32 addr; 52 u32 addr;
52 u32 extra_shift; 53 u32 extra_shift;
53 int ret; 54 int ret = 0;
54 55
55 if (smc_start_address & 3) 56 if (smc_start_address & 3)
56 return -EINVAL; 57 return -EINVAL;
@@ -59,13 +60,14 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
59 60
60 addr = smc_start_address; 61 addr = smc_start_address;
61 62
63 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
62 while (byte_count >= 4) { 64 while (byte_count >= 4) {
63 /* SMC address space is BE */ 65 /* SMC address space is BE */
64 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 66 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
65 67
66 ret = ci_set_smc_sram_address(rdev, addr, limit); 68 ret = ci_set_smc_sram_address(rdev, addr, limit);
67 if (ret) 69 if (ret)
68 return ret; 70 goto done;
69 71
70 WREG32(SMC_IND_DATA_0, data); 72 WREG32(SMC_IND_DATA_0, data);
71 73
@@ -80,7 +82,7 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
80 82
81 ret = ci_set_smc_sram_address(rdev, addr, limit); 83 ret = ci_set_smc_sram_address(rdev, addr, limit);
82 if (ret) 84 if (ret)
83 return ret; 85 goto done;
84 86
85 original_data = RREG32(SMC_IND_DATA_0); 87 original_data = RREG32(SMC_IND_DATA_0);
86 88
@@ -97,11 +99,15 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
97 99
98 ret = ci_set_smc_sram_address(rdev, addr, limit); 100 ret = ci_set_smc_sram_address(rdev, addr, limit);
99 if (ret) 101 if (ret)
100 return ret; 102 goto done;
101 103
102 WREG32(SMC_IND_DATA_0, data); 104 WREG32(SMC_IND_DATA_0, data);
103 } 105 }
104 return 0; 106
107done:
108 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
109
110 return ret;
105} 111}
106 112
107void ci_start_smc(struct radeon_device *rdev) 113void ci_start_smc(struct radeon_device *rdev)
@@ -197,6 +203,7 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
197 203
198int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) 204int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
199{ 205{
206 unsigned long flags;
200 u32 ucode_start_address; 207 u32 ucode_start_address;
201 u32 ucode_size; 208 u32 ucode_size;
202 const u8 *src; 209 const u8 *src;
@@ -219,6 +226,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
219 return -EINVAL; 226 return -EINVAL;
220 227
221 src = (const u8 *)rdev->smc_fw->data; 228 src = (const u8 *)rdev->smc_fw->data;
229 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
222 WREG32(SMC_IND_INDEX_0, ucode_start_address); 230 WREG32(SMC_IND_INDEX_0, ucode_start_address);
223 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); 231 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
224 while (ucode_size >= 4) { 232 while (ucode_size >= 4) {
@@ -231,6 +239,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
231 ucode_size -= 4; 239 ucode_size -= 4;
232 } 240 }
233 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); 241 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
242 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
234 243
235 return 0; 244 return 0;
236} 245}
@@ -238,25 +247,29 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
238int ci_read_smc_sram_dword(struct radeon_device *rdev, 247int ci_read_smc_sram_dword(struct radeon_device *rdev,
239 u32 smc_address, u32 *value, u32 limit) 248 u32 smc_address, u32 *value, u32 limit)
240{ 249{
250 unsigned long flags;
241 int ret; 251 int ret;
242 252
253 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
243 ret = ci_set_smc_sram_address(rdev, smc_address, limit); 254 ret = ci_set_smc_sram_address(rdev, smc_address, limit);
244 if (ret) 255 if (ret == 0)
245 return ret; 256 *value = RREG32(SMC_IND_DATA_0);
257 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
246 258
247 *value = RREG32(SMC_IND_DATA_0); 259 return ret;
248 return 0;
249} 260}
250 261
251int ci_write_smc_sram_dword(struct radeon_device *rdev, 262int ci_write_smc_sram_dword(struct radeon_device *rdev,
252 u32 smc_address, u32 value, u32 limit) 263 u32 smc_address, u32 value, u32 limit)
253{ 264{
265 unsigned long flags;
254 int ret; 266 int ret;
255 267
268 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
256 ret = ci_set_smc_sram_address(rdev, smc_address, limit); 269 ret = ci_set_smc_sram_address(rdev, smc_address, limit);
257 if (ret) 270 if (ret == 0)
258 return ret; 271 WREG32(SMC_IND_DATA_0, value);
272 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
259 273
260 WREG32(SMC_IND_DATA_0, value); 274 return ret;
261 return 0;
262} 275}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a3bba0587276..adbdb6503b05 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
77static void cik_program_aspm(struct radeon_device *rdev); 77static void cik_program_aspm(struct radeon_device *rdev);
78static void cik_init_pg(struct radeon_device *rdev); 78static void cik_init_pg(struct radeon_device *rdev);
79static void cik_init_cg(struct radeon_device *rdev); 79static void cik_init_cg(struct radeon_device *rdev);
80static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
81 bool enable);
80 82
81/* get temperature in millidegrees */ 83/* get temperature in millidegrees */
82int ci_get_temp(struct radeon_device *rdev) 84int ci_get_temp(struct radeon_device *rdev)
@@ -120,20 +122,27 @@ int kv_get_temp(struct radeon_device *rdev)
120 */ 122 */
121u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg) 123u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
122{ 124{
125 unsigned long flags;
123 u32 r; 126 u32 r;
124 127
128 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
125 WREG32(PCIE_INDEX, reg); 129 WREG32(PCIE_INDEX, reg);
126 (void)RREG32(PCIE_INDEX); 130 (void)RREG32(PCIE_INDEX);
127 r = RREG32(PCIE_DATA); 131 r = RREG32(PCIE_DATA);
132 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
128 return r; 133 return r;
129} 134}
130 135
131void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) 136void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
132{ 137{
138 unsigned long flags;
139
140 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
133 WREG32(PCIE_INDEX, reg); 141 WREG32(PCIE_INDEX, reg);
134 (void)RREG32(PCIE_INDEX); 142 (void)RREG32(PCIE_INDEX);
135 WREG32(PCIE_DATA, v); 143 WREG32(PCIE_DATA, v);
136 (void)RREG32(PCIE_DATA); 144 (void)RREG32(PCIE_DATA);
145 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
137} 146}
138 147
139static const u32 spectre_rlc_save_restore_register_list[] = 148static const u32 spectre_rlc_save_restore_register_list[] =
@@ -2722,7 +2731,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
2722 } else if ((rdev->pdev->device == 0x1309) || 2731 } else if ((rdev->pdev->device == 0x1309) ||
2723 (rdev->pdev->device == 0x130A) || 2732 (rdev->pdev->device == 0x130A) ||
2724 (rdev->pdev->device == 0x130D) || 2733 (rdev->pdev->device == 0x130D) ||
2725 (rdev->pdev->device == 0x1313)) { 2734 (rdev->pdev->device == 0x1313) ||
2735 (rdev->pdev->device == 0x131D)) {
2726 rdev->config.cik.max_cu_per_sh = 6; 2736 rdev->config.cik.max_cu_per_sh = 6;
2727 rdev->config.cik.max_backends_per_se = 2; 2737 rdev->config.cik.max_backends_per_se = 2;
2728 } else if ((rdev->pdev->device == 0x1306) || 2738 } else if ((rdev->pdev->device == 0x1306) ||
@@ -4013,6 +4023,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
4013{ 4023{
4014 int r; 4024 int r;
4015 4025
4026 cik_enable_gui_idle_interrupt(rdev, false);
4027
4016 r = cik_cp_load_microcode(rdev); 4028 r = cik_cp_load_microcode(rdev);
4017 if (r) 4029 if (r)
4018 return r; 4030 return r;
@@ -4024,6 +4036,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
4024 if (r) 4036 if (r)
4025 return r; 4037 return r;
4026 4038
4039 cik_enable_gui_idle_interrupt(rdev, true);
4040
4027 return 0; 4041 return 0;
4028} 4042}
4029 4043
@@ -5376,7 +5390,9 @@ static void cik_enable_hdp_ls(struct radeon_device *rdev,
5376void cik_update_cg(struct radeon_device *rdev, 5390void cik_update_cg(struct radeon_device *rdev,
5377 u32 block, bool enable) 5391 u32 block, bool enable)
5378{ 5392{
5393
5379 if (block & RADEON_CG_BLOCK_GFX) { 5394 if (block & RADEON_CG_BLOCK_GFX) {
5395 cik_enable_gui_idle_interrupt(rdev, false);
5380 /* order matters! */ 5396 /* order matters! */
5381 if (enable) { 5397 if (enable) {
5382 cik_enable_mgcg(rdev, true); 5398 cik_enable_mgcg(rdev, true);
@@ -5385,6 +5401,7 @@ void cik_update_cg(struct radeon_device *rdev,
5385 cik_enable_cgcg(rdev, false); 5401 cik_enable_cgcg(rdev, false);
5386 cik_enable_mgcg(rdev, false); 5402 cik_enable_mgcg(rdev, false);
5387 } 5403 }
5404 cik_enable_gui_idle_interrupt(rdev, true);
5388 } 5405 }
5389 5406
5390 if (block & RADEON_CG_BLOCK_MC) { 5407 if (block & RADEON_CG_BLOCK_MC) {
@@ -5541,7 +5558,7 @@ static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
5541{ 5558{
5542 u32 data, orig; 5559 u32 data, orig;
5543 5560
5544 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { 5561 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
5545 orig = data = RREG32(RLC_PG_CNTL); 5562 orig = data = RREG32(RLC_PG_CNTL);
5546 data |= GFX_PG_ENABLE; 5563 data |= GFX_PG_ENABLE;
5547 if (orig != data) 5564 if (orig != data)
@@ -5805,7 +5822,7 @@ static void cik_init_pg(struct radeon_device *rdev)
5805 if (rdev->pg_flags) { 5822 if (rdev->pg_flags) {
5806 cik_enable_sck_slowdown_on_pu(rdev, true); 5823 cik_enable_sck_slowdown_on_pu(rdev, true);
5807 cik_enable_sck_slowdown_on_pd(rdev, true); 5824 cik_enable_sck_slowdown_on_pd(rdev, true);
5808 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { 5825 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5809 cik_init_gfx_cgpg(rdev); 5826 cik_init_gfx_cgpg(rdev);
5810 cik_enable_cp_pg(rdev, true); 5827 cik_enable_cp_pg(rdev, true);
5811 cik_enable_gds_pg(rdev, true); 5828 cik_enable_gds_pg(rdev, true);
@@ -5819,7 +5836,7 @@ static void cik_fini_pg(struct radeon_device *rdev)
5819{ 5836{
5820 if (rdev->pg_flags) { 5837 if (rdev->pg_flags) {
5821 cik_update_gfx_pg(rdev, false); 5838 cik_update_gfx_pg(rdev, false);
5822 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { 5839 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5823 cik_enable_cp_pg(rdev, false); 5840 cik_enable_cp_pg(rdev, false);
5824 cik_enable_gds_pg(rdev, false); 5841 cik_enable_gds_pg(rdev, false);
5825 } 5842 }
@@ -5895,7 +5912,9 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
5895 u32 tmp; 5912 u32 tmp;
5896 5913
5897 /* gfx ring */ 5914 /* gfx ring */
5898 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5915 tmp = RREG32(CP_INT_CNTL_RING0) &
5916 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5917 WREG32(CP_INT_CNTL_RING0, tmp);
5899 /* sdma */ 5918 /* sdma */
5900 tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; 5919 tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
5901 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp); 5920 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
@@ -6036,8 +6055,7 @@ static int cik_irq_init(struct radeon_device *rdev)
6036 */ 6055 */
6037int cik_irq_set(struct radeon_device *rdev) 6056int cik_irq_set(struct radeon_device *rdev)
6038{ 6057{
6039 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE | 6058 u32 cp_int_cntl;
6040 PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
6041 u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3; 6059 u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
6042 u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3; 6060 u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
6043 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 6061 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
@@ -6058,6 +6076,10 @@ int cik_irq_set(struct radeon_device *rdev)
6058 return 0; 6076 return 0;
6059 } 6077 }
6060 6078
6079 cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
6080 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
6081 cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
6082
6061 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 6083 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
6062 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 6084 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
6063 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 6085 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 95a66db08d9b..91bb470de0a3 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2014,12 +2014,6 @@ int cypress_dpm_set_power_state(struct radeon_device *rdev)
2014 if (eg_pi->pcie_performance_request) 2014 if (eg_pi->pcie_performance_request)
2015 cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); 2015 cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
2016 2016
2017 ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
2018 if (ret) {
2019 DRM_ERROR("rv770_dpm_force_performance_level failed\n");
2020 return ret;
2021 }
2022
2023 return 0; 2017 return 0;
2024} 2018}
2025 2019
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 8953255e894b..85a69d2ea3d2 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -28,22 +28,30 @@
28static u32 dce6_endpoint_rreg(struct radeon_device *rdev, 28static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
29 u32 block_offset, u32 reg) 29 u32 block_offset, u32 reg)
30{ 30{
31 unsigned long flags;
31 u32 r; 32 u32 r;
32 33
34 spin_lock_irqsave(&rdev->end_idx_lock, flags);
33 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 35 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
34 r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset); 36 r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
37 spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
38
35 return r; 39 return r;
36} 40}
37 41
38static void dce6_endpoint_wreg(struct radeon_device *rdev, 42static void dce6_endpoint_wreg(struct radeon_device *rdev,
39 u32 block_offset, u32 reg, u32 v) 43 u32 block_offset, u32 reg, u32 v)
40{ 44{
45 unsigned long flags;
46
47 spin_lock_irqsave(&rdev->end_idx_lock, flags);
41 if (ASIC_IS_DCE8(rdev)) 48 if (ASIC_IS_DCE8(rdev))
42 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 49 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
43 else 50 else
44 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, 51 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
45 AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg)); 52 AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
46 WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v); 53 WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
54 spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
47} 55}
48 56
49#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg)) 57#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
@@ -86,12 +94,12 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
86 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
87 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
88 u32 offset = dig->afmt->offset; 96 u32 offset = dig->afmt->offset;
89 u32 id = dig->afmt->pin->id;
90 97
91 if (!dig->afmt->pin) 98 if (!dig->afmt->pin)
92 return; 99 return;
93 100
94 WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id)); 101 WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
102 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
95} 103}
96 104
97void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) 105void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index ecd60809db4e..71399065db04 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -40,6 +40,7 @@ static int kv_calculate_dpm_settings(struct radeon_device *rdev);
40static void kv_enable_new_levels(struct radeon_device *rdev); 40static void kv_enable_new_levels(struct radeon_device *rdev);
41static void kv_program_nbps_index_settings(struct radeon_device *rdev, 41static void kv_program_nbps_index_settings(struct radeon_device *rdev,
42 struct radeon_ps *new_rps); 42 struct radeon_ps *new_rps);
43static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
43static int kv_set_enabled_levels(struct radeon_device *rdev); 44static int kv_set_enabled_levels(struct radeon_device *rdev);
44static int kv_force_dpm_highest(struct radeon_device *rdev); 45static int kv_force_dpm_highest(struct radeon_device *rdev);
45static int kv_force_dpm_lowest(struct radeon_device *rdev); 46static int kv_force_dpm_lowest(struct radeon_device *rdev);
@@ -519,7 +520,7 @@ static int kv_set_dpm_boot_state(struct radeon_device *rdev)
519 520
520static void kv_program_vc(struct radeon_device *rdev) 521static void kv_program_vc(struct radeon_device *rdev)
521{ 522{
522 WREG32_SMC(CG_FTV_0, 0x3FFFC000); 523 WREG32_SMC(CG_FTV_0, 0x3FFFC100);
523} 524}
524 525
525static void kv_clear_vc(struct radeon_device *rdev) 526static void kv_clear_vc(struct radeon_device *rdev)
@@ -638,7 +639,10 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
638 639
639static int kv_unforce_levels(struct radeon_device *rdev) 640static int kv_unforce_levels(struct radeon_device *rdev)
640{ 641{
641 return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); 642 if (rdev->family == CHIP_KABINI)
643 return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
644 else
645 return kv_set_enabled_levels(rdev);
642} 646}
643 647
644static int kv_update_sclk_t(struct radeon_device *rdev) 648static int kv_update_sclk_t(struct radeon_device *rdev)
@@ -667,9 +671,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
667 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 671 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
668 672
669 if (table && table->count) { 673 if (table && table->count) {
670 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { 674 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
671 if ((table->entries[i].clk == pi->boot_pl.sclk) || 675 if (table->entries[i].clk == pi->boot_pl.sclk)
672 (i == 0))
673 break; 676 break;
674 } 677 }
675 678
@@ -682,9 +685,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
682 if (table->num_max_dpm_entries == 0) 685 if (table->num_max_dpm_entries == 0)
683 return -EINVAL; 686 return -EINVAL;
684 687
685 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { 688 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
686 if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) || 689 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
687 (i == 0))
688 break; 690 break;
689 } 691 }
690 692
@@ -1078,6 +1080,13 @@ static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
1078 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); 1080 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
1079} 1081}
1080 1082
1083static void kv_reset_acp_boot_level(struct radeon_device *rdev)
1084{
1085 struct kv_power_info *pi = kv_get_pi(rdev);
1086
1087 pi->acp_boot_level = 0xff;
1088}
1089
1081static void kv_update_current_ps(struct radeon_device *rdev, 1090static void kv_update_current_ps(struct radeon_device *rdev,
1082 struct radeon_ps *rps) 1091 struct radeon_ps *rps)
1083{ 1092{
@@ -1100,6 +1109,18 @@ static void kv_update_requested_ps(struct radeon_device *rdev,
1100 pi->requested_rps.ps_priv = &pi->requested_ps; 1109 pi->requested_rps.ps_priv = &pi->requested_ps;
1101} 1110}
1102 1111
1112void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
1113{
1114 struct kv_power_info *pi = kv_get_pi(rdev);
1115 int ret;
1116
1117 if (pi->bapm_enable) {
1118 ret = kv_smc_bapm_enable(rdev, enable);
1119 if (ret)
1120 DRM_ERROR("kv_smc_bapm_enable failed\n");
1121 }
1122}
1123
1103int kv_dpm_enable(struct radeon_device *rdev) 1124int kv_dpm_enable(struct radeon_device *rdev)
1104{ 1125{
1105 struct kv_power_info *pi = kv_get_pi(rdev); 1126 struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1192,6 +1213,8 @@ int kv_dpm_enable(struct radeon_device *rdev)
1192 return ret; 1213 return ret;
1193 } 1214 }
1194 1215
1216 kv_reset_acp_boot_level(rdev);
1217
1195 if (rdev->irq.installed && 1218 if (rdev->irq.installed &&
1196 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 1219 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1197 ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 1220 ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1203,6 +1226,12 @@ int kv_dpm_enable(struct radeon_device *rdev)
1203 radeon_irq_set(rdev); 1226 radeon_irq_set(rdev);
1204 } 1227 }
1205 1228
1229 ret = kv_smc_bapm_enable(rdev, false);
1230 if (ret) {
1231 DRM_ERROR("kv_smc_bapm_enable failed\n");
1232 return ret;
1233 }
1234
1206 /* powerdown unused blocks for now */ 1235 /* powerdown unused blocks for now */
1207 kv_dpm_powergate_acp(rdev, true); 1236 kv_dpm_powergate_acp(rdev, true);
1208 kv_dpm_powergate_samu(rdev, true); 1237 kv_dpm_powergate_samu(rdev, true);
@@ -1226,6 +1255,8 @@ void kv_dpm_disable(struct radeon_device *rdev)
1226 RADEON_CG_BLOCK_BIF | 1255 RADEON_CG_BLOCK_BIF |
1227 RADEON_CG_BLOCK_HDP), false); 1256 RADEON_CG_BLOCK_HDP), false);
1228 1257
1258 kv_smc_bapm_enable(rdev, false);
1259
1229 /* powerup blocks */ 1260 /* powerup blocks */
1230 kv_dpm_powergate_acp(rdev, false); 1261 kv_dpm_powergate_acp(rdev, false);
1231 kv_dpm_powergate_samu(rdev, false); 1262 kv_dpm_powergate_samu(rdev, false);
@@ -1450,6 +1481,39 @@ static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
1450 return kv_enable_samu_dpm(rdev, !gate); 1481 return kv_enable_samu_dpm(rdev, !gate);
1451} 1482}
1452 1483
1484static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
1485{
1486 u8 i;
1487 struct radeon_clock_voltage_dependency_table *table =
1488 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1489
1490 for (i = 0; i < table->count; i++) {
1491 if (table->entries[i].clk >= 0) /* XXX */
1492 break;
1493 }
1494
1495 if (i >= table->count)
1496 i = table->count - 1;
1497
1498 return i;
1499}
1500
1501static void kv_update_acp_boot_level(struct radeon_device *rdev)
1502{
1503 struct kv_power_info *pi = kv_get_pi(rdev);
1504 u8 acp_boot_level;
1505
1506 if (!pi->caps_stable_p_state) {
1507 acp_boot_level = kv_get_acp_boot_level(rdev);
1508 if (acp_boot_level != pi->acp_boot_level) {
1509 pi->acp_boot_level = acp_boot_level;
1510 kv_send_msg_to_smc_with_parameter(rdev,
1511 PPSMC_MSG_ACPDPM_SetEnabledMask,
1512 (1 << pi->acp_boot_level));
1513 }
1514 }
1515}
1516
1453static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate) 1517static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
1454{ 1518{
1455 struct kv_power_info *pi = kv_get_pi(rdev); 1519 struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1461,7 +1525,7 @@ static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
1461 if (pi->caps_stable_p_state) 1525 if (pi->caps_stable_p_state)
1462 pi->acp_boot_level = table->count - 1; 1526 pi->acp_boot_level = table->count - 1;
1463 else 1527 else
1464 pi->acp_boot_level = 0; 1528 pi->acp_boot_level = kv_get_acp_boot_level(rdev);
1465 1529
1466 ret = kv_copy_bytes_to_smc(rdev, 1530 ret = kv_copy_bytes_to_smc(rdev,
1467 pi->dpm_table_start + 1531 pi->dpm_table_start +
@@ -1588,13 +1652,11 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
1588 } 1652 }
1589 } 1653 }
1590 1654
1591 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { 1655 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
1592 if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) || 1656 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
1593 (i == 0)) {
1594 pi->highest_valid = i;
1595 break; 1657 break;
1596 }
1597 } 1658 }
1659 pi->highest_valid = i;
1598 1660
1599 if (pi->lowest_valid > pi->highest_valid) { 1661 if (pi->lowest_valid > pi->highest_valid) {
1600 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > 1662 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
@@ -1615,14 +1677,12 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
1615 } 1677 }
1616 } 1678 }
1617 1679
1618 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { 1680 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
1619 if (table->entries[i].sclk_frequency <= 1681 if (table->entries[i].sclk_frequency <=
1620 new_ps->levels[new_ps->num_levels - 1].sclk || 1682 new_ps->levels[new_ps->num_levels - 1].sclk)
1621 i == 0) {
1622 pi->highest_valid = i;
1623 break; 1683 break;
1624 }
1625 } 1684 }
1685 pi->highest_valid = i;
1626 1686
1627 if (pi->lowest_valid > pi->highest_valid) { 1687 if (pi->lowest_valid > pi->highest_valid) {
1628 if ((new_ps->levels[0].sclk - 1688 if ((new_ps->levels[0].sclk -
@@ -1724,6 +1784,14 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
1724 RADEON_CG_BLOCK_BIF | 1784 RADEON_CG_BLOCK_BIF |
1725 RADEON_CG_BLOCK_HDP), false); 1785 RADEON_CG_BLOCK_HDP), false);
1726 1786
1787 if (pi->bapm_enable) {
1788 ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
1789 if (ret) {
1790 DRM_ERROR("kv_smc_bapm_enable failed\n");
1791 return ret;
1792 }
1793 }
1794
1727 if (rdev->family == CHIP_KABINI) { 1795 if (rdev->family == CHIP_KABINI) {
1728 if (pi->enable_dpm) { 1796 if (pi->enable_dpm) {
1729 kv_set_valid_clock_range(rdev, new_ps); 1797 kv_set_valid_clock_range(rdev, new_ps);
@@ -1775,6 +1843,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
1775 return ret; 1843 return ret;
1776 } 1844 }
1777#endif 1845#endif
1846 kv_update_acp_boot_level(rdev);
1778 kv_update_sclk_t(rdev); 1847 kv_update_sclk_t(rdev);
1779 kv_enable_nb_dpm(rdev); 1848 kv_enable_nb_dpm(rdev);
1780 } 1849 }
@@ -1785,7 +1854,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
1785 RADEON_CG_BLOCK_BIF | 1854 RADEON_CG_BLOCK_BIF |
1786 RADEON_CG_BLOCK_HDP), true); 1855 RADEON_CG_BLOCK_HDP), true);
1787 1856
1788 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1789 return 0; 1857 return 0;
1790} 1858}
1791 1859
@@ -1806,12 +1874,23 @@ void kv_dpm_setup_asic(struct radeon_device *rdev)
1806 1874
1807void kv_dpm_reset_asic(struct radeon_device *rdev) 1875void kv_dpm_reset_asic(struct radeon_device *rdev)
1808{ 1876{
1809 kv_force_lowest_valid(rdev); 1877 struct kv_power_info *pi = kv_get_pi(rdev);
1810 kv_init_graphics_levels(rdev); 1878
1811 kv_program_bootup_state(rdev); 1879 if (rdev->family == CHIP_KABINI) {
1812 kv_upload_dpm_settings(rdev); 1880 kv_force_lowest_valid(rdev);
1813 kv_force_lowest_valid(rdev); 1881 kv_init_graphics_levels(rdev);
1814 kv_unforce_levels(rdev); 1882 kv_program_bootup_state(rdev);
1883 kv_upload_dpm_settings(rdev);
1884 kv_force_lowest_valid(rdev);
1885 kv_unforce_levels(rdev);
1886 } else {
1887 kv_init_graphics_levels(rdev);
1888 kv_program_bootup_state(rdev);
1889 kv_freeze_sclk_dpm(rdev, true);
1890 kv_upload_dpm_settings(rdev);
1891 kv_freeze_sclk_dpm(rdev, false);
1892 kv_set_enabled_level(rdev, pi->graphics_boot_level);
1893 }
1815} 1894}
1816 1895
1817//XXX use sumo_dpm_display_configuration_changed 1896//XXX use sumo_dpm_display_configuration_changed
@@ -1871,12 +1950,15 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
1871 if (ret) 1950 if (ret)
1872 return ret; 1951 return ret;
1873 1952
1874 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) { 1953 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
1875 if (enable_mask & (1 << i)) 1954 if (enable_mask & (1 << i))
1876 break; 1955 break;
1877 } 1956 }
1878 1957
1879 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 1958 if (rdev->family == CHIP_KABINI)
1959 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1960 else
1961 return kv_set_enabled_level(rdev, i);
1880} 1962}
1881 1963
1882static int kv_force_dpm_lowest(struct radeon_device *rdev) 1964static int kv_force_dpm_lowest(struct radeon_device *rdev)
@@ -1893,7 +1975,10 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
1893 break; 1975 break;
1894 } 1976 }
1895 1977
1896 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 1978 if (rdev->family == CHIP_KABINI)
1979 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1980 else
1981 return kv_set_enabled_level(rdev, i);
1897} 1982}
1898 1983
1899static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev, 1984static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
@@ -1911,9 +1996,9 @@ static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1911 if (!pi->caps_sclk_ds) 1996 if (!pi->caps_sclk_ds)
1912 return 0; 1997 return 0;
1913 1998
1914 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) { 1999 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
1915 temp = sclk / sumo_get_sleep_divider_from_id(i); 2000 temp = sclk / sumo_get_sleep_divider_from_id(i);
1916 if ((temp >= min) || (i == 0)) 2001 if (temp >= min)
1917 break; 2002 break;
1918 } 2003 }
1919 2004
@@ -2039,12 +2124,12 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
2039 ps->dpmx_nb_ps_lo = 0x1; 2124 ps->dpmx_nb_ps_lo = 0x1;
2040 ps->dpmx_nb_ps_hi = 0x0; 2125 ps->dpmx_nb_ps_hi = 0x0;
2041 } else { 2126 } else {
2042 ps->dpm0_pg_nb_ps_lo = 0x1; 2127 ps->dpm0_pg_nb_ps_lo = 0x3;
2043 ps->dpm0_pg_nb_ps_hi = 0x0; 2128 ps->dpm0_pg_nb_ps_hi = 0x0;
2044 ps->dpmx_nb_ps_lo = 0x2; 2129 ps->dpmx_nb_ps_lo = 0x3;
2045 ps->dpmx_nb_ps_hi = 0x1; 2130 ps->dpmx_nb_ps_hi = 0x0;
2046 2131
2047 if (pi->sys_info.nb_dpm_enable && pi->battery_state) { 2132 if (pi->sys_info.nb_dpm_enable) {
2048 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || 2133 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2049 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) || 2134 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
2050 pi->disable_nb_ps3_in_battery; 2135 pi->disable_nb_ps3_in_battery;
@@ -2210,6 +2295,15 @@ static void kv_enable_new_levels(struct radeon_device *rdev)
2210 } 2295 }
2211} 2296}
2212 2297
2298static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
2299{
2300 u32 new_mask = (1 << level);
2301
2302 return kv_send_msg_to_smc_with_parameter(rdev,
2303 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2304 new_mask);
2305}
2306
2213static int kv_set_enabled_levels(struct radeon_device *rdev) 2307static int kv_set_enabled_levels(struct radeon_device *rdev)
2214{ 2308{
2215 struct kv_power_info *pi = kv_get_pi(rdev); 2309 struct kv_power_info *pi = kv_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
index 32bb079572d7..8cef7525d7a8 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.h
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -192,6 +192,7 @@ int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
192int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 192int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
193 u32 *value, u32 limit); 193 u32 *value, u32 limit);
194int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable); 194int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
195int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable);
195int kv_copy_bytes_to_smc(struct radeon_device *rdev, 196int kv_copy_bytes_to_smc(struct radeon_device *rdev,
196 u32 smc_start_address, 197 u32 smc_start_address,
197 const u8 *src, u32 byte_count, u32 limit); 198 const u8 *src, u32 byte_count, u32 limit);
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index 34a226d7e34a..0000b59a6d05 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -107,6 +107,14 @@ int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
107 return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable); 107 return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
108} 108}
109 109
110int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
111{
112 if (enable)
113 return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
114 else
115 return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
116}
117
110int kv_copy_bytes_to_smc(struct radeon_device *rdev, 118int kv_copy_bytes_to_smc(struct radeon_device *rdev,
111 u32 smc_start_address, 119 u32 smc_start_address,
112 const u8 *src, u32 byte_count, u32 limit) 120 const u8 *src, u32 byte_count, u32 limit)
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f7b625c9e0e9..6c398a456d78 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3865,12 +3865,6 @@ int ni_dpm_set_power_state(struct radeon_device *rdev)
3865 return ret; 3865 return ret;
3866 } 3866 }
3867 3867
3868 ret = ni_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
3869 if (ret) {
3870 DRM_ERROR("ni_dpm_force_performance_level failed\n");
3871 return ret;
3872 }
3873
3874 return 0; 3868 return 0;
3875} 3869}
3876 3870
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 682842804bce..5670b8291285 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -163,6 +163,8 @@ typedef uint8_t PPSMC_Result;
163#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f) 163#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
164#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) 164#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
165#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) 165#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
166#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
167#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121)
166#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124) 168#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
167 169
168 170
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9fc61dd68bc0..24175717307b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2853,21 +2853,28 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)
2853 2853
2854uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) 2854uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2855{ 2855{
2856 unsigned long flags;
2856 uint32_t data; 2857 uint32_t data;
2857 2858
2859 spin_lock_irqsave(&rdev->pll_idx_lock, flags);
2858 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); 2860 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2859 r100_pll_errata_after_index(rdev); 2861 r100_pll_errata_after_index(rdev);
2860 data = RREG32(RADEON_CLOCK_CNTL_DATA); 2862 data = RREG32(RADEON_CLOCK_CNTL_DATA);
2861 r100_pll_errata_after_data(rdev); 2863 r100_pll_errata_after_data(rdev);
2864 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
2862 return data; 2865 return data;
2863} 2866}
2864 2867
2865void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2868void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2866{ 2869{
2870 unsigned long flags;
2871
2872 spin_lock_irqsave(&rdev->pll_idx_lock, flags);
2867 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); 2873 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2868 r100_pll_errata_after_index(rdev); 2874 r100_pll_errata_after_index(rdev);
2869 WREG32(RADEON_CLOCK_CNTL_DATA, v); 2875 WREG32(RADEON_CLOCK_CNTL_DATA, v);
2870 r100_pll_errata_after_data(rdev); 2876 r100_pll_errata_after_data(rdev);
2877 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
2871} 2878}
2872 2879
2873static void r100_set_safe_registers(struct radeon_device *rdev) 2880static void r100_set_safe_registers(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4e796ecf9ea4..6edf2b3a52b4 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -160,18 +160,25 @@ void r420_pipes_init(struct radeon_device *rdev)
160 160
161u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) 161u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
162{ 162{
163 unsigned long flags;
163 u32 r; 164 u32 r;
164 165
166 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
165 WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); 167 WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
166 r = RREG32(R_0001FC_MC_IND_DATA); 168 r = RREG32(R_0001FC_MC_IND_DATA);
169 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
167 return r; 170 return r;
168} 171}
169 172
170void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) 173void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
171{ 174{
175 unsigned long flags;
176
177 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
172 WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | 178 WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
173 S_0001F8_MC_IND_WR_EN(1)); 179 S_0001F8_MC_IND_WR_EN(1));
174 WREG32(R_0001FC_MC_IND_DATA, v); 180 WREG32(R_0001FC_MC_IND_DATA, v);
181 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
175} 182}
176 183
177static void r420_debugfs(struct radeon_device *rdev) 184static void r420_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ea4d3734e6d9..2a1b1876b431 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -119,6 +119,11 @@ u32 r600_get_xclk(struct radeon_device *rdev)
119 return rdev->clock.spll.reference_freq; 119 return rdev->clock.spll.reference_freq;
120} 120}
121 121
122int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
123{
124 return 0;
125}
126
122/* get temperature in millidegrees */ 127/* get temperature in millidegrees */
123int rv6xx_get_temp(struct radeon_device *rdev) 128int rv6xx_get_temp(struct radeon_device *rdev)
124{ 129{
@@ -1045,20 +1050,27 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
1045 1050
1046uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg) 1051uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
1047{ 1052{
1053 unsigned long flags;
1048 uint32_t r; 1054 uint32_t r;
1049 1055
1056 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1050 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg)); 1057 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
1051 r = RREG32(R_0028FC_MC_DATA); 1058 r = RREG32(R_0028FC_MC_DATA);
1052 WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR); 1059 WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
1060 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1053 return r; 1061 return r;
1054} 1062}
1055 1063
1056void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 1064void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1057{ 1065{
1066 unsigned long flags;
1067
1068 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1058 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) | 1069 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
1059 S_0028F8_MC_IND_WR_EN(1)); 1070 S_0028F8_MC_IND_WR_EN(1));
1060 WREG32(R_0028FC_MC_DATA, v); 1071 WREG32(R_0028FC_MC_DATA, v);
1061 WREG32(R_0028F8_MC_INDEX, 0x7F); 1072 WREG32(R_0028F8_MC_INDEX, 0x7F);
1073 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1062} 1074}
1063 1075
1064static void r600_mc_program(struct radeon_device *rdev) 1076static void r600_mc_program(struct radeon_device *rdev)
@@ -2092,20 +2104,27 @@ static void r600_gpu_init(struct radeon_device *rdev)
2092 */ 2104 */
2093u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) 2105u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
2094{ 2106{
2107 unsigned long flags;
2095 u32 r; 2108 u32 r;
2096 2109
2110 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2097 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2111 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2098 (void)RREG32(PCIE_PORT_INDEX); 2112 (void)RREG32(PCIE_PORT_INDEX);
2099 r = RREG32(PCIE_PORT_DATA); 2113 r = RREG32(PCIE_PORT_DATA);
2114 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2100 return r; 2115 return r;
2101} 2116}
2102 2117
2103void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2118void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2104{ 2119{
2120 unsigned long flags;
2121
2122 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2105 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2123 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2106 (void)RREG32(PCIE_PORT_INDEX); 2124 (void)RREG32(PCIE_PORT_INDEX);
2107 WREG32(PCIE_PORT_DATA, (v)); 2125 WREG32(PCIE_PORT_DATA, (v));
2108 (void)RREG32(PCIE_PORT_DATA); 2126 (void)RREG32(PCIE_PORT_DATA);
2127 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2109} 2128}
2110 2129
2111/* 2130/*
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index fa0de46fcc0d..e65f211a7be0 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1219,30 +1219,20 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
1219 1219
1220void r600_free_extended_power_table(struct radeon_device *rdev) 1220void r600_free_extended_power_table(struct radeon_device *rdev)
1221{ 1221{
1222 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries) 1222 struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;
1223 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); 1223
1224 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) 1224 kfree(dyn_state->vddc_dependency_on_sclk.entries);
1225 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); 1225 kfree(dyn_state->vddci_dependency_on_mclk.entries);
1226 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) 1226 kfree(dyn_state->vddc_dependency_on_mclk.entries);
1227 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); 1227 kfree(dyn_state->mvdd_dependency_on_mclk.entries);
1228 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) 1228 kfree(dyn_state->cac_leakage_table.entries);
1229 kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); 1229 kfree(dyn_state->phase_shedding_limits_table.entries);
1230 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) 1230 kfree(dyn_state->ppm_table);
1231 kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); 1231 kfree(dyn_state->cac_tdp_table);
1232 if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) 1232 kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
1233 kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries); 1233 kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
1234 if (rdev->pm.dpm.dyn_state.ppm_table) 1234 kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
1235 kfree(rdev->pm.dpm.dyn_state.ppm_table); 1235 kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
1236 if (rdev->pm.dpm.dyn_state.cac_tdp_table)
1237 kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
1238 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
1239 kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
1240 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
1241 kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
1242 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
1243 kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
1244 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
1245 kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
1246} 1236}
1247 1237
1248enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, 1238enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 454f90a849e4..e673fe26ea84 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1040,7 +1040,7 @@
1040# define HDMI0_AVI_INFO_CONT (1 << 1) 1040# define HDMI0_AVI_INFO_CONT (1 << 1)
1041# define HDMI0_AUDIO_INFO_SEND (1 << 4) 1041# define HDMI0_AUDIO_INFO_SEND (1 << 4)
1042# define HDMI0_AUDIO_INFO_CONT (1 << 5) 1042# define HDMI0_AUDIO_INFO_CONT (1 << 5)
1043# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ 1043# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
1044# define HDMI0_AUDIO_INFO_UPDATE (1 << 7) 1044# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
1045# define HDMI0_MPEG_INFO_SEND (1 << 8) 1045# define HDMI0_MPEG_INFO_SEND (1 << 8)
1046# define HDMI0_MPEG_INFO_CONT (1 << 9) 1046# define HDMI0_MPEG_INFO_CONT (1 << 9)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ff8b564ce2b2..a400ac1c4147 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -181,7 +181,7 @@ extern int radeon_aspm;
181#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16) 181#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
182 182
183/* PG flags */ 183/* PG flags */
184#define RADEON_PG_SUPPORT_GFX_CG (1 << 0) 184#define RADEON_PG_SUPPORT_GFX_PG (1 << 0)
185#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1) 185#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
186#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2) 186#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
187#define RADEON_PG_SUPPORT_UVD (1 << 3) 187#define RADEON_PG_SUPPORT_UVD (1 << 3)
@@ -1778,6 +1778,7 @@ struct radeon_asic {
1778 int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level); 1778 int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
1779 bool (*vblank_too_short)(struct radeon_device *rdev); 1779 bool (*vblank_too_short)(struct radeon_device *rdev);
1780 void (*powergate_uvd)(struct radeon_device *rdev, bool gate); 1780 void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
1781 void (*enable_bapm)(struct radeon_device *rdev, bool enable);
1781 } dpm; 1782 } dpm;
1782 /* pageflipping */ 1783 /* pageflipping */
1783 struct { 1784 struct {
@@ -2110,6 +2111,28 @@ struct radeon_device {
2110 resource_size_t rmmio_size; 2111 resource_size_t rmmio_size;
2111 /* protects concurrent MM_INDEX/DATA based register access */ 2112 /* protects concurrent MM_INDEX/DATA based register access */
2112 spinlock_t mmio_idx_lock; 2113 spinlock_t mmio_idx_lock;
2114 /* protects concurrent SMC based register access */
2115 spinlock_t smc_idx_lock;
2116 /* protects concurrent PLL register access */
2117 spinlock_t pll_idx_lock;
2118 /* protects concurrent MC register access */
2119 spinlock_t mc_idx_lock;
2120 /* protects concurrent PCIE register access */
2121 spinlock_t pcie_idx_lock;
2122 /* protects concurrent PCIE_PORT register access */
2123 spinlock_t pciep_idx_lock;
2124 /* protects concurrent PIF register access */
2125 spinlock_t pif_idx_lock;
2126 /* protects concurrent CG register access */
2127 spinlock_t cg_idx_lock;
2128 /* protects concurrent UVD register access */
2129 spinlock_t uvd_idx_lock;
2130 /* protects concurrent RCU register access */
2131 spinlock_t rcu_idx_lock;
2132 /* protects concurrent DIDT register access */
2133 spinlock_t didt_idx_lock;
2134 /* protects concurrent ENDPOINT (audio) register access */
2135 spinlock_t end_idx_lock;
2113 void __iomem *rmmio; 2136 void __iomem *rmmio;
2114 radeon_rreg_t mc_rreg; 2137 radeon_rreg_t mc_rreg;
2115 radeon_wreg_t mc_wreg; 2138 radeon_wreg_t mc_wreg;
@@ -2277,123 +2300,179 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
2277 */ 2300 */
2278static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) 2301static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
2279{ 2302{
2303 unsigned long flags;
2280 uint32_t r; 2304 uint32_t r;
2281 2305
2306 spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
2282 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 2307 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
2283 r = RREG32(RADEON_PCIE_DATA); 2308 r = RREG32(RADEON_PCIE_DATA);
2309 spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
2284 return r; 2310 return r;
2285} 2311}
2286 2312
2287static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2313static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2288{ 2314{
2315 unsigned long flags;
2316
2317 spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
2289 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 2318 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
2290 WREG32(RADEON_PCIE_DATA, (v)); 2319 WREG32(RADEON_PCIE_DATA, (v));
2320 spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
2291} 2321}
2292 2322
2293static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) 2323static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
2294{ 2324{
2325 unsigned long flags;
2295 u32 r; 2326 u32 r;
2296 2327
2328 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
2297 WREG32(TN_SMC_IND_INDEX_0, (reg)); 2329 WREG32(TN_SMC_IND_INDEX_0, (reg));
2298 r = RREG32(TN_SMC_IND_DATA_0); 2330 r = RREG32(TN_SMC_IND_DATA_0);
2331 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
2299 return r; 2332 return r;
2300} 2333}
2301 2334
2302static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2335static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2303{ 2336{
2337 unsigned long flags;
2338
2339 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
2304 WREG32(TN_SMC_IND_INDEX_0, (reg)); 2340 WREG32(TN_SMC_IND_INDEX_0, (reg));
2305 WREG32(TN_SMC_IND_DATA_0, (v)); 2341 WREG32(TN_SMC_IND_DATA_0, (v));
2342 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
2306} 2343}
2307 2344
2308static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) 2345static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
2309{ 2346{
2347 unsigned long flags;
2310 u32 r; 2348 u32 r;
2311 2349
2350 spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
2312 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 2351 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
2313 r = RREG32(R600_RCU_DATA); 2352 r = RREG32(R600_RCU_DATA);
2353 spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
2314 return r; 2354 return r;
2315} 2355}
2316 2356
2317static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2357static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2318{ 2358{
2359 unsigned long flags;
2360
2361 spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
2319 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 2362 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
2320 WREG32(R600_RCU_DATA, (v)); 2363 WREG32(R600_RCU_DATA, (v));
2364 spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
2321} 2365}
2322 2366
2323static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) 2367static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
2324{ 2368{
2369 unsigned long flags;
2325 u32 r; 2370 u32 r;
2326 2371
2372 spin_lock_irqsave(&rdev->cg_idx_lock, flags);
2327 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); 2373 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
2328 r = RREG32(EVERGREEN_CG_IND_DATA); 2374 r = RREG32(EVERGREEN_CG_IND_DATA);
2375 spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
2329 return r; 2376 return r;
2330} 2377}
2331 2378
2332static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2379static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2333{ 2380{
2381 unsigned long flags;
2382
2383 spin_lock_irqsave(&rdev->cg_idx_lock, flags);
2334 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); 2384 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
2335 WREG32(EVERGREEN_CG_IND_DATA, (v)); 2385 WREG32(EVERGREEN_CG_IND_DATA, (v));
2386 spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
2336} 2387}
2337 2388
2338static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) 2389static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
2339{ 2390{
2391 unsigned long flags;
2340 u32 r; 2392 u32 r;
2341 2393
2394 spin_lock_irqsave(&rdev->pif_idx_lock, flags);
2342 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); 2395 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2343 r = RREG32(EVERGREEN_PIF_PHY0_DATA); 2396 r = RREG32(EVERGREEN_PIF_PHY0_DATA);
2397 spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
2344 return r; 2398 return r;
2345} 2399}
2346 2400
2347static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2401static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2348{ 2402{
2403 unsigned long flags;
2404
2405 spin_lock_irqsave(&rdev->pif_idx_lock, flags);
2349 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); 2406 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2350 WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); 2407 WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
2408 spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
2351} 2409}
2352 2410
2353static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) 2411static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
2354{ 2412{
2413 unsigned long flags;
2355 u32 r; 2414 u32 r;
2356 2415
2416 spin_lock_irqsave(&rdev->pif_idx_lock, flags);
2357 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); 2417 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2358 r = RREG32(EVERGREEN_PIF_PHY1_DATA); 2418 r = RREG32(EVERGREEN_PIF_PHY1_DATA);
2419 spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
2359 return r; 2420 return r;
2360} 2421}
2361 2422
2362static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2423static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2363{ 2424{
2425 unsigned long flags;
2426
2427 spin_lock_irqsave(&rdev->pif_idx_lock, flags);
2364 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); 2428 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2365 WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); 2429 WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
2430 spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
2366} 2431}
2367 2432
2368static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) 2433static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
2369{ 2434{
2435 unsigned long flags;
2370 u32 r; 2436 u32 r;
2371 2437
2438 spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
2372 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 2439 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
2373 r = RREG32(R600_UVD_CTX_DATA); 2440 r = RREG32(R600_UVD_CTX_DATA);
2441 spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
2374 return r; 2442 return r;
2375} 2443}
2376 2444
2377static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2445static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2378{ 2446{
2447 unsigned long flags;
2448
2449 spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
2379 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 2450 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
2380 WREG32(R600_UVD_CTX_DATA, (v)); 2451 WREG32(R600_UVD_CTX_DATA, (v));
2452 spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
2381} 2453}
2382 2454
2383 2455
2384static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) 2456static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
2385{ 2457{
2458 unsigned long flags;
2386 u32 r; 2459 u32 r;
2387 2460
2461 spin_lock_irqsave(&rdev->didt_idx_lock, flags);
2388 WREG32(CIK_DIDT_IND_INDEX, (reg)); 2462 WREG32(CIK_DIDT_IND_INDEX, (reg));
2389 r = RREG32(CIK_DIDT_IND_DATA); 2463 r = RREG32(CIK_DIDT_IND_DATA);
2464 spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
2390 return r; 2465 return r;
2391} 2466}
2392 2467
2393static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2468static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2394{ 2469{
2470 unsigned long flags;
2471
2472 spin_lock_irqsave(&rdev->didt_idx_lock, flags);
2395 WREG32(CIK_DIDT_IND_INDEX, (reg)); 2473 WREG32(CIK_DIDT_IND_INDEX, (reg));
2396 WREG32(CIK_DIDT_IND_DATA, (v)); 2474 WREG32(CIK_DIDT_IND_DATA, (v));
2475 spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
2397} 2476}
2398 2477
2399void r100_pll_errata_after_index(struct radeon_device *rdev); 2478void r100_pll_errata_after_index(struct radeon_device *rdev);
@@ -2569,6 +2648,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
2569#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l)) 2648#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
2570#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) 2649#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
2571#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) 2650#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
2651#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
2572 2652
2573/* Common functions */ 2653/* Common functions */
2574/* AGP */ 2654/* AGP */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 630853b96841..5003385a7512 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1037,6 +1037,7 @@ static struct radeon_asic rv6xx_asic = {
1037 .set_pcie_lanes = &r600_set_pcie_lanes, 1037 .set_pcie_lanes = &r600_set_pcie_lanes,
1038 .set_clock_gating = NULL, 1038 .set_clock_gating = NULL,
1039 .get_temperature = &rv6xx_get_temp, 1039 .get_temperature = &rv6xx_get_temp,
1040 .set_uvd_clocks = &r600_set_uvd_clocks,
1040 }, 1041 },
1041 .dpm = { 1042 .dpm = {
1042 .init = &rv6xx_dpm_init, 1043 .init = &rv6xx_dpm_init,
@@ -1126,6 +1127,7 @@ static struct radeon_asic rs780_asic = {
1126 .set_pcie_lanes = NULL, 1127 .set_pcie_lanes = NULL,
1127 .set_clock_gating = NULL, 1128 .set_clock_gating = NULL,
1128 .get_temperature = &rv6xx_get_temp, 1129 .get_temperature = &rv6xx_get_temp,
1130 .set_uvd_clocks = &r600_set_uvd_clocks,
1129 }, 1131 },
1130 .dpm = { 1132 .dpm = {
1131 .init = &rs780_dpm_init, 1133 .init = &rs780_dpm_init,
@@ -1141,6 +1143,7 @@ static struct radeon_asic rs780_asic = {
1141 .get_mclk = &rs780_dpm_get_mclk, 1143 .get_mclk = &rs780_dpm_get_mclk,
1142 .print_power_state = &rs780_dpm_print_power_state, 1144 .print_power_state = &rs780_dpm_print_power_state,
1143 .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, 1145 .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
1146 .force_performance_level = &rs780_dpm_force_performance_level,
1144 }, 1147 },
1145 .pflip = { 1148 .pflip = {
1146 .pre_page_flip = &rs600_pre_page_flip, 1149 .pre_page_flip = &rs600_pre_page_flip,
@@ -1791,6 +1794,7 @@ static struct radeon_asic trinity_asic = {
1791 .print_power_state = &trinity_dpm_print_power_state, 1794 .print_power_state = &trinity_dpm_print_power_state,
1792 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, 1795 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
1793 .force_performance_level = &trinity_dpm_force_performance_level, 1796 .force_performance_level = &trinity_dpm_force_performance_level,
1797 .enable_bapm = &trinity_dpm_enable_bapm,
1794 }, 1798 },
1795 .pflip = { 1799 .pflip = {
1796 .pre_page_flip = &evergreen_pre_page_flip, 1800 .pre_page_flip = &evergreen_pre_page_flip,
@@ -2166,6 +2170,7 @@ static struct radeon_asic kv_asic = {
2166 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 2170 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
2167 .force_performance_level = &kv_dpm_force_performance_level, 2171 .force_performance_level = &kv_dpm_force_performance_level,
2168 .powergate_uvd = &kv_dpm_powergate_uvd, 2172 .powergate_uvd = &kv_dpm_powergate_uvd,
2173 .enable_bapm = &kv_dpm_enable_bapm,
2169 }, 2174 },
2170 .pflip = { 2175 .pflip = {
2171 .pre_page_flip = &evergreen_pre_page_flip, 2176 .pre_page_flip = &evergreen_pre_page_flip,
@@ -2390,7 +2395,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2390 RADEON_CG_SUPPORT_HDP_LS | 2395 RADEON_CG_SUPPORT_HDP_LS |
2391 RADEON_CG_SUPPORT_HDP_MGCG; 2396 RADEON_CG_SUPPORT_HDP_MGCG;
2392 rdev->pg_flags = 0 | 2397 rdev->pg_flags = 0 |
2393 /*RADEON_PG_SUPPORT_GFX_CG | */ 2398 /*RADEON_PG_SUPPORT_GFX_PG | */
2394 RADEON_PG_SUPPORT_SDMA; 2399 RADEON_PG_SUPPORT_SDMA;
2395 break; 2400 break;
2396 case CHIP_OLAND: 2401 case CHIP_OLAND:
@@ -2479,7 +2484,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2479 RADEON_CG_SUPPORT_HDP_LS | 2484 RADEON_CG_SUPPORT_HDP_LS |
2480 RADEON_CG_SUPPORT_HDP_MGCG; 2485 RADEON_CG_SUPPORT_HDP_MGCG;
2481 rdev->pg_flags = 0; 2486 rdev->pg_flags = 0;
2482 /*RADEON_PG_SUPPORT_GFX_CG | 2487 /*RADEON_PG_SUPPORT_GFX_PG |
2483 RADEON_PG_SUPPORT_GFX_SMG | 2488 RADEON_PG_SUPPORT_GFX_SMG |
2484 RADEON_PG_SUPPORT_GFX_DMG | 2489 RADEON_PG_SUPPORT_GFX_DMG |
2485 RADEON_PG_SUPPORT_UVD | 2490 RADEON_PG_SUPPORT_UVD |
@@ -2507,7 +2512,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2507 RADEON_CG_SUPPORT_HDP_LS | 2512 RADEON_CG_SUPPORT_HDP_LS |
2508 RADEON_CG_SUPPORT_HDP_MGCG; 2513 RADEON_CG_SUPPORT_HDP_MGCG;
2509 rdev->pg_flags = 0; 2514 rdev->pg_flags = 0;
2510 /*RADEON_PG_SUPPORT_GFX_CG | 2515 /*RADEON_PG_SUPPORT_GFX_PG |
2511 RADEON_PG_SUPPORT_GFX_SMG | 2516 RADEON_PG_SUPPORT_GFX_SMG |
2512 RADEON_PG_SUPPORT_UVD | 2517 RADEON_PG_SUPPORT_UVD |
2513 RADEON_PG_SUPPORT_VCE | 2518 RADEON_PG_SUPPORT_VCE |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 818bbe6b884b..70c29d5e080d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -389,6 +389,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
389u32 r600_get_xclk(struct radeon_device *rdev); 389u32 r600_get_xclk(struct radeon_device *rdev);
390uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); 390uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
391int rv6xx_get_temp(struct radeon_device *rdev); 391int rv6xx_get_temp(struct radeon_device *rdev);
392int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
392int r600_dpm_pre_set_power_state(struct radeon_device *rdev); 393int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
393void r600_dpm_post_set_power_state(struct radeon_device *rdev); 394void r600_dpm_post_set_power_state(struct radeon_device *rdev);
394/* r600 dma */ 395/* r600 dma */
@@ -428,6 +429,8 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
428 struct radeon_ps *ps); 429 struct radeon_ps *ps);
429void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 430void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
430 struct seq_file *m); 431 struct seq_file *m);
432int rs780_dpm_force_performance_level(struct radeon_device *rdev,
433 enum radeon_dpm_forced_level level);
431 434
432/* 435/*
433 * rv770,rv730,rv710,rv740 436 * rv770,rv730,rv710,rv740
@@ -625,6 +628,7 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
625 struct seq_file *m); 628 struct seq_file *m);
626int trinity_dpm_force_performance_level(struct radeon_device *rdev, 629int trinity_dpm_force_performance_level(struct radeon_device *rdev,
627 enum radeon_dpm_forced_level level); 630 enum radeon_dpm_forced_level level);
631void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
628 632
629/* DCE6 - SI */ 633/* DCE6 - SI */
630void dce6_bandwidth_update(struct radeon_device *rdev); 634void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -781,6 +785,7 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
781int kv_dpm_force_performance_level(struct radeon_device *rdev, 785int kv_dpm_force_performance_level(struct radeon_device *rdev,
782 enum radeon_dpm_forced_level level); 786 enum radeon_dpm_forced_level level);
783void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); 787void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
788void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
784 789
785/* uvd v1.0 */ 790/* uvd v1.0 */
786uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev, 791uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2399f25ec037..79159b5da05b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -396,6 +396,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
396 } 396 }
397 } 397 }
398 398
399 if (property == rdev->mode_info.audio_property) {
400 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
401 /* need to find digital encoder on connector */
402 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
403 if (!encoder)
404 return 0;
405
406 radeon_encoder = to_radeon_encoder(encoder);
407
408 if (radeon_connector->audio != val) {
409 radeon_connector->audio = val;
410 radeon_property_change_mode(&radeon_encoder->base);
411 }
412 }
413
399 if (property == rdev->mode_info.underscan_property) { 414 if (property == rdev->mode_info.underscan_property) {
400 /* need to find digital encoder on connector */ 415 /* need to find digital encoder on connector */
401 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); 416 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -1420,7 +1435,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1420 if (radeon_dp_getdpcd(radeon_connector)) 1435 if (radeon_dp_getdpcd(radeon_connector))
1421 ret = connector_status_connected; 1436 ret = connector_status_connected;
1422 } else { 1437 } else {
1423 /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */ 1438 /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
1424 if (radeon_ddc_probe(radeon_connector, false)) 1439 if (radeon_ddc_probe(radeon_connector, false))
1425 ret = connector_status_connected; 1440 ret = connector_status_connected;
1426 } 1441 }
@@ -1489,6 +1504,24 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
1489 .force = radeon_dvi_force, 1504 .force = radeon_dvi_force,
1490}; 1505};
1491 1506
1507static const struct drm_connector_funcs radeon_edp_connector_funcs = {
1508 .dpms = drm_helper_connector_dpms,
1509 .detect = radeon_dp_detect,
1510 .fill_modes = drm_helper_probe_single_connector_modes,
1511 .set_property = radeon_lvds_set_property,
1512 .destroy = radeon_dp_connector_destroy,
1513 .force = radeon_dvi_force,
1514};
1515
1516static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
1517 .dpms = drm_helper_connector_dpms,
1518 .detect = radeon_dp_detect,
1519 .fill_modes = drm_helper_probe_single_connector_modes,
1520 .set_property = radeon_lvds_set_property,
1521 .destroy = radeon_dp_connector_destroy,
1522 .force = radeon_dvi_force,
1523};
1524
1492void 1525void
1493radeon_add_atom_connector(struct drm_device *dev, 1526radeon_add_atom_connector(struct drm_device *dev,
1494 uint32_t connector_id, 1527 uint32_t connector_id,
@@ -1580,8 +1613,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1580 goto failed; 1613 goto failed;
1581 radeon_dig_connector->igp_lane_info = igp_lane_info; 1614 radeon_dig_connector->igp_lane_info = igp_lane_info;
1582 radeon_connector->con_priv = radeon_dig_connector; 1615 radeon_connector->con_priv = radeon_dig_connector;
1583 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
1584 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1585 if (i2c_bus->valid) { 1616 if (i2c_bus->valid) {
1586 /* add DP i2c bus */ 1617 /* add DP i2c bus */
1587 if (connector_type == DRM_MODE_CONNECTOR_eDP) 1618 if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1598,6 +1629,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1598 case DRM_MODE_CONNECTOR_VGA: 1629 case DRM_MODE_CONNECTOR_VGA:
1599 case DRM_MODE_CONNECTOR_DVIA: 1630 case DRM_MODE_CONNECTOR_DVIA:
1600 default: 1631 default:
1632 drm_connector_init(dev, &radeon_connector->base,
1633 &radeon_dp_connector_funcs, connector_type);
1634 drm_connector_helper_add(&radeon_connector->base,
1635 &radeon_dp_connector_helper_funcs);
1601 connector->interlace_allowed = true; 1636 connector->interlace_allowed = true;
1602 connector->doublescan_allowed = true; 1637 connector->doublescan_allowed = true;
1603 radeon_connector->dac_load_detect = true; 1638 radeon_connector->dac_load_detect = true;
@@ -1610,6 +1645,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1610 case DRM_MODE_CONNECTOR_HDMIA: 1645 case DRM_MODE_CONNECTOR_HDMIA:
1611 case DRM_MODE_CONNECTOR_HDMIB: 1646 case DRM_MODE_CONNECTOR_HDMIB:
1612 case DRM_MODE_CONNECTOR_DisplayPort: 1647 case DRM_MODE_CONNECTOR_DisplayPort:
1648 drm_connector_init(dev, &radeon_connector->base,
1649 &radeon_dp_connector_funcs, connector_type);
1650 drm_connector_helper_add(&radeon_connector->base,
1651 &radeon_dp_connector_helper_funcs);
1613 drm_object_attach_property(&radeon_connector->base.base, 1652 drm_object_attach_property(&radeon_connector->base.base,
1614 rdev->mode_info.underscan_property, 1653 rdev->mode_info.underscan_property,
1615 UNDERSCAN_OFF); 1654 UNDERSCAN_OFF);
@@ -1619,6 +1658,9 @@ radeon_add_atom_connector(struct drm_device *dev,
1619 drm_object_attach_property(&radeon_connector->base.base, 1658 drm_object_attach_property(&radeon_connector->base.base,
1620 rdev->mode_info.underscan_vborder_property, 1659 rdev->mode_info.underscan_vborder_property,
1621 0); 1660 0);
1661 drm_object_attach_property(&radeon_connector->base.base,
1662 rdev->mode_info.audio_property,
1663 RADEON_AUDIO_DISABLE);
1622 subpixel_order = SubPixelHorizontalRGB; 1664 subpixel_order = SubPixelHorizontalRGB;
1623 connector->interlace_allowed = true; 1665 connector->interlace_allowed = true;
1624 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1666 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1634,6 +1676,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1634 break; 1676 break;
1635 case DRM_MODE_CONNECTOR_LVDS: 1677 case DRM_MODE_CONNECTOR_LVDS:
1636 case DRM_MODE_CONNECTOR_eDP: 1678 case DRM_MODE_CONNECTOR_eDP:
1679 drm_connector_init(dev, &radeon_connector->base,
1680 &radeon_lvds_bridge_connector_funcs, connector_type);
1681 drm_connector_helper_add(&radeon_connector->base,
1682 &radeon_dp_connector_helper_funcs);
1637 drm_object_attach_property(&radeon_connector->base.base, 1683 drm_object_attach_property(&radeon_connector->base.base,
1638 dev->mode_config.scaling_mode_property, 1684 dev->mode_config.scaling_mode_property,
1639 DRM_MODE_SCALE_FULLSCREEN); 1685 DRM_MODE_SCALE_FULLSCREEN);
@@ -1708,6 +1754,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1708 rdev->mode_info.underscan_vborder_property, 1754 rdev->mode_info.underscan_vborder_property,
1709 0); 1755 0);
1710 } 1756 }
1757 if (ASIC_IS_DCE2(rdev)) {
1758 drm_object_attach_property(&radeon_connector->base.base,
1759 rdev->mode_info.audio_property,
1760 RADEON_AUDIO_DISABLE);
1761 }
1711 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1762 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1712 radeon_connector->dac_load_detect = true; 1763 radeon_connector->dac_load_detect = true;
1713 drm_object_attach_property(&radeon_connector->base.base, 1764 drm_object_attach_property(&radeon_connector->base.base,
@@ -1748,6 +1799,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1748 rdev->mode_info.underscan_vborder_property, 1799 rdev->mode_info.underscan_vborder_property,
1749 0); 1800 0);
1750 } 1801 }
1802 if (ASIC_IS_DCE2(rdev)) {
1803 drm_object_attach_property(&radeon_connector->base.base,
1804 rdev->mode_info.audio_property,
1805 RADEON_AUDIO_DISABLE);
1806 }
1751 subpixel_order = SubPixelHorizontalRGB; 1807 subpixel_order = SubPixelHorizontalRGB;
1752 connector->interlace_allowed = true; 1808 connector->interlace_allowed = true;
1753 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1809 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1787,6 +1843,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1787 rdev->mode_info.underscan_vborder_property, 1843 rdev->mode_info.underscan_vborder_property,
1788 0); 1844 0);
1789 } 1845 }
1846 if (ASIC_IS_DCE2(rdev)) {
1847 drm_object_attach_property(&radeon_connector->base.base,
1848 rdev->mode_info.audio_property,
1849 RADEON_AUDIO_DISABLE);
1850 }
1790 connector->interlace_allowed = true; 1851 connector->interlace_allowed = true;
1791 /* in theory with a DP to VGA converter... */ 1852 /* in theory with a DP to VGA converter... */
1792 connector->doublescan_allowed = false; 1853 connector->doublescan_allowed = false;
@@ -1797,7 +1858,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1797 goto failed; 1858 goto failed;
1798 radeon_dig_connector->igp_lane_info = igp_lane_info; 1859 radeon_dig_connector->igp_lane_info = igp_lane_info;
1799 radeon_connector->con_priv = radeon_dig_connector; 1860 radeon_connector->con_priv = radeon_dig_connector;
1800 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); 1861 drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
1801 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); 1862 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1802 if (i2c_bus->valid) { 1863 if (i2c_bus->valid) {
1803 /* add DP i2c bus */ 1864 /* add DP i2c bus */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a56084410372..ac6ece61a476 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -28,6 +28,7 @@
28#include <drm/radeon_drm.h> 28#include <drm/radeon_drm.h>
29#include "radeon_reg.h" 29#include "radeon_reg.h"
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_trace.h"
31 32
32static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) 33static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
33{ 34{
@@ -80,9 +81,11 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
80 p->relocs[i].lobj.bo = p->relocs[i].robj; 81 p->relocs[i].lobj.bo = p->relocs[i].robj;
81 p->relocs[i].lobj.written = !!r->write_domain; 82 p->relocs[i].lobj.written = !!r->write_domain;
82 83
83 /* the first reloc of an UVD job is the 84 /* the first reloc of an UVD job is the msg and that must be in
84 msg and that must be in VRAM */ 85 VRAM, also but everything into VRAM on AGP cards to avoid
85 if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) { 86 image corruptions */
87 if (p->ring == R600_RING_TYPE_UVD_INDEX &&
88 (i == 0 || p->rdev->flags & RADEON_IS_AGP)) {
86 /* TODO: is this still needed for NI+ ? */ 89 /* TODO: is this still needed for NI+ ? */
87 p->relocs[i].lobj.domain = 90 p->relocs[i].lobj.domain =
88 RADEON_GEM_DOMAIN_VRAM; 91 RADEON_GEM_DOMAIN_VRAM;
@@ -559,6 +562,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
559 return r; 562 return r;
560 } 563 }
561 564
565 trace_radeon_cs(&parser);
566
562 r = radeon_cs_ib_chunk(rdev, &parser); 567 r = radeon_cs_ib_chunk(rdev, &parser);
563 if (r) { 568 if (r) {
564 goto out; 569 goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 16cb8792b1e6..e29faa73b574 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1249,6 +1249,17 @@ int radeon_device_init(struct radeon_device *rdev,
1249 /* Registers mapping */ 1249 /* Registers mapping */
1250 /* TODO: block userspace mapping of io register */ 1250 /* TODO: block userspace mapping of io register */
1251 spin_lock_init(&rdev->mmio_idx_lock); 1251 spin_lock_init(&rdev->mmio_idx_lock);
1252 spin_lock_init(&rdev->smc_idx_lock);
1253 spin_lock_init(&rdev->pll_idx_lock);
1254 spin_lock_init(&rdev->mc_idx_lock);
1255 spin_lock_init(&rdev->pcie_idx_lock);
1256 spin_lock_init(&rdev->pciep_idx_lock);
1257 spin_lock_init(&rdev->pif_idx_lock);
1258 spin_lock_init(&rdev->cg_idx_lock);
1259 spin_lock_init(&rdev->uvd_idx_lock);
1260 spin_lock_init(&rdev->rcu_idx_lock);
1261 spin_lock_init(&rdev->didt_idx_lock);
1262 spin_lock_init(&rdev->end_idx_lock);
1252 if (rdev->family >= CHIP_BONAIRE) { 1263 if (rdev->family >= CHIP_BONAIRE) {
1253 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); 1264 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1254 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5); 1265 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b055bddaa94c..0d1aa050d41d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1172,6 +1172,12 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
1172 { UNDERSCAN_AUTO, "auto" }, 1172 { UNDERSCAN_AUTO, "auto" },
1173}; 1173};
1174 1174
1175static struct drm_prop_enum_list radeon_audio_enum_list[] =
1176{ { RADEON_AUDIO_DISABLE, "off" },
1177 { RADEON_AUDIO_ENABLE, "on" },
1178 { RADEON_AUDIO_AUTO, "auto" },
1179};
1180
1175static int radeon_modeset_create_props(struct radeon_device *rdev) 1181static int radeon_modeset_create_props(struct radeon_device *rdev)
1176{ 1182{
1177 int sz; 1183 int sz;
@@ -1222,6 +1228,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
1222 if (!rdev->mode_info.underscan_vborder_property) 1228 if (!rdev->mode_info.underscan_vborder_property)
1223 return -ENOMEM; 1229 return -ENOMEM;
1224 1230
1231 sz = ARRAY_SIZE(radeon_audio_enum_list);
1232 rdev->mode_info.audio_property =
1233 drm_property_create_enum(rdev->ddev, 0,
1234 "audio",
1235 radeon_audio_enum_list, sz);
1236
1225 return 0; 1237 return 0;
1226} 1238}
1227 1239
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb4445f55a96..cdd12dcd988b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
153int radeon_testing = 0; 153int radeon_testing = 0;
154int radeon_connector_table = 0; 154int radeon_connector_table = 0;
155int radeon_tv = 1; 155int radeon_tv = 1;
156int radeon_audio = 0; 156int radeon_audio = 1;
157int radeon_disp_priority = 0; 157int radeon_disp_priority = 0;
158int radeon_hw_i2c = 0; 158int radeon_hw_i2c = 0;
159int radeon_pcie_gen2 = -1; 159int radeon_pcie_gen2 = -1;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d908d8d68f6b..ef63d3f00b2f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -247,6 +247,8 @@ struct radeon_mode_info {
247 struct drm_property *underscan_property; 247 struct drm_property *underscan_property;
248 struct drm_property *underscan_hborder_property; 248 struct drm_property *underscan_hborder_property;
249 struct drm_property *underscan_vborder_property; 249 struct drm_property *underscan_vborder_property;
250 /* audio */
251 struct drm_property *audio_property;
250 /* hardcoded DFP edid from BIOS */ 252 /* hardcoded DFP edid from BIOS */
251 struct edid *bios_hardcoded_edid; 253 struct edid *bios_hardcoded_edid;
252 int bios_hardcoded_edid_size; 254 int bios_hardcoded_edid_size;
@@ -471,6 +473,12 @@ struct radeon_router {
471 u8 cd_mux_state; 473 u8 cd_mux_state;
472}; 474};
473 475
476enum radeon_connector_audio {
477 RADEON_AUDIO_DISABLE = 0,
478 RADEON_AUDIO_ENABLE = 1,
479 RADEON_AUDIO_AUTO = 2
480};
481
474struct radeon_connector { 482struct radeon_connector {
475 struct drm_connector base; 483 struct drm_connector base;
476 uint32_t connector_id; 484 uint32_t connector_id;
@@ -489,6 +497,7 @@ struct radeon_connector {
489 struct radeon_hpd hpd; 497 struct radeon_hpd hpd;
490 struct radeon_router router; 498 struct radeon_router router;
491 struct radeon_i2c_chan *router_bus; 499 struct radeon_i2c_chan *router_bus;
500 enum radeon_connector_audio audio;
492}; 501};
493 502
494struct radeon_framebuffer { 503struct radeon_framebuffer {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d7555369a3e5..87e1d69e8fdb 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -67,7 +67,16 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
67 67
68void radeon_pm_acpi_event_handler(struct radeon_device *rdev) 68void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
69{ 69{
70 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 70 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
71 mutex_lock(&rdev->pm.mutex);
72 if (power_supply_is_system_supplied() > 0)
73 rdev->pm.dpm.ac_power = true;
74 else
75 rdev->pm.dpm.ac_power = false;
76 if (rdev->asic->dpm.enable_bapm)
77 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
78 mutex_unlock(&rdev->pm.mutex);
79 } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
71 if (rdev->pm.profile == PM_PROFILE_AUTO) { 80 if (rdev->pm.profile == PM_PROFILE_AUTO) {
72 mutex_lock(&rdev->pm.mutex); 81 mutex_lock(&rdev->pm.mutex);
73 radeon_pm_update_profile(rdev); 82 radeon_pm_update_profile(rdev);
@@ -333,7 +342,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
333 struct device_attribute *attr, 342 struct device_attribute *attr,
334 char *buf) 343 char *buf)
335{ 344{
336 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 345 struct drm_device *ddev = dev_get_drvdata(dev);
337 struct radeon_device *rdev = ddev->dev_private; 346 struct radeon_device *rdev = ddev->dev_private;
338 int cp = rdev->pm.profile; 347 int cp = rdev->pm.profile;
339 348
@@ -349,7 +358,7 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
349 const char *buf, 358 const char *buf,
350 size_t count) 359 size_t count)
351{ 360{
352 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 361 struct drm_device *ddev = dev_get_drvdata(dev);
353 struct radeon_device *rdev = ddev->dev_private; 362 struct radeon_device *rdev = ddev->dev_private;
354 363
355 mutex_lock(&rdev->pm.mutex); 364 mutex_lock(&rdev->pm.mutex);
@@ -383,7 +392,7 @@ static ssize_t radeon_get_pm_method(struct device *dev,
383 struct device_attribute *attr, 392 struct device_attribute *attr,
384 char *buf) 393 char *buf)
385{ 394{
386 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 395 struct drm_device *ddev = dev_get_drvdata(dev);
387 struct radeon_device *rdev = ddev->dev_private; 396 struct radeon_device *rdev = ddev->dev_private;
388 int pm = rdev->pm.pm_method; 397 int pm = rdev->pm.pm_method;
389 398
@@ -397,7 +406,7 @@ static ssize_t radeon_set_pm_method(struct device *dev,
397 const char *buf, 406 const char *buf,
398 size_t count) 407 size_t count)
399{ 408{
400 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 409 struct drm_device *ddev = dev_get_drvdata(dev);
401 struct radeon_device *rdev = ddev->dev_private; 410 struct radeon_device *rdev = ddev->dev_private;
402 411
403 /* we don't support the legacy modes with dpm */ 412 /* we don't support the legacy modes with dpm */
@@ -433,7 +442,7 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
433 struct device_attribute *attr, 442 struct device_attribute *attr,
434 char *buf) 443 char *buf)
435{ 444{
436 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 445 struct drm_device *ddev = dev_get_drvdata(dev);
437 struct radeon_device *rdev = ddev->dev_private; 446 struct radeon_device *rdev = ddev->dev_private;
438 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 447 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
439 448
@@ -447,7 +456,7 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
447 const char *buf, 456 const char *buf,
448 size_t count) 457 size_t count)
449{ 458{
450 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 459 struct drm_device *ddev = dev_get_drvdata(dev);
451 struct radeon_device *rdev = ddev->dev_private; 460 struct radeon_device *rdev = ddev->dev_private;
452 461
453 mutex_lock(&rdev->pm.mutex); 462 mutex_lock(&rdev->pm.mutex);
@@ -472,7 +481,7 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
472 struct device_attribute *attr, 481 struct device_attribute *attr,
473 char *buf) 482 char *buf)
474{ 483{
475 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 484 struct drm_device *ddev = dev_get_drvdata(dev);
476 struct radeon_device *rdev = ddev->dev_private; 485 struct radeon_device *rdev = ddev->dev_private;
477 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 486 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
478 487
@@ -486,7 +495,7 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
486 const char *buf, 495 const char *buf,
487 size_t count) 496 size_t count)
488{ 497{
489 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 498 struct drm_device *ddev = dev_get_drvdata(dev);
490 struct radeon_device *rdev = ddev->dev_private; 499 struct radeon_device *rdev = ddev->dev_private;
491 enum radeon_dpm_forced_level level; 500 enum radeon_dpm_forced_level level;
492 int ret = 0; 501 int ret = 0;
@@ -524,7 +533,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
524 struct device_attribute *attr, 533 struct device_attribute *attr,
525 char *buf) 534 char *buf)
526{ 535{
527 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 536 struct drm_device *ddev = dev_get_drvdata(dev);
528 struct radeon_device *rdev = ddev->dev_private; 537 struct radeon_device *rdev = ddev->dev_private;
529 int temp; 538 int temp;
530 539
@@ -536,6 +545,23 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
536 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 545 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
537} 546}
538 547
548static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
549 struct device_attribute *attr,
550 char *buf)
551{
552 struct drm_device *ddev = dev_get_drvdata(dev);
553 struct radeon_device *rdev = ddev->dev_private;
554 int hyst = to_sensor_dev_attr(attr)->index;
555 int temp;
556
557 if (hyst)
558 temp = rdev->pm.dpm.thermal.min_temp;
559 else
560 temp = rdev->pm.dpm.thermal.max_temp;
561
562 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
563}
564
539static ssize_t radeon_hwmon_show_name(struct device *dev, 565static ssize_t radeon_hwmon_show_name(struct device *dev,
540 struct device_attribute *attr, 566 struct device_attribute *attr,
541 char *buf) 567 char *buf)
@@ -544,16 +570,37 @@ static ssize_t radeon_hwmon_show_name(struct device *dev,
544} 570}
545 571
546static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 572static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
573static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
574static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
547static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); 575static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
548 576
549static struct attribute *hwmon_attributes[] = { 577static struct attribute *hwmon_attributes[] = {
550 &sensor_dev_attr_temp1_input.dev_attr.attr, 578 &sensor_dev_attr_temp1_input.dev_attr.attr,
579 &sensor_dev_attr_temp1_crit.dev_attr.attr,
580 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
551 &sensor_dev_attr_name.dev_attr.attr, 581 &sensor_dev_attr_name.dev_attr.attr,
552 NULL 582 NULL
553}; 583};
554 584
585static umode_t hwmon_attributes_visible(struct kobject *kobj,
586 struct attribute *attr, int index)
587{
588 struct device *dev = container_of(kobj, struct device, kobj);
589 struct drm_device *ddev = dev_get_drvdata(dev);
590 struct radeon_device *rdev = ddev->dev_private;
591
592 /* Skip limit attributes if DPM is not enabled */
593 if (rdev->pm.pm_method != PM_METHOD_DPM &&
594 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
595 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
596 return 0;
597
598 return attr->mode;
599}
600
555static const struct attribute_group hwmon_attrgroup = { 601static const struct attribute_group hwmon_attrgroup = {
556 .attrs = hwmon_attributes, 602 .attrs = hwmon_attributes,
603 .is_visible = hwmon_attributes_visible,
557}; 604};
558 605
559static int radeon_hwmon_init(struct radeon_device *rdev) 606static int radeon_hwmon_init(struct radeon_device *rdev)
@@ -870,10 +917,13 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
870 917
871 radeon_dpm_post_set_power_state(rdev); 918 radeon_dpm_post_set_power_state(rdev);
872 919
873 /* force low perf level for thermal */ 920 if (rdev->asic->dpm.force_performance_level) {
874 if (rdev->pm.dpm.thermal_active && 921 if (rdev->pm.dpm.thermal_active)
875 rdev->asic->dpm.force_performance_level) { 922 /* force low perf level for thermal */
876 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); 923 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
924 else
925 /* otherwise, enable auto */
926 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
877 } 927 }
878 928
879done: 929done:
@@ -1102,9 +1152,10 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
1102{ 1152{
1103 int ret; 1153 int ret;
1104 1154
1105 /* default to performance state */ 1155 /* default to balanced state */
1106 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 1156 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
1107 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 1157 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
1158 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1108 rdev->pm.default_sclk = rdev->clock.default_sclk; 1159 rdev->pm.default_sclk = rdev->clock.default_sclk;
1109 rdev->pm.default_mclk = rdev->clock.default_mclk; 1160 rdev->pm.default_mclk = rdev->clock.default_mclk;
1110 rdev->pm.current_sclk = rdev->clock.default_sclk; 1161 rdev->pm.current_sclk = rdev->clock.default_sclk;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index eafd8160a155..f7e367815964 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -27,6 +27,26 @@ TRACE_EVENT(radeon_bo_create,
27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) 27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
28); 28);
29 29
30TRACE_EVENT(radeon_cs,
31 TP_PROTO(struct radeon_cs_parser *p),
32 TP_ARGS(p),
33 TP_STRUCT__entry(
34 __field(u32, ring)
35 __field(u32, dw)
36 __field(u32, fences)
37 ),
38
39 TP_fast_assign(
40 __entry->ring = p->ring;
41 __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
42 __entry->fences = radeon_fence_count_emitted(
43 p->rdev, p->ring);
44 ),
45 TP_printk("ring=%u, dw=%u, fences=%u",
46 __entry->ring, __entry->dw,
47 __entry->fences)
48);
49
30DECLARE_EVENT_CLASS(radeon_fence_request, 50DECLARE_EVENT_CLASS(radeon_fence_request,
31 51
32 TP_PROTO(struct drm_device *dev, u32 seqno), 52 TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -53,13 +73,6 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
53 TP_ARGS(dev, seqno) 73 TP_ARGS(dev, seqno)
54); 74);
55 75
56DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
57
58 TP_PROTO(struct drm_device *dev, u32 seqno),
59
60 TP_ARGS(dev, seqno)
61);
62
63DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, 76DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
64 77
65 TP_PROTO(struct drm_device *dev, u32 seqno), 78 TP_PROTO(struct drm_device *dev, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b8074a8ec75a..9566b5940a5a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -274,19 +274,26 @@ static void rs400_mc_init(struct radeon_device *rdev)
274 274
275uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 275uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
276{ 276{
277 unsigned long flags;
277 uint32_t r; 278 uint32_t r;
278 279
280 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
279 WREG32(RS480_NB_MC_INDEX, reg & 0xff); 281 WREG32(RS480_NB_MC_INDEX, reg & 0xff);
280 r = RREG32(RS480_NB_MC_DATA); 282 r = RREG32(RS480_NB_MC_DATA);
281 WREG32(RS480_NB_MC_INDEX, 0xff); 283 WREG32(RS480_NB_MC_INDEX, 0xff);
284 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
282 return r; 285 return r;
283} 286}
284 287
285void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 288void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
286{ 289{
290 unsigned long flags;
291
292 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
287 WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN); 293 WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
288 WREG32(RS480_NB_MC_DATA, (v)); 294 WREG32(RS480_NB_MC_DATA, (v));
289 WREG32(RS480_NB_MC_INDEX, 0xff); 295 WREG32(RS480_NB_MC_INDEX, 0xff);
296 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
290} 297}
291 298
292#if defined(CONFIG_DEBUG_FS) 299#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 670b555d2ca2..6acba8017b9a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -847,16 +847,26 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
847 847
848uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) 848uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
849{ 849{
850 unsigned long flags;
851 u32 r;
852
853 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
850 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 854 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
851 S_000070_MC_IND_CITF_ARB0(1)); 855 S_000070_MC_IND_CITF_ARB0(1));
852 return RREG32(R_000074_MC_IND_DATA); 856 r = RREG32(R_000074_MC_IND_DATA);
857 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
858 return r;
853} 859}
854 860
855void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 861void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
856{ 862{
863 unsigned long flags;
864
865 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
857 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 866 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
858 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); 867 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
859 WREG32(R_000074_MC_IND_DATA, v); 868 WREG32(R_000074_MC_IND_DATA, v);
869 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
860} 870}
861 871
862static void rs600_debugfs(struct radeon_device *rdev) 872static void rs600_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index d8ddfb34545d..1447d794c22a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -631,20 +631,27 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
631 631
632uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) 632uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
633{ 633{
634 unsigned long flags;
634 uint32_t r; 635 uint32_t r;
635 636
637 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
636 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); 638 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
637 r = RREG32(R_00007C_MC_DATA); 639 r = RREG32(R_00007C_MC_DATA);
638 WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); 640 WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
641 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
639 return r; 642 return r;
640} 643}
641 644
642void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 645void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
643{ 646{
647 unsigned long flags;
648
649 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
644 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | 650 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
645 S_000078_MC_IND_WR_EN(1)); 651 S_000078_MC_IND_WR_EN(1));
646 WREG32(R_00007C_MC_DATA, v); 652 WREG32(R_00007C_MC_DATA, v);
647 WREG32(R_000078_MC_INDEX, 0x7F); 653 WREG32(R_000078_MC_INDEX, 0x7F);
654 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
648} 655}
649 656
650static void rs690_mc_program(struct radeon_device *rdev) 657static void rs690_mc_program(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index d1a1ce73bd45..6af8505cf4d2 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -62,9 +62,7 @@ static void rs780_get_pm_mode_parameters(struct radeon_device *rdev)
62 radeon_crtc = to_radeon_crtc(crtc); 62 radeon_crtc = to_radeon_crtc(crtc);
63 pi->crtc_id = radeon_crtc->crtc_id; 63 pi->crtc_id = radeon_crtc->crtc_id;
64 if (crtc->mode.htotal && crtc->mode.vtotal) 64 if (crtc->mode.htotal && crtc->mode.vtotal)
65 pi->refresh_rate = 65 pi->refresh_rate = drm_mode_vrefresh(&crtc->mode);
66 (crtc->mode.clock * 1000) /
67 (crtc->mode.htotal * crtc->mode.vtotal);
68 break; 66 break;
69 } 67 }
70 } 68 }
@@ -376,9 +374,8 @@ static void rs780_disable_vbios_powersaving(struct radeon_device *rdev)
376 WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000); 374 WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000);
377} 375}
378 376
379static void rs780_force_voltage_to_high(struct radeon_device *rdev) 377static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage)
380{ 378{
381 struct igp_power_info *pi = rs780_get_pi(rdev);
382 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); 379 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
383 380
384 if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) && 381 if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
@@ -390,7 +387,7 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
390 udelay(1); 387 udelay(1);
391 388
392 WREG32_P(FVTHROT_PWM_CTRL_REG0, 389 WREG32_P(FVTHROT_PWM_CTRL_REG0,
393 STARTING_PWM_HIGHTIME(pi->max_voltage), 390 STARTING_PWM_HIGHTIME(voltage),
394 ~STARTING_PWM_HIGHTIME_MASK); 391 ~STARTING_PWM_HIGHTIME_MASK);
395 392
396 WREG32_P(FVTHROT_PWM_CTRL_REG0, 393 WREG32_P(FVTHROT_PWM_CTRL_REG0,
@@ -404,6 +401,26 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
404 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 401 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
405} 402}
406 403
404static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div)
405{
406 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
407
408 if (current_state->sclk_low == current_state->sclk_high)
409 return;
410
411 WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
412
413 WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div),
414 ~FORCED_FEEDBACK_DIV_MASK);
415 WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div),
416 ~STARTING_FEEDBACK_DIV_MASK);
417 WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
418
419 udelay(100);
420
421 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
422}
423
407static int rs780_set_engine_clock_scaling(struct radeon_device *rdev, 424static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
408 struct radeon_ps *new_ps, 425 struct radeon_ps *new_ps,
409 struct radeon_ps *old_ps) 426 struct radeon_ps *old_ps)
@@ -432,17 +449,13 @@ static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
432 if (ret) 449 if (ret)
433 return ret; 450 return ret;
434 451
435 WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL); 452 if ((min_dividers.ref_div != max_dividers.ref_div) ||
436 453 (min_dividers.post_div != max_dividers.post_div) ||
437 WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div), 454 (max_dividers.ref_div != current_max_dividers.ref_div) ||
438 ~FORCED_FEEDBACK_DIV_MASK); 455 (max_dividers.post_div != current_max_dividers.post_div))
439 WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div), 456 return -EINVAL;
440 ~STARTING_FEEDBACK_DIV_MASK);
441 WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
442
443 udelay(100);
444 457
445 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 458 rs780_force_fbdiv(rdev, max_dividers.fb_div);
446 459
447 if (max_dividers.fb_div > min_dividers.fb_div) { 460 if (max_dividers.fb_div > min_dividers.fb_div) {
448 WREG32_P(FVTHROT_FBDIV_REG0, 461 WREG32_P(FVTHROT_FBDIV_REG0,
@@ -486,6 +499,9 @@ static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev,
486 (new_state->sclk_low == old_state->sclk_low)) 499 (new_state->sclk_low == old_state->sclk_low))
487 return; 500 return;
488 501
502 if (new_state->sclk_high == new_state->sclk_low)
503 return;
504
489 rs780_clk_scaling_enable(rdev, true); 505 rs780_clk_scaling_enable(rdev, true);
490} 506}
491 507
@@ -649,7 +665,7 @@ int rs780_dpm_set_power_state(struct radeon_device *rdev)
649 rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 665 rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
650 666
651 if (pi->voltage_control) { 667 if (pi->voltage_control) {
652 rs780_force_voltage_to_high(rdev); 668 rs780_force_voltage(rdev, pi->max_voltage);
653 mdelay(5); 669 mdelay(5);
654 } 670 }
655 671
@@ -717,14 +733,18 @@ static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev,
717 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 733 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
718 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 734 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
719 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 735 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
720 } else if (r600_is_uvd_state(rps->class, rps->class2)) {
721 rps->vclk = RS780_DEFAULT_VCLK_FREQ;
722 rps->dclk = RS780_DEFAULT_DCLK_FREQ;
723 } else { 736 } else {
724 rps->vclk = 0; 737 rps->vclk = 0;
725 rps->dclk = 0; 738 rps->dclk = 0;
726 } 739 }
727 740
741 if (r600_is_uvd_state(rps->class, rps->class2)) {
742 if ((rps->vclk == 0) || (rps->dclk == 0)) {
743 rps->vclk = RS780_DEFAULT_VCLK_FREQ;
744 rps->dclk = RS780_DEFAULT_DCLK_FREQ;
745 }
746 }
747
728 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 748 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
729 rdev->pm.dpm.boot_ps = rps; 749 rdev->pm.dpm.boot_ps = rps;
730 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 750 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
@@ -986,3 +1006,55 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
986 seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", 1006 seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n",
987 ps->sclk_high, ps->max_voltage); 1007 ps->sclk_high, ps->max_voltage);
988} 1008}
1009
1010int rs780_dpm_force_performance_level(struct radeon_device *rdev,
1011 enum radeon_dpm_forced_level level)
1012{
1013 struct igp_power_info *pi = rs780_get_pi(rdev);
1014 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
1015 struct igp_ps *ps = rs780_get_ps(rps);
1016 struct atom_clock_dividers dividers;
1017 int ret;
1018
1019 rs780_clk_scaling_enable(rdev, false);
1020 rs780_voltage_scaling_enable(rdev, false);
1021
1022 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1023 if (pi->voltage_control)
1024 rs780_force_voltage(rdev, pi->max_voltage);
1025
1026 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
1027 ps->sclk_high, false, &dividers);
1028 if (ret)
1029 return ret;
1030
1031 rs780_force_fbdiv(rdev, dividers.fb_div);
1032 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1033 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
1034 ps->sclk_low, false, &dividers);
1035 if (ret)
1036 return ret;
1037
1038 rs780_force_fbdiv(rdev, dividers.fb_div);
1039
1040 if (pi->voltage_control)
1041 rs780_force_voltage(rdev, pi->min_voltage);
1042 } else {
1043 if (pi->voltage_control)
1044 rs780_force_voltage(rdev, pi->max_voltage);
1045
1046 if (ps->sclk_high != ps->sclk_low) {
1047 WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
1048 rs780_clk_scaling_enable(rdev, true);
1049 }
1050
1051 if (pi->voltage_control) {
1052 rs780_voltage_scaling_enable(rdev, true);
1053 rs780_enable_voltage_scaling(rdev, rps);
1054 }
1055 }
1056
1057 rdev->pm.dpm.forced_level = level;
1058
1059 return 0;
1060}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8ea1573ae820..873eb4b193b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -209,19 +209,27 @@ static void rv515_mc_init(struct radeon_device *rdev)
209 209
210uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) 210uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
211{ 211{
212 unsigned long flags;
212 uint32_t r; 213 uint32_t r;
213 214
215 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
214 WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); 216 WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
215 r = RREG32(MC_IND_DATA); 217 r = RREG32(MC_IND_DATA);
216 WREG32(MC_IND_INDEX, 0); 218 WREG32(MC_IND_INDEX, 0);
219 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
220
217 return r; 221 return r;
218} 222}
219 223
220void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 224void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
221{ 225{
226 unsigned long flags;
227
228 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
222 WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); 229 WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
223 WREG32(MC_IND_DATA, (v)); 230 WREG32(MC_IND_DATA, (v));
224 WREG32(MC_IND_INDEX, 0); 231 WREG32(MC_IND_INDEX, 0);
232 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
225} 233}
226 234
227#if defined(CONFIG_DEBUG_FS) 235#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index ab1f2016f21e..5811d277a36a 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1758,8 +1758,6 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
1758 1758
1759 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1759 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1760 1760
1761 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1762
1763 return 0; 1761 return 0;
1764} 1762}
1765 1763
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 8cbb85dae5aa..913b025ae9b3 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2064,12 +2064,6 @@ int rv770_dpm_set_power_state(struct radeon_device *rdev)
2064 rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps); 2064 rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
2065 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 2065 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
2066 2066
2067 ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
2068 if (ret) {
2069 DRM_ERROR("rv770_dpm_force_performance_level failed\n");
2070 return ret;
2071 }
2072
2073 return 0; 2067 return 0;
2074} 2068}
2075 2069
@@ -2147,14 +2141,18 @@ static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
2147 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2141 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2148 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2142 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2149 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2143 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2150 } else if (r600_is_uvd_state(rps->class, rps->class2)) {
2151 rps->vclk = RV770_DEFAULT_VCLK_FREQ;
2152 rps->dclk = RV770_DEFAULT_DCLK_FREQ;
2153 } else { 2144 } else {
2154 rps->vclk = 0; 2145 rps->vclk = 0;
2155 rps->dclk = 0; 2146 rps->dclk = 0;
2156 } 2147 }
2157 2148
2149 if (r600_is_uvd_state(rps->class, rps->class2)) {
2150 if ((rps->vclk == 0) || (rps->dclk == 0)) {
2151 rps->vclk = RV770_DEFAULT_VCLK_FREQ;
2152 rps->dclk = RV770_DEFAULT_DCLK_FREQ;
2153 }
2154 }
2155
2158 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 2156 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
2159 rdev->pm.dpm.boot_ps = rps; 2157 rdev->pm.dpm.boot_ps = rps;
2160 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2158 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
index ab95da570215..b2a224407365 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.c
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -274,8 +274,8 @@ static const u8 cayman_smc_int_vectors[] =
274 0x08, 0x72, 0x08, 0x72 274 0x08, 0x72, 0x08, 0x72
275}; 275};
276 276
277int rv770_set_smc_sram_address(struct radeon_device *rdev, 277static int rv770_set_smc_sram_address(struct radeon_device *rdev,
278 u16 smc_address, u16 limit) 278 u16 smc_address, u16 limit)
279{ 279{
280 u32 addr; 280 u32 addr;
281 281
@@ -296,9 +296,10 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
296 u16 smc_start_address, const u8 *src, 296 u16 smc_start_address, const u8 *src,
297 u16 byte_count, u16 limit) 297 u16 byte_count, u16 limit)
298{ 298{
299 unsigned long flags;
299 u32 data, original_data, extra_shift; 300 u32 data, original_data, extra_shift;
300 u16 addr; 301 u16 addr;
301 int ret; 302 int ret = 0;
302 303
303 if (smc_start_address & 3) 304 if (smc_start_address & 3)
304 return -EINVAL; 305 return -EINVAL;
@@ -307,13 +308,14 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
307 308
308 addr = smc_start_address; 309 addr = smc_start_address;
309 310
311 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
310 while (byte_count >= 4) { 312 while (byte_count >= 4) {
311 /* SMC address space is BE */ 313 /* SMC address space is BE */
312 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 314 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
313 315
314 ret = rv770_set_smc_sram_address(rdev, addr, limit); 316 ret = rv770_set_smc_sram_address(rdev, addr, limit);
315 if (ret) 317 if (ret)
316 return ret; 318 goto done;
317 319
318 WREG32(SMC_SRAM_DATA, data); 320 WREG32(SMC_SRAM_DATA, data);
319 321
@@ -328,7 +330,7 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
328 330
329 ret = rv770_set_smc_sram_address(rdev, addr, limit); 331 ret = rv770_set_smc_sram_address(rdev, addr, limit);
330 if (ret) 332 if (ret)
331 return ret; 333 goto done;
332 334
333 original_data = RREG32(SMC_SRAM_DATA); 335 original_data = RREG32(SMC_SRAM_DATA);
334 336
@@ -346,12 +348,15 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
346 348
347 ret = rv770_set_smc_sram_address(rdev, addr, limit); 349 ret = rv770_set_smc_sram_address(rdev, addr, limit);
348 if (ret) 350 if (ret)
349 return ret; 351 goto done;
350 352
351 WREG32(SMC_SRAM_DATA, data); 353 WREG32(SMC_SRAM_DATA, data);
352 } 354 }
353 355
354 return 0; 356done:
357 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
358
359 return ret;
355} 360}
356 361
357static int rv770_program_interrupt_vectors(struct radeon_device *rdev, 362static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
@@ -461,12 +466,15 @@ PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
461 466
462static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit) 467static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
463{ 468{
469 unsigned long flags;
464 u16 i; 470 u16 i;
465 471
472 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
466 for (i = 0; i < limit; i += 4) { 473 for (i = 0; i < limit; i += 4) {
467 rv770_set_smc_sram_address(rdev, i, limit); 474 rv770_set_smc_sram_address(rdev, i, limit);
468 WREG32(SMC_SRAM_DATA, 0); 475 WREG32(SMC_SRAM_DATA, 0);
469 } 476 }
477 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
470} 478}
471 479
472int rv770_load_smc_ucode(struct radeon_device *rdev, 480int rv770_load_smc_ucode(struct radeon_device *rdev,
@@ -595,27 +603,29 @@ int rv770_load_smc_ucode(struct radeon_device *rdev,
595int rv770_read_smc_sram_dword(struct radeon_device *rdev, 603int rv770_read_smc_sram_dword(struct radeon_device *rdev,
596 u16 smc_address, u32 *value, u16 limit) 604 u16 smc_address, u32 *value, u16 limit)
597{ 605{
606 unsigned long flags;
598 int ret; 607 int ret;
599 608
609 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
600 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 610 ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
601 if (ret) 611 if (ret == 0)
602 return ret; 612 *value = RREG32(SMC_SRAM_DATA);
603 613 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
604 *value = RREG32(SMC_SRAM_DATA);
605 614
606 return 0; 615 return ret;
607} 616}
608 617
609int rv770_write_smc_sram_dword(struct radeon_device *rdev, 618int rv770_write_smc_sram_dword(struct radeon_device *rdev,
610 u16 smc_address, u32 value, u16 limit) 619 u16 smc_address, u32 value, u16 limit)
611{ 620{
621 unsigned long flags;
612 int ret; 622 int ret;
613 623
624 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
614 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 625 ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
615 if (ret) 626 if (ret == 0)
616 return ret; 627 WREG32(SMC_SRAM_DATA, value);
628 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
617 629
618 WREG32(SMC_SRAM_DATA, value); 630 return ret;
619
620 return 0;
621} 631}
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
index f78d92a4b325..3b2c963c4880 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.h
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -187,8 +187,6 @@ typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
187#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C 187#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C
188#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0 188#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0
189 189
190int rv770_set_smc_sram_address(struct radeon_device *rdev,
191 u16 smc_address, u16 limit);
192int rv770_copy_bytes_to_smc(struct radeon_device *rdev, 190int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
193 u16 smc_start_address, const u8 *src, 191 u16 smc_start_address, const u8 *src,
194 u16 byte_count, u16 limit); 192 u16 byte_count, u16 limit);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9fe60e542922..1ae277152cc7 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -852,7 +852,7 @@
852#define AFMT_VBI_PACKET_CONTROL 0x7608 852#define AFMT_VBI_PACKET_CONTROL 0x7608
853# define AFMT_GENERIC0_UPDATE (1 << 2) 853# define AFMT_GENERIC0_UPDATE (1 << 2)
854#define AFMT_INFOFRAME_CONTROL0 0x760c 854#define AFMT_INFOFRAME_CONTROL0 0x760c
855# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ 855# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
856# define AFMT_AUDIO_INFO_UPDATE (1 << 7) 856# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
857# define AFMT_MPEG_INFO_UPDATE (1 << 10) 857# define AFMT_MPEG_INFO_UPDATE (1 << 10)
858#define AFMT_GENERIC0_7 0x7610 858#define AFMT_GENERIC0_7 0x7610
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3e23b757dcfa..c354c1094967 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -83,6 +83,8 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
83 uint64_t pe, 83 uint64_t pe,
84 uint64_t addr, unsigned count, 84 uint64_t addr, unsigned count,
85 uint32_t incr, uint32_t flags); 85 uint32_t incr, uint32_t flags);
86static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
87 bool enable);
86 88
87static const u32 verde_rlc_save_restore_register_list[] = 89static const u32 verde_rlc_save_restore_register_list[] =
88{ 90{
@@ -3386,6 +3388,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3386 u32 rb_bufsz; 3388 u32 rb_bufsz;
3387 int r; 3389 int r;
3388 3390
3391 si_enable_gui_idle_interrupt(rdev, false);
3392
3389 WREG32(CP_SEM_WAIT_TIMER, 0x0); 3393 WREG32(CP_SEM_WAIT_TIMER, 0x0);
3390 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 3394 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3391 3395
@@ -3501,6 +3505,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3501 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 3505 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3502 } 3506 }
3503 3507
3508 si_enable_gui_idle_interrupt(rdev, true);
3509
3504 return 0; 3510 return 0;
3505} 3511}
3506 3512
@@ -4888,7 +4894,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
4888{ 4894{
4889 u32 tmp; 4895 u32 tmp;
4890 4896
4891 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { 4897 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
4892 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); 4898 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
4893 WREG32(RLC_TTOP_D, tmp); 4899 WREG32(RLC_TTOP_D, tmp);
4894 4900
@@ -5250,6 +5256,7 @@ void si_update_cg(struct radeon_device *rdev,
5250 u32 block, bool enable) 5256 u32 block, bool enable)
5251{ 5257{
5252 if (block & RADEON_CG_BLOCK_GFX) { 5258 if (block & RADEON_CG_BLOCK_GFX) {
5259 si_enable_gui_idle_interrupt(rdev, false);
5253 /* order matters! */ 5260 /* order matters! */
5254 if (enable) { 5261 if (enable) {
5255 si_enable_mgcg(rdev, true); 5262 si_enable_mgcg(rdev, true);
@@ -5258,6 +5265,7 @@ void si_update_cg(struct radeon_device *rdev,
5258 si_enable_cgcg(rdev, false); 5265 si_enable_cgcg(rdev, false);
5259 si_enable_mgcg(rdev, false); 5266 si_enable_mgcg(rdev, false);
5260 } 5267 }
5268 si_enable_gui_idle_interrupt(rdev, true);
5261 } 5269 }
5262 5270
5263 if (block & RADEON_CG_BLOCK_MC) { 5271 if (block & RADEON_CG_BLOCK_MC) {
@@ -5408,7 +5416,7 @@ static void si_init_pg(struct radeon_device *rdev)
5408 si_init_dma_pg(rdev); 5416 si_init_dma_pg(rdev);
5409 } 5417 }
5410 si_init_ao_cu_mask(rdev); 5418 si_init_ao_cu_mask(rdev);
5411 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { 5419 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5412 si_init_gfx_cgpg(rdev); 5420 si_init_gfx_cgpg(rdev);
5413 } 5421 }
5414 si_enable_dma_pg(rdev, true); 5422 si_enable_dma_pg(rdev, true);
@@ -5560,7 +5568,9 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
5560{ 5568{
5561 u32 tmp; 5569 u32 tmp;
5562 5570
5563 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5571 tmp = RREG32(CP_INT_CNTL_RING0) &
5572 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5573 WREG32(CP_INT_CNTL_RING0, tmp);
5564 WREG32(CP_INT_CNTL_RING1, 0); 5574 WREG32(CP_INT_CNTL_RING1, 0);
5565 WREG32(CP_INT_CNTL_RING2, 0); 5575 WREG32(CP_INT_CNTL_RING2, 0);
5566 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; 5576 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -5685,7 +5695,7 @@ static int si_irq_init(struct radeon_device *rdev)
5685 5695
5686int si_irq_set(struct radeon_device *rdev) 5696int si_irq_set(struct radeon_device *rdev)
5687{ 5697{
5688 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 5698 u32 cp_int_cntl;
5689 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; 5699 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
5690 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 5700 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
5691 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; 5701 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
@@ -5706,6 +5716,9 @@ int si_irq_set(struct radeon_device *rdev)
5706 return 0; 5716 return 0;
5707 } 5717 }
5708 5718
5719 cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
5720 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5721
5709 if (!ASIC_IS_NODCE(rdev)) { 5722 if (!ASIC_IS_NODCE(rdev)) {
5710 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 5723 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
5711 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 5724 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 5be9b4e72350..cfe5d4d28915 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6075,12 +6075,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
6075 return ret; 6075 return ret;
6076 } 6076 }
6077 6077
6078 ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
6079 if (ret) {
6080 DRM_ERROR("si_dpm_force_performance_level failed\n");
6081 return ret;
6082 }
6083
6084 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | 6078 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
6085 RADEON_CG_BLOCK_MC | 6079 RADEON_CG_BLOCK_MC |
6086 RADEON_CG_BLOCK_SDMA | 6080 RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 5f524c0a541e..d422a1cbf727 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -29,8 +29,8 @@
29#include "ppsmc.h" 29#include "ppsmc.h"
30#include "radeon_ucode.h" 30#include "radeon_ucode.h"
31 31
32int si_set_smc_sram_address(struct radeon_device *rdev, 32static int si_set_smc_sram_address(struct radeon_device *rdev,
33 u32 smc_address, u32 limit) 33 u32 smc_address, u32 limit)
34{ 34{
35 if (smc_address & 3) 35 if (smc_address & 3)
36 return -EINVAL; 36 return -EINVAL;
@@ -47,7 +47,8 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
47 u32 smc_start_address, 47 u32 smc_start_address,
48 const u8 *src, u32 byte_count, u32 limit) 48 const u8 *src, u32 byte_count, u32 limit)
49{ 49{
50 int ret; 50 unsigned long flags;
51 int ret = 0;
51 u32 data, original_data, addr, extra_shift; 52 u32 data, original_data, addr, extra_shift;
52 53
53 if (smc_start_address & 3) 54 if (smc_start_address & 3)
@@ -57,13 +58,14 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
57 58
58 addr = smc_start_address; 59 addr = smc_start_address;
59 60
61 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
60 while (byte_count >= 4) { 62 while (byte_count >= 4) {
61 /* SMC address space is BE */ 63 /* SMC address space is BE */
62 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 64 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
63 65
64 ret = si_set_smc_sram_address(rdev, addr, limit); 66 ret = si_set_smc_sram_address(rdev, addr, limit);
65 if (ret) 67 if (ret)
66 return ret; 68 goto done;
67 69
68 WREG32(SMC_IND_DATA_0, data); 70 WREG32(SMC_IND_DATA_0, data);
69 71
@@ -78,7 +80,7 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
78 80
79 ret = si_set_smc_sram_address(rdev, addr, limit); 81 ret = si_set_smc_sram_address(rdev, addr, limit);
80 if (ret) 82 if (ret)
81 return ret; 83 goto done;
82 84
83 original_data = RREG32(SMC_IND_DATA_0); 85 original_data = RREG32(SMC_IND_DATA_0);
84 86
@@ -96,11 +98,15 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
96 98
97 ret = si_set_smc_sram_address(rdev, addr, limit); 99 ret = si_set_smc_sram_address(rdev, addr, limit);
98 if (ret) 100 if (ret)
99 return ret; 101 goto done;
100 102
101 WREG32(SMC_IND_DATA_0, data); 103 WREG32(SMC_IND_DATA_0, data);
102 } 104 }
103 return 0; 105
106done:
107 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
108
109 return ret;
104} 110}
105 111
106void si_start_smc(struct radeon_device *rdev) 112void si_start_smc(struct radeon_device *rdev)
@@ -203,6 +209,7 @@ PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
203 209
204int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) 210int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
205{ 211{
212 unsigned long flags;
206 u32 ucode_start_address; 213 u32 ucode_start_address;
207 u32 ucode_size; 214 u32 ucode_size;
208 const u8 *src; 215 const u8 *src;
@@ -241,6 +248,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
241 return -EINVAL; 248 return -EINVAL;
242 249
243 src = (const u8 *)rdev->smc_fw->data; 250 src = (const u8 *)rdev->smc_fw->data;
251 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
244 WREG32(SMC_IND_INDEX_0, ucode_start_address); 252 WREG32(SMC_IND_INDEX_0, ucode_start_address);
245 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); 253 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
246 while (ucode_size >= 4) { 254 while (ucode_size >= 4) {
@@ -253,6 +261,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
253 ucode_size -= 4; 261 ucode_size -= 4;
254 } 262 }
255 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); 263 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
264 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
256 265
257 return 0; 266 return 0;
258} 267}
@@ -260,25 +269,29 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
260int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 269int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
261 u32 *value, u32 limit) 270 u32 *value, u32 limit)
262{ 271{
272 unsigned long flags;
263 int ret; 273 int ret;
264 274
275 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
265 ret = si_set_smc_sram_address(rdev, smc_address, limit); 276 ret = si_set_smc_sram_address(rdev, smc_address, limit);
266 if (ret) 277 if (ret == 0)
267 return ret; 278 *value = RREG32(SMC_IND_DATA_0);
279 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
268 280
269 *value = RREG32(SMC_IND_DATA_0); 281 return ret;
270 return 0;
271} 282}
272 283
273int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 284int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
274 u32 value, u32 limit) 285 u32 value, u32 limit)
275{ 286{
287 unsigned long flags;
276 int ret; 288 int ret;
277 289
290 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
278 ret = si_set_smc_sram_address(rdev, smc_address, limit); 291 ret = si_set_smc_sram_address(rdev, smc_address, limit);
279 if (ret) 292 if (ret == 0)
280 return ret; 293 WREG32(SMC_IND_DATA_0, value);
294 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
281 295
282 WREG32(SMC_IND_DATA_0, value); 296 return ret;
283 return 0;
284} 297}
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 864761c0120e..96ea6db8bf57 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1319,8 +1319,6 @@ int sumo_dpm_set_power_state(struct radeon_device *rdev)
1319 if (pi->enable_dpm) 1319 if (pi->enable_dpm)
1320 sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1320 sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1321 1321
1322 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1323
1324 return 0; 1322 return 0;
1325} 1323}
1326 1324
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index b07b7b8f1aff..7f998bf1cc9d 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1068,6 +1068,17 @@ static void trinity_update_requested_ps(struct radeon_device *rdev,
1068 pi->requested_rps.ps_priv = &pi->requested_ps; 1068 pi->requested_rps.ps_priv = &pi->requested_ps;
1069} 1069}
1070 1070
1071void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
1072{
1073 struct trinity_power_info *pi = trinity_get_pi(rdev);
1074
1075 if (pi->enable_bapm) {
1076 trinity_acquire_mutex(rdev);
1077 trinity_dpm_bapm_enable(rdev, enable);
1078 trinity_release_mutex(rdev);
1079 }
1080}
1081
1071int trinity_dpm_enable(struct radeon_device *rdev) 1082int trinity_dpm_enable(struct radeon_device *rdev)
1072{ 1083{
1073 struct trinity_power_info *pi = trinity_get_pi(rdev); 1084 struct trinity_power_info *pi = trinity_get_pi(rdev);
@@ -1091,6 +1102,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
1091 trinity_program_sclk_dpm(rdev); 1102 trinity_program_sclk_dpm(rdev);
1092 trinity_start_dpm(rdev); 1103 trinity_start_dpm(rdev);
1093 trinity_wait_for_dpm_enabled(rdev); 1104 trinity_wait_for_dpm_enabled(rdev);
1105 trinity_dpm_bapm_enable(rdev, false);
1094 trinity_release_mutex(rdev); 1106 trinity_release_mutex(rdev);
1095 1107
1096 if (rdev->irq.installed && 1108 if (rdev->irq.installed &&
@@ -1116,6 +1128,7 @@ void trinity_dpm_disable(struct radeon_device *rdev)
1116 trinity_release_mutex(rdev); 1128 trinity_release_mutex(rdev);
1117 return; 1129 return;
1118 } 1130 }
1131 trinity_dpm_bapm_enable(rdev, false);
1119 trinity_disable_clock_power_gating(rdev); 1132 trinity_disable_clock_power_gating(rdev);
1120 sumo_clear_vc(rdev); 1133 sumo_clear_vc(rdev);
1121 trinity_wait_for_level_0(rdev); 1134 trinity_wait_for_level_0(rdev);
@@ -1212,6 +1225,8 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
1212 1225
1213 trinity_acquire_mutex(rdev); 1226 trinity_acquire_mutex(rdev);
1214 if (pi->enable_dpm) { 1227 if (pi->enable_dpm) {
1228 if (pi->enable_bapm)
1229 trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power);
1215 trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 1230 trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
1216 trinity_enable_power_level_0(rdev); 1231 trinity_enable_power_level_0(rdev);
1217 trinity_force_level_0(rdev); 1232 trinity_force_level_0(rdev);
@@ -1221,7 +1236,6 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
1221 trinity_force_level_0(rdev); 1236 trinity_force_level_0(rdev);
1222 trinity_unforce_levels(rdev); 1237 trinity_unforce_levels(rdev);
1223 trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1238 trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1224 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1225 } 1239 }
1226 trinity_release_mutex(rdev); 1240 trinity_release_mutex(rdev);
1227 1241
@@ -1854,6 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
1854 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1868 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1855 pi->at[i] = TRINITY_AT_DFLT; 1869 pi->at[i] = TRINITY_AT_DFLT;
1856 1870
1871 pi->enable_bapm = true;
1857 pi->enable_nbps_policy = true; 1872 pi->enable_nbps_policy = true;
1858 pi->enable_sclk_ds = true; 1873 pi->enable_sclk_ds = true;
1859 pi->enable_gfx_power_gating = true; 1874 pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
index e82df071f8b3..c261657750ca 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.h
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -108,6 +108,7 @@ struct trinity_power_info {
108 bool enable_auto_thermal_throttling; 108 bool enable_auto_thermal_throttling;
109 bool enable_dpm; 109 bool enable_dpm;
110 bool enable_sclk_ds; 110 bool enable_sclk_ds;
111 bool enable_bapm;
111 bool uvd_dpm; 112 bool uvd_dpm;
112 struct radeon_ps current_rps; 113 struct radeon_ps current_rps;
113 struct trinity_ps current_ps; 114 struct trinity_ps current_ps;
@@ -118,6 +119,7 @@ struct trinity_power_info {
118#define TRINITY_AT_DFLT 30 119#define TRINITY_AT_DFLT 30
119 120
120/* trinity_smc.c */ 121/* trinity_smc.c */
122int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable);
121int trinity_dpm_config(struct radeon_device *rdev, bool enable); 123int trinity_dpm_config(struct radeon_device *rdev, bool enable);
122int trinity_uvd_dpm_config(struct radeon_device *rdev); 124int trinity_uvd_dpm_config(struct radeon_device *rdev);
123int trinity_dpm_force_state(struct radeon_device *rdev, u32 n); 125int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index a42d89f1830c..9672bcbc7312 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -56,6 +56,14 @@ static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
56 return 0; 56 return 0;
57} 57}
58 58
59int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable)
60{
61 if (enable)
62 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
63 else
64 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
65}
66
59int trinity_dpm_config(struct radeon_device *rdev, bool enable) 67int trinity_dpm_config(struct radeon_device *rdev, bool enable)
60{ 68{
61 if (enable) 69 if (enable)
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8dbe9d0ae9a7..8bf646183bac 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -97,7 +97,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
97 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); 97 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
98 switch (ret) { 98 switch (ret) {
99 case -EAGAIN: 99 case -EAGAIN:
100 set_need_resched();
101 case 0: 100 case 0:
102 case -ERESTARTSYS: 101 case -ERESTARTSYS:
103 return VM_FAULT_NOPAGE; 102 return VM_FAULT_NOPAGE;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index e893f6e1937d..af0259708358 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -257,9 +257,9 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
257 if (!conflict->bridge_has_one_vga) { 257 if (!conflict->bridge_has_one_vga) {
258 vga_irq_set_state(conflict, false); 258 vga_irq_set_state(conflict, false);
259 flags |= PCI_VGA_STATE_CHANGE_DECODES; 259 flags |= PCI_VGA_STATE_CHANGE_DECODES;
260 if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) 260 if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
261 pci_bits |= PCI_COMMAND_MEMORY; 261 pci_bits |= PCI_COMMAND_MEMORY;
262 if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) 262 if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
263 pci_bits |= PCI_COMMAND_IO; 263 pci_bits |= PCI_COMMAND_IO;
264 } 264 }
265 265
@@ -267,11 +267,11 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
267 flags |= PCI_VGA_STATE_CHANGE_BRIDGE; 267 flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
268 268
269 pci_set_vga_state(conflict->pdev, false, pci_bits, flags); 269 pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
270 conflict->owns &= ~lwants; 270 conflict->owns &= ~match;
271 /* If he also owned non-legacy, that is no longer the case */ 271 /* If he also owned non-legacy, that is no longer the case */
272 if (lwants & VGA_RSRC_LEGACY_MEM) 272 if (match & VGA_RSRC_LEGACY_MEM)
273 conflict->owns &= ~VGA_RSRC_NORMAL_MEM; 273 conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
274 if (lwants & VGA_RSRC_LEGACY_IO) 274 if (match & VGA_RSRC_LEGACY_IO)
275 conflict->owns &= ~VGA_RSRC_NORMAL_IO; 275 conflict->owns &= ~VGA_RSRC_NORMAL_IO;
276 } 276 }
277 277
@@ -644,10 +644,12 @@ bail:
644static inline void vga_update_device_decodes(struct vga_device *vgadev, 644static inline void vga_update_device_decodes(struct vga_device *vgadev,
645 int new_decodes) 645 int new_decodes)
646{ 646{
647 int old_decodes; 647 int old_decodes, decodes_removed, decodes_unlocked;
648 struct vga_device *new_vgadev, *conflict;
649 648
650 old_decodes = vgadev->decodes; 649 old_decodes = vgadev->decodes;
650 decodes_removed = ~new_decodes & old_decodes;
651 decodes_unlocked = vgadev->locks & decodes_removed;
652 vgadev->owns &= ~decodes_removed;
651 vgadev->decodes = new_decodes; 653 vgadev->decodes = new_decodes;
652 654
653 pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", 655 pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
@@ -656,31 +658,22 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
656 vga_iostate_to_str(vgadev->decodes), 658 vga_iostate_to_str(vgadev->decodes),
657 vga_iostate_to_str(vgadev->owns)); 659 vga_iostate_to_str(vgadev->owns));
658 660
659 661 /* if we removed locked decodes, lock count goes to zero, and release */
660 /* if we own the decodes we should move them along to 662 if (decodes_unlocked) {
661 another card */ 663 if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
662 if ((vgadev->owns & old_decodes) && (vga_count > 1)) { 664 vgadev->io_lock_cnt = 0;
663 /* set us to own nothing */ 665 if (decodes_unlocked & VGA_RSRC_LEGACY_MEM)
664 vgadev->owns &= ~old_decodes; 666 vgadev->mem_lock_cnt = 0;
665 list_for_each_entry(new_vgadev, &vga_list, list) { 667 __vga_put(vgadev, decodes_unlocked);
666 if ((new_vgadev != vgadev) &&
667 (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
668 pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
669 conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
670 if (!conflict)
671 __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
672 break;
673 }
674 }
675 } 668 }
676 669
677 /* change decodes counter */ 670 /* change decodes counter */
678 if (old_decodes != new_decodes) { 671 if (old_decodes & VGA_RSRC_LEGACY_MASK &&
679 if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)) 672 !(new_decodes & VGA_RSRC_LEGACY_MASK))
680 vga_decode_count++; 673 vga_decode_count--;
681 else 674 if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
682 vga_decode_count--; 675 new_decodes & VGA_RSRC_LEGACY_MASK)
683 } 676 vga_decode_count++;
684 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); 677 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
685} 678}
686 679
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index fd54a14a7c2a..3d79e513c0b3 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -12,11 +12,14 @@
12 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 12 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
13 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 13 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
14 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 14 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
15 {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
15 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 16 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
16 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 17 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
17 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 18 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
19 {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
18 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 20 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
19 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 21 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
22 {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
20 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 23 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
21 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 24 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
22 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 25 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index d6aeaf3c6d6c..cb65fa14acfc 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -15,6 +15,7 @@
15#define _EXYNOS_DRM_H_ 15#define _EXYNOS_DRM_H_
16 16
17#include <uapi/drm/exynos_drm.h> 17#include <uapi/drm/exynos_drm.h>
18#include <video/videomode.h>
18 19
19/** 20/**
20 * A structure for lcd panel information. 21 * A structure for lcd panel information.
@@ -24,7 +25,7 @@
24 * @height_mm: physical size of lcd height. 25 * @height_mm: physical size of lcd height.
25 */ 26 */
26struct exynos_drm_panel_info { 27struct exynos_drm_panel_info {
27 struct fb_videomode timing; 28 struct videomode vm;
28 u32 width_mm; 29 u32 width_mm;
29 u32 height_mm; 30 u32 height_mm;
30}; 31};
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 63d609d8a3f6..3abfa6ea226e 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -26,6 +26,7 @@
26#ifndef _I915_DRM_H_ 26#ifndef _I915_DRM_H_
27#define _I915_DRM_H_ 27#define _I915_DRM_H_
28 28
29#include <drm/i915_pciids.h>
29#include <uapi/drm/i915_drm.h> 30#include <uapi/drm/i915_drm.h>
30 31
31/* For use by IPS driver */ 32/* For use by IPS driver */
@@ -34,4 +35,37 @@ extern bool i915_gpu_raise(void);
34extern bool i915_gpu_lower(void); 35extern bool i915_gpu_lower(void);
35extern bool i915_gpu_busy(void); 36extern bool i915_gpu_busy(void);
36extern bool i915_gpu_turbo_disable(void); 37extern bool i915_gpu_turbo_disable(void);
38
39/*
40 * The Bridge device's PCI config space has information about the
41 * fb aperture size and the amount of pre-reserved memory.
42 * This is all handled in the intel-gtt.ko module. i915.ko only
43 * cares about the vga bit for the vga rbiter.
44 */
45#define INTEL_GMCH_CTRL 0x52
46#define INTEL_GMCH_VGA_DISABLE (1 << 1)
47#define SNB_GMCH_CTRL 0x50
48#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
49#define SNB_GMCH_GGMS_MASK 0x3
50#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
51#define SNB_GMCH_GMS_MASK 0x1f
52
53#define I830_GMCH_CTRL 0x52
54
55#define I855_GMCH_GMS_MASK 0xF0
56#define I855_GMCH_GMS_STOLEN_0M 0x0
57#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
58#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
59#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
60#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
61#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
62#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
63#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
64#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
65#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
66#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
67#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
68#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
69#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
70
37#endif /* _I915_DRM_H_ */ 71#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
new file mode 100644
index 000000000000..8a10f5c354e6
--- /dev/null
+++ b/include/drm/i915_pciids.h
@@ -0,0 +1,211 @@
1/*
2 * Copyright 2013 Intel Corporation
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25#ifndef _I915_PCIIDS_H
26#define _I915_PCIIDS_H
27
28/*
29 * A pci_device_id struct {
30 * __u32 vendor, device;
31 * __u32 subvendor, subdevice;
32 * __u32 class, class_mask;
33 * kernel_ulong_t driver_data;
34 * };
35 * Don't use C99 here because "class" is reserved and we want to
36 * give userspace flexibility.
37 */
38#define INTEL_VGA_DEVICE(id, info) { \
39 0x8086, id, \
40 ~0, ~0, \
41 0x030000, 0xff0000, \
42 (unsigned long) info }
43
44#define INTEL_QUANTA_VGA_DEVICE(info) { \
45 0x8086, 0x16a, \
46 0x152d, 0x8990, \
47 0x030000, 0xff0000, \
48 (unsigned long) info }
49
50#define INTEL_I830_IDS(info) \
51 INTEL_VGA_DEVICE(0x3577, info)
52
53#define INTEL_I845G_IDS(info) \
54 INTEL_VGA_DEVICE(0x2562, info)
55
56#define INTEL_I85X_IDS(info) \
57 INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
58 INTEL_VGA_DEVICE(0x358e, info)
59
60#define INTEL_I865G_IDS(info) \
61 INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
62
63#define INTEL_I915G_IDS(info) \
64 INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
65 INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */
66
67#define INTEL_I915GM_IDS(info) \
68 INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
69
70#define INTEL_I945G_IDS(info) \
71 INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
72
73#define INTEL_I945GM_IDS(info) \
74 INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
75 INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */
76
77#define INTEL_I965G_IDS(info) \
78 INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \
79 INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \
80 INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \
81 INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */
82
83#define INTEL_G33_IDS(info) \
84 INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
85 INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \
86 INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */
87
88#define INTEL_I965GM_IDS(info) \
89 INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \
90 INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */
91
92#define INTEL_GM45_IDS(info) \
93 INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
94
95#define INTEL_G45_IDS(info) \
96 INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
97 INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
98 INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
99 INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
100 INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
101 INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */
102
103#define INTEL_PINEVIEW_IDS(info) \
104 INTEL_VGA_DEVICE(0xa001, info), \
105 INTEL_VGA_DEVICE(0xa011, info)
106
107#define INTEL_IRONLAKE_D_IDS(info) \
108 INTEL_VGA_DEVICE(0x0042, info)
109
110#define INTEL_IRONLAKE_M_IDS(info) \
111 INTEL_VGA_DEVICE(0x0046, info)
112
113#define INTEL_SNB_D_IDS(info) \
114 INTEL_VGA_DEVICE(0x0102, info), \
115 INTEL_VGA_DEVICE(0x0112, info), \
116 INTEL_VGA_DEVICE(0x0122, info), \
117 INTEL_VGA_DEVICE(0x010A, info)
118
119#define INTEL_SNB_M_IDS(info) \
120 INTEL_VGA_DEVICE(0x0106, info), \
121 INTEL_VGA_DEVICE(0x0116, info), \
122 INTEL_VGA_DEVICE(0x0126, info)
123
124#define INTEL_IVB_M_IDS(info) \
125 INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \
126 INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
127
128#define INTEL_IVB_D_IDS(info) \
129 INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
130 INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
131 INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \
132 INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
133
134#define INTEL_IVB_Q_IDS(info) \
135 INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
136
137#define INTEL_HSW_D_IDS(info) \
138 INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
139 INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
140 INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
141 INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
142 INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
143 INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
144 INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
145 INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
146 INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
147 INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
148 INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
149 INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
150 INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
151 INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
152 INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
153 INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
154 INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
155 INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
156 INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
157 INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
158 INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
159 INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
160 INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
161 INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
162 INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
163 INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
164 INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
165 INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
166 INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
167 INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
168 INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
169 INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
170 INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
171 INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
172 INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
173 INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
174 INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
175 INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
176 INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
177 INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
178 INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
179 INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
180 INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
181 INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
182 INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */ \
183
184#define INTEL_HSW_M_IDS(info) \
185 INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
186 INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
187 INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
188 INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
189 INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
190 INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
191 INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
192 INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
193 INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
194 INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
195 INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
196 INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
197 INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
198 INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
199 INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
200
201#define INTEL_VLV_M_IDS(info) \
202 INTEL_VGA_DEVICE(0x0f30, info), \
203 INTEL_VGA_DEVICE(0x0f31, info), \
204 INTEL_VGA_DEVICE(0x0f32, info), \
205 INTEL_VGA_DEVICE(0x0f33, info), \
206 INTEL_VGA_DEVICE(0x0157, info)
207
208#define INTEL_VLV_D_IDS(info) \
209 INTEL_VGA_DEVICE(0x0155, info)
210
211#endif /* _I915_PCIIDS_H */
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 2c02f3a8d2ba..80cf8173a65b 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -65,8 +65,15 @@ struct pci_dev;
65 * out of the arbitration process (and can be safe to take 65 * out of the arbitration process (and can be safe to take
66 * interrupts at any time. 66 * interrupts at any time.
67 */ 67 */
68#if defined(CONFIG_VGA_ARB)
68extern void vga_set_legacy_decoding(struct pci_dev *pdev, 69extern void vga_set_legacy_decoding(struct pci_dev *pdev,
69 unsigned int decodes); 70 unsigned int decodes);
71#else
72static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
73 unsigned int decodes)
74{
75}
76#endif
70 77
71/** 78/**
72 * vga_get - acquire & locks VGA resources 79 * vga_get - acquire & locks VGA resources