author		Mark Brown <broonie@kernel.org>	2014-10-20 12:55:07 -0400
committer	Mark Brown <broonie@kernel.org>	2014-10-20 13:27:32 -0400
commit		b7a40242c82cd73cfcea305f23e67d068dd8401a (patch)
tree		251b49d19cd7c371847ae1f951e1b537ca0e1c15 /drivers/gpu
parent		d26833bfce5e56017bea9f1f50838f20e18e7b7e (diff)
parent		9c6de47d53a3ce8df1642ae67823688eb98a190a (diff)
Merge branch 'fix/dw' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi into spi-dw
Conflicts:
	drivers/spi/spi-dw-mid.c
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_tables.h | 1
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 409
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 73
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 92
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 66
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 21
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 46
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 15
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/parent.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 81
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 60
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ib.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dma.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/sti/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/sti/sti_hda.c | 10
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 11
-rw-r--r--  drivers/gpu/drm/sti/sti_tvout.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 3
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 6
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 46
89 files changed, 1052 insertions(+), 664 deletions(-)
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index a2cc6be97983..b792194e0d9c 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -67,6 +67,7 @@ static int ast_detect_chip(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;
 	uint32_t data, jreg;
+	ast_open_key(ast);
 
 	if (dev->pdev->device == PCI_CHIP_AST1180) {
 		ast->chip = AST1100;
@@ -104,7 +105,7 @@ static int ast_detect_chip(struct drm_device *dev)
 		}
 		ast->vga2_clone = false;
 	} else {
-		ast->chip = 2000;
+		ast->chip = AST2000;
 		DRM_INFO("AST 2000 detected\n");
 	}
 }
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index 4c761dcea972..05c01ea85294 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -99,6 +99,7 @@ static struct ast_vbios_dclk_info dclk_table[] = {
 	{0x25, 0x65, 0x80},	/* 16: VCLK88.75	*/
 	{0x77, 0x58, 0x80},	/* 17: VCLK119	*/
 	{0x32, 0x67, 0x80},	/* 18: VCLK85_5	*/
+	{0x6a, 0x6d, 0x80},	/* 19: VCLK97_75	*/
 };
 
 static struct ast_vbios_stdtable vbios_stdtable[] = {
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 9d7346b92653..6b7efcf363d6 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -250,6 +250,7 @@ static void bochs_connector_init(struct drm_device *dev)
 			   DRM_MODE_CONNECTOR_VIRTUAL);
 	drm_connector_helper_add(connector,
 				 &bochs_connector_connector_helper_funcs);
+	drm_connector_register(connector);
 }
 
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index e1c5c3222129..c7c5a9d91fa0 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -555,6 +555,7 @@ static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
 
 	drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
 
+	drm_connector_register(connector);
 	return connector;
 }
 
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fa2be249999c..90e773019eac 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -4696,8 +4696,9 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
 		return -EINVAL;
 
 	/* overflow checks for 32bit size calculations */
+	/* NOTE: DIV_ROUND_UP() can overflow */
 	cpp = DIV_ROUND_UP(args->bpp, 8);
-	if (cpp > 0xffffffffU / args->width)
+	if (!cpp || cpp > 0xffffffffU / args->width)
 		return -EINVAL;
 	stride = cpp * args->width;
 	if (args->height > 0xffffffffU / stride)
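The !cpp guard added above is not cosmetic: DIV_ROUND_UP() is open-coded unsigned arithmetic, so a large enough bpp wraps the internal addition and yields cpp == 0, after which stride becomes 0 and the following height check divides by zero. A standalone userspace sketch of the wrap (DIV_ROUND_UP copied from the kernel definition; the bpp value is only an illustration):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* as in the kernel */

int main(void)
{
	uint32_t bpp = 0xfffffff9;	/* bpp + 7 wraps to 0 in 32 bits */
	uint32_t cpp = DIV_ROUND_UP(bpp, 8);

	/* cpp == 0 slipped past the old "cpp > 0xffffffffU / width" test,
	 * made stride == 0, and turned the height check into a div-by-zero. */
	printf("cpp = %u\n", cpp);
	return 0;
}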
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index dea99d92fb4a..4b7ed5289217 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -709,11 +709,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
 	BUG_ON(!validate_regs_sorted(ring));
 
-	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
-	if (ret) {
-		DRM_ERROR("CMD: cmd_parser_init failed!\n");
-		fini_hash_table(ring);
-		return ret;
+	if (hash_empty(ring->cmd_hash)) {
+		ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+		if (ret) {
+			DRM_ERROR("CMD: cmd_parser_init failed!\n");
+			fini_hash_table(ring);
+			return ret;
+		}
 	}
 
 	ring->needs_cmd_parser = true;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2e7f03ad5ee2..9933c26017ed 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_power_domains_init_hw(dev_priv);
 
+	/*
+	 * We enable some interrupt sources in our postinstall hooks, so mark
+	 * interrupts as enabled _before_ actually enabling them to avoid
+	 * special cases in our ordering checks.
+	 */
+	dev_priv->pm._irqs_disabled = false;
+
 	ret = drm_irq_install(dev, dev->pdev->irq);
 	if (ret)
 		goto cleanup_gem_stolen;
 
-	dev_priv->pm._irqs_disabled = false;
-
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ec96f9a9724c..e27cdbe9d524 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -494,6 +494,36 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	return true;
 }
 
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	dev_priv->long_hpd_port_mask = 0;
+	dev_priv->short_hpd_port_mask = 0;
+	dev_priv->hpd_event_bits = 0;
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	cancel_work_sync(&dev_priv->dig_port_work);
+	cancel_work_sync(&dev_priv->hotplug_work);
+	cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
+}
+
+static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_encoder *encoder;
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+		if (intel_encoder->suspend)
+			intel_encoder->suspend(intel_encoder);
+	}
+	drm_modeset_unlock_all(dev);
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -538,6 +568,9 @@ static int i915_drm_freeze(struct drm_device *dev)
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
 	intel_runtime_pm_disable_interrupts(dev);
+	intel_hpd_cancel_work(dev_priv);
+
+	intel_suspend_encoders(dev_priv);
 
 	intel_suspend_gt_powersave(dev);
 
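intel_hpd_cancel_work() above follows a standard teardown shape: clear the state the handlers consume while holding the spinlock, then call the cancel_*_sync() variants after dropping it, because those calls can sleep and would deadlock if made under a lock the work handlers themselves take. A minimal sketch of the same shape (struct hpd_state and its field names are illustrative, not from the patch):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct hpd_state {
	spinlock_t lock;
	unsigned long pending_mask;	/* consumed by the work handler */
	struct work_struct work;
};

static void hpd_cancel(struct hpd_state *s)
{
	/* Clear the state the worker would act on... */
	spin_lock_irq(&s->lock);
	s->pending_mask = 0;
	spin_unlock_irq(&s->lock);

	/* ...then wait for it outside the lock: cancel_work_sync() can
	 * sleep, and the handler may itself need s->lock. */
	cancel_work_sync(&s->work);
}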
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4412f6a4383b..3524306d8cfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,7 @@ enum hpd_pin {
 	if ((1 << (domain)) & (mask))
 
 struct drm_i915_private;
+struct i915_mm_struct;
 struct i915_mmu_object;
 
 enum intel_dpll_id {
@@ -1458,7 +1459,7 @@ struct drm_i915_private {
 		} hpd_mark;
 	} hpd_stats[HPD_NUM_PINS];
 	u32 hpd_event_bits;
-	struct timer_list hotplug_reenable_timer;
+	struct delayed_work hotplug_reenable_work;
 
 	struct i915_fbc fbc;
 	struct i915_drrs drrs;
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
 	struct i915_gtt gtt; /* VM representing the global address space */
 
 	struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
-	DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+	DECLARE_HASHTABLE(mm_structs, 7);
+	struct mutex mm_lock;
 
 	/* Kernel Modesetting */
 
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
 		unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-		struct mm_struct *mm;
-		struct i915_mmu_object *mn;
+		struct i915_mm_struct *mm;
+		struct i915_mmu_object *mmu_object;
 		struct work_struct *work;
 	} userptr;
 };
@@ -2178,6 +2178,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 
 extern void intel_console_resume(struct work_struct *work);
 
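DECLARE_HASHTABLE(mm_structs, 7) above comes from linux/hashtable.h: the second argument is a power-of-two order, so this is a fixed array of 2^7 = 128 hlist bucket heads, set up with hash_init() and probed with hash_for_each_possible(), as i915_gem_userptr.c does below. A minimal sketch of that API (the demo_* names are hypothetical):

#include <linux/hashtable.h>

struct demo_entry {
	unsigned long key;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(demo_table, 7);	/* 2^7 = 128 buckets */

static void demo_insert(struct demo_entry *e)
{
	hash_add(demo_table, &e->node, e->key);
}

static struct demo_entry *demo_find(unsigned long key)
{
	struct demo_entry *e;

	/* Only the bucket that "key" hashes to is walked; different keys
	 * can share a bucket, hence the explicit re-check. */
	hash_for_each_possible(demo_table, e, node, key)
		if (e->key == key)
			return e;

	return NULL;
}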
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ba7f5c6bb50d..ad55b06a3cb1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1590,10 +1590,13 @@ unlock:
 out:
 	switch (ret) {
 	case -EIO:
-		/* If this -EIO is due to a gpu hang, give the reset code a
-		 * chance to clean up the mess. Otherwise return the proper
-		 * SIGBUS. */
-		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+		/*
+		 * We eat errors when the gpu is terminally wedged to avoid
+		 * userspace unduly crashing (gl has no provisions for mmaps to
+		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
+		 * and so needs to be reported.
+		 */
+		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
 			ret = VM_FAULT_SIGBUS;
 			break;
 		}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index fe69fc837d9e..d38413997379 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -32,6 +32,15 @@
 #include <linux/mempolicy.h>
 #include <linux/swap.h>
 
+struct i915_mm_struct {
+	struct mm_struct *mm;
+	struct drm_device *dev;
+	struct i915_mmu_notifier *mn;
+	struct hlist_node node;
+	struct kref kref;
+	struct work_struct work;
+};
+
 #if defined(CONFIG_MMU_NOTIFIER)
 #include <linux/interval_tree.h>
 
@@ -41,16 +50,12 @@ struct i915_mmu_notifier {
 	struct mmu_notifier mn;
 	struct rb_root objects;
 	struct list_head linear;
-	struct drm_device *dev;
-	struct mm_struct *mm;
-	struct work_struct work;
-	unsigned long count;
 	unsigned long serial;
 	bool has_linear;
 };
 
 struct i915_mmu_object {
-	struct i915_mmu_notifier *mmu;
+	struct i915_mmu_notifier *mn;
 	struct interval_tree_node it;
 	struct list_head link;
 	struct drm_i915_gem_object *obj;
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
 				      unsigned long start,
 				      unsigned long end)
 {
-	struct i915_mmu_object *mmu;
+	struct i915_mmu_object *mo;
 	unsigned long serial;
 
 restart:
 	serial = mn->serial;
-	list_for_each_entry(mmu, &mn->linear, link) {
+	list_for_each_entry(mo, &mn->linear, link) {
 		struct drm_i915_gem_object *obj;
 
-		if (mmu->it.last < start || mmu->it.start > end)
+		if (mo->it.last < start || mo->it.start > end)
 			continue;
 
-		obj = mmu->obj;
+		obj = mo->obj;
 		drm_gem_object_reference(&obj->base);
 		spin_unlock(&mn->lock);
 
@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
 };
 
 static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_mmu_notifier *mmu;
-
-	/* Protected by dev->struct_mutex */
-	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
-		if (mmu->mm == mm)
-			return mmu;
-
-	return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_mmu_notifier *mmu;
+	struct i915_mmu_notifier *mn;
 	int ret;
 
-	lockdep_assert_held(&dev->struct_mutex);
-
-	mmu = __i915_mmu_notifier_lookup(dev, mm);
-	if (mmu)
-		return mmu;
-
-	mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
-	if (mmu == NULL)
+	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+	if (mn == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&mmu->lock);
-	mmu->dev = dev;
-	mmu->mn.ops = &i915_gem_userptr_notifier;
-	mmu->mm = mm;
-	mmu->objects = RB_ROOT;
-	mmu->count = 0;
-	mmu->serial = 1;
-	INIT_LIST_HEAD(&mmu->linear);
-	mmu->has_linear = false;
+	spin_lock_init(&mn->lock);
+	mn->mn.ops = &i915_gem_userptr_notifier;
+	mn->objects = RB_ROOT;
+	mn->serial = 1;
+	INIT_LIST_HEAD(&mn->linear);
+	mn->has_linear = false;
 
 	/* Protected by mmap_sem (write-lock) */
-	ret = __mmu_notifier_register(&mmu->mn, mm);
+	ret = __mmu_notifier_register(&mn->mn, mm);
 	if (ret) {
-		kfree(mmu);
+		kfree(mn);
 		return ERR_PTR(ret);
 	}
 
-	/* Protected by dev->struct_mutex */
-	hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
-	return mmu;
+	return mn;
 }
 
-static void
-__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
 {
-	struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
-	mmu_notifier_unregister(&mmu->mn, mmu->mm);
-	kfree(mmu);
-}
-
-static void
-__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
-{
-	lockdep_assert_held(&mmu->dev->struct_mutex);
-
-	/* Protected by dev->struct_mutex */
-	hash_del(&mmu->node);
-
-	/* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex.
-	 * We enter the function holding struct_mutex, therefore we need
-	 * to drop our mutex prior to calling mmu_notifier_unregister in
-	 * order to prevent lock inversion (and system-wide deadlock)
-	 * between the mmap_sem and struct-mutex. Hence we defer the
-	 * unregistration to a workqueue where we hold no locks.
-	 */
-	INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
-	schedule_work(&mmu->work);
-}
-
-static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
-{
-	if (++mmu->serial == 0)
-		mmu->serial = 1;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
-{
-	struct i915_mmu_object *mn;
-
-	list_for_each_entry(mn, &mmu->linear, link)
-		if (mn->is_linear)
-			return true;
-
-	return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
-		      struct i915_mmu_object *mn)
-{
-	lockdep_assert_held(&mmu->dev->struct_mutex);
-
-	spin_lock(&mmu->lock);
-	list_del(&mn->link);
-	if (mn->is_linear)
-		mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
-	else
-		interval_tree_remove(&mn->it, &mmu->objects);
-	__i915_mmu_notifier_update_serial(mmu);
-	spin_unlock(&mmu->lock);
-
-	/* Protected against _add() by dev->struct_mutex */
-	if (--mmu->count == 0)
-		__i915_mmu_notifier_destroy(mmu);
+	if (++mn->serial == 0)
+		mn->serial = 1;
 }
 
 static int
-i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
-		      struct i915_mmu_object *mn)
+i915_mmu_notifier_add(struct drm_device *dev,
+		      struct i915_mmu_notifier *mn,
+		      struct i915_mmu_object *mo)
 {
 	struct interval_tree_node *it;
 	int ret;
 
-	ret = i915_mutex_lock_interruptible(mmu->dev);
+	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
@@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 	 * remove the objects from the interval tree) before we do
 	 * the check for overlapping objects.
 	 */
-	i915_gem_retire_requests(mmu->dev);
+	i915_gem_retire_requests(dev);
 
-	spin_lock(&mmu->lock);
-	it = interval_tree_iter_first(&mmu->objects,
-				      mn->it.start, mn->it.last);
+	spin_lock(&mn->lock);
+	it = interval_tree_iter_first(&mn->objects,
+				      mo->it.start, mo->it.last);
 	if (it) {
 		struct drm_i915_gem_object *obj;
 
@@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 
 		obj = container_of(it, struct i915_mmu_object, it)->obj;
 		if (!obj->userptr.workers)
-			mmu->has_linear = mn->is_linear = true;
+			mn->has_linear = mo->is_linear = true;
 		else
 			ret = -EAGAIN;
 	} else
-		interval_tree_insert(&mn->it, &mmu->objects);
+		interval_tree_insert(&mo->it, &mn->objects);
 
 	if (ret == 0) {
-		list_add(&mn->link, &mmu->linear);
-		__i915_mmu_notifier_update_serial(mmu);
+		list_add(&mo->link, &mn->linear);
+		__i915_mmu_notifier_update_serial(mn);
 	}
-	spin_unlock(&mmu->lock);
-	mutex_unlock(&mmu->dev->struct_mutex);
+	spin_unlock(&mn->lock);
+	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
 
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+	struct i915_mmu_object *mo;
+
+	list_for_each_entry(mo, &mn->linear, link)
+		if (mo->is_linear)
+			return true;
+
+	return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+		      struct i915_mmu_object *mo)
+{
+	spin_lock(&mn->lock);
+	list_del(&mo->link);
+	if (mo->is_linear)
+		mn->has_linear = i915_mmu_notifier_has_linear(mn);
+	else
+		interval_tree_remove(&mo->it, &mn->objects);
+	__i915_mmu_notifier_update_serial(mn);
+	spin_unlock(&mn->lock);
+}
+
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
-	struct i915_mmu_object *mn;
+	struct i915_mmu_object *mo;
 
-	mn = obj->userptr.mn;
-	if (mn == NULL)
+	mo = obj->userptr.mmu_object;
+	if (mo == NULL)
 		return;
 
-	i915_mmu_notifier_del(mn->mmu, mn);
-	obj->userptr.mn = NULL;
+	i915_mmu_notifier_del(mo->mn, mo);
+	kfree(mo);
+
+	obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+	if (mm->mn == NULL) {
+		down_write(&mm->mm->mmap_sem);
+		mutex_lock(&to_i915(mm->dev)->mm_lock);
+		if (mm->mn == NULL)
+			mm->mn = i915_mmu_notifier_create(mm->mm);
+		mutex_unlock(&to_i915(mm->dev)->mm_lock);
+		up_write(&mm->mm->mmap_sem);
+	}
+	return mm->mn;
 }
 
 static int
 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 				    unsigned flags)
 {
-	struct i915_mmu_notifier *mmu;
-	struct i915_mmu_object *mn;
+	struct i915_mmu_notifier *mn;
+	struct i915_mmu_object *mo;
 	int ret;
 
 	if (flags & I915_USERPTR_UNSYNCHRONIZED)
 		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
 
-	down_write(&obj->userptr.mm->mmap_sem);
-	ret = i915_mutex_lock_interruptible(obj->base.dev);
-	if (ret == 0) {
-		mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
-		if (!IS_ERR(mmu))
-			mmu->count++; /* preemptive add to act as a refcount */
-		else
-			ret = PTR_ERR(mmu);
-		mutex_unlock(&obj->base.dev->struct_mutex);
-	}
-	up_write(&obj->userptr.mm->mmap_sem);
-	if (ret)
-		return ret;
+	if (WARN_ON(obj->userptr.mm == NULL))
+		return -EINVAL;
 
-	mn = kzalloc(sizeof(*mn), GFP_KERNEL);
-	if (mn == NULL) {
-		ret = -ENOMEM;
-		goto destroy_mmu;
-	}
+	mn = i915_mmu_notifier_find(obj->userptr.mm);
+	if (IS_ERR(mn))
+		return PTR_ERR(mn);
 
-	mn->mmu = mmu;
-	mn->it.start = obj->userptr.ptr;
-	mn->it.last = mn->it.start + obj->base.size - 1;
-	mn->obj = obj;
+	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+	if (mo == NULL)
+		return -ENOMEM;
 
-	ret = i915_mmu_notifier_add(mmu, mn);
-	if (ret)
-		goto free_mn;
+	mo->mn = mn;
+	mo->it.start = obj->userptr.ptr;
+	mo->it.last = mo->it.start + obj->base.size - 1;
+	mo->obj = obj;
 
-	obj->userptr.mn = mn;
+	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+	if (ret) {
+		kfree(mo);
+		return ret;
+	}
+
+	obj->userptr.mmu_object = mo;
 	return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+	if (mn == NULL)
+		return;
 
-free_mn:
+	mmu_notifier_unregister(&mn->mn, mm);
 	kfree(mn);
-destroy_mmu:
-	mutex_lock(&obj->base.dev->struct_mutex);
-	if (--mmu->count == 0)
-		__i915_mmu_notifier_destroy(mmu);
-	mutex_unlock(&obj->base.dev->struct_mutex);
-	return ret;
 }
 
 #else
@@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 
 	return 0;
 }
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+}
+
 #endif
 
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+	struct i915_mm_struct *mm;
+
+	/* Protected by dev_priv->mm_lock */
+	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+		if (mm->mm == real)
+			return mm;
+
+	return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_mm_struct *mm;
+	int ret = 0;
+
+	/* During release of the GEM object we hold the struct_mutex. This
+	 * precludes us from calling mmput() at that time as that may be
+	 * the last reference and so call exit_mmap(). exit_mmap() will
+	 * attempt to reap the vma, and if we were holding a GTT mmap
+	 * would then call drm_gem_vm_close() and attempt to reacquire
+	 * the struct mutex. So in order to avoid that recursion, we have
+	 * to defer releasing the mm reference until after we drop the
+	 * struct_mutex, i.e. we need to schedule a worker to do the clean
+	 * up.
+	 */
+	mutex_lock(&dev_priv->mm_lock);
+	mm = __i915_mm_struct_find(dev_priv, current->mm);
+	if (mm == NULL) {
+		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+		if (mm == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		kref_init(&mm->kref);
+		mm->dev = obj->base.dev;
+
+		mm->mm = current->mm;
+		atomic_inc(&current->mm->mm_count);
+
+		mm->mn = NULL;
+
+		/* Protected by dev_priv->mm_lock */
+		hash_add(dev_priv->mm_structs,
+			 &mm->node, (unsigned long)mm->mm);
+	} else
+		kref_get(&mm->kref);
+
+	obj->userptr.mm = mm;
+out:
+	mutex_unlock(&dev_priv->mm_lock);
+	return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+	i915_mmu_notifier_free(mm->mn, mm->mm);
+	mmdrop(mm->mm);
+	kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+	/* Protected by dev_priv->mm_lock */
+	hash_del(&mm->node);
+	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+	schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+	if (obj->userptr.mm == NULL)
+		return;
+
+	kref_put_mutex(&obj->userptr.mm->kref,
+		       __i915_mm_struct_free,
+		       &to_i915(obj->base.dev)->mm_lock);
+	obj->userptr.mm = NULL;
+}
+
 struct get_pages_work {
 	struct work_struct work;
 	struct drm_i915_gem_object *obj;
 	struct task_struct *task;
 };
 
-
 #if IS_ENABLED(CONFIG_SWIOTLB)
 #define swiotlb_active() swiotlb_nr_tbl()
 #else
@@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (pvec == NULL)
 		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
 	if (pvec != NULL) {
-		struct mm_struct *mm = obj->userptr.mm;
+		struct mm_struct *mm = obj->userptr.mm->mm;
 
 		down_read(&mm->mmap_sem);
 		while (pinned < num_pages) {
@@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
 	pvec = NULL;
 	pinned = 0;
-	if (obj->userptr.mm == current->mm) {
+	if (obj->userptr.mm->mm == current->mm) {
 		pvec = kmalloc(num_pages*sizeof(struct page *),
 			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 		if (pvec == NULL) {
@@ -651,17 +708,13 @@ static void
 i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
 	i915_gem_userptr_release__mmu_notifier(obj);
-
-	if (obj->userptr.mm) {
-		mmput(obj->userptr.mm);
-		obj->userptr.mm = NULL;
-	}
+	i915_gem_userptr_release__mm_struct(obj);
 }
 
 static int
 i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 {
-	if (obj->userptr.mn)
+	if (obj->userptr.mmu_object)
 		return 0;
 
 	return i915_gem_userptr_init__mmu_notifier(obj, 0);
@@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 		return -ENODEV;
 	}
 
-	/* Allocate the new object */
 	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
 		return -ENOMEM;
@@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 	 * at binding. This means that we need to hook into the mmu_notifier
 	 * in order to detect if the mmu is destroyed.
 	 */
-	ret = -ENOMEM;
-	if ((obj->userptr.mm = get_task_mm(current)))
+	ret = i915_gem_userptr_init__mm_struct(obj);
+	if (ret == 0)
 		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
 	if (ret == 0)
 		ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 int
 i915_gem_init_userptr(struct drm_device *dev)
 {
-#if defined(CONFIG_MMU_NOTIFIER)
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	hash_init(dev_priv->mmu_notifiers);
-#endif
+	mutex_init(&dev_priv->mm_lock);
+	hash_init(dev_priv->mm_structs);
 	return 0;
 }
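The release path above leans on kref_put_mutex(): the lock is taken only by the put that drops the last reference, the release callback runs with it held and must unlock it after unhashing, and the heavyweight teardown (mmu_notifier_unregister(), mmdrop()) is pushed to a workqueue so it runs with no locks held, exactly as the long comment in the hunk explains. A condensed sketch of that lifecycle (struct tracked and table_lock are illustrative names):

#include <linux/hashtable.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(table_lock);	/* stand-in for dev_priv->mm_lock */

struct tracked {
	struct kref kref;
	struct hlist_node node;
	struct work_struct work;
};

static void tracked_free_worker(struct work_struct *work)
{
	struct tracked *t = container_of(work, typeof(*t), work);

	/* Heavyweight teardown runs here, with no locks held. */
	kfree(t);
}

static void tracked_release(struct kref *kref)
{
	struct tracked *t = container_of(kref, typeof(*t), kref);

	hash_del(&t->node);		/* still under table_lock */
	mutex_unlock(&table_lock);	/* kref_put_mutex() left it held */

	INIT_WORK(&t->work, tracked_free_worker);
	schedule_work(&t->work);
}

static void tracked_put(struct tracked *t)
{
	/* Takes table_lock only if this put drops the last reference. */
	kref_put_mutex(&t->kref, tracked_release, &table_lock);
}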
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 390ccc2a3096..0050ee9470f1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	 * some connectors */
 	if (hpd_disabled) {
 		drm_kms_helper_poll_enable(dev);
-		mod_timer(&dev_priv->hotplug_reenable_timer,
-			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
+				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
 	}
 
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	drm_kms_helper_hotplug_event(dev);
 }
 
-static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
-{
-	del_timer_sync(&dev_priv->hotplug_reenable_timer);
-}
-
 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3892,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
 	if (!dev_priv)
 		return;
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	gen8_irq_reset(dev);
 }
 
@@ -3908,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
 
 	I915_WRITE(VLV_MASTER_IER, 0);
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	for_each_pipe(pipe)
 		I915_WRITE(PIPESTAT(pipe), 0xffff);
 
@@ -3988,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
 	if (!dev_priv)
 		return;
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	ironlake_irq_reset(dev);
 }
 
@@ -4360,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	if (I915_HAS_HOTPLUG(dev)) {
 		I915_WRITE(PORT_HOTPLUG_EN, 0);
 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4598,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
 	if (!dev_priv)
 		return;
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	I915_WRITE(PORT_HOTPLUG_EN, 0);
 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
@@ -4615,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
 	I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable(unsigned long data)
+static void intel_hpd_irq_reenable(struct work_struct *work)
 {
-	struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv),
+			     hotplug_reenable_work.work);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	unsigned long irqflags;
 	int i;
 
+	intel_runtime_pm_get(dev_priv);
+
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
 		struct drm_connector *connector;
@@ -4648,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data)
 	if (dev_priv->display.hpd_irq_setup)
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	intel_runtime_pm_put(dev_priv);
 }
 
 void intel_irq_init(struct drm_device *dev)
@@ -4670,8 +4661,8 @@ void intel_irq_init(struct drm_device *dev)
 	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
 		    i915_hangcheck_elapsed,
 		    (unsigned long) dev);
-	setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
-		    (unsigned long) dev_priv);
+	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
+			  intel_hpd_irq_reenable);
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
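The timer-to-delayed-work conversion in this file is what makes the intel_runtime_pm_get()/put() calls above legal: timer callbacks run in softirq context and must not sleep, while a work item runs in process context and may. A minimal sketch of the conversion (the demo_* names are illustrative):

#include <linux/workqueue.h>

struct demo_dev {
	struct delayed_work reenable_work;	/* was: struct timer_list */
};

/* Process context: unlike a timer callback, sleeping is allowed here,
 * e.g. to take a mutex or a runtime-PM reference. */
static void demo_reenable(struct work_struct *work)
{
	struct demo_dev *dd =
		container_of(work, typeof(*dd), reenable_work.work);

	(void)dd;	/* re-enable hardware here */
}

static void demo_arm(struct demo_dev *dd)
{
	INIT_DELAYED_WORK(&dd->reenable_work, demo_reenable);

	/* was: mod_timer(&timer, jiffies + msecs_to_jiffies(100)); */
	mod_delayed_work(system_wq, &dd->reenable_work,
			 msecs_to_jiffies(100));
}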
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e4d7607da2c4..f29b44c86a2f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,16 +334,20 @@
 #define GFX_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 #define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
 #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
-#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+
+#define COLOR_BLT_CMD			(2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
 #define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
 #define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
+#define BLT_WRITE_A			(2<<20)
+#define BLT_WRITE_RGB			(1<<20)
+#define BLT_WRITE_RGBA			(BLT_WRITE_RGB | BLT_WRITE_A)
 #define BLT_DEPTH_8			(0<<24)
 #define BLT_DEPTH_16_565		(1<<24)
 #define BLT_DEPTH_16_1555		(2<<24)
 #define BLT_DEPTH_32			(3<<24)
-#define BLT_ROP_GXCOPY			(0xcc<<16)
+#define BLT_ROP_SRC_COPY		(0xcc<<16)
+#define BLT_ROP_COLOR_COPY		(0xf0<<16)
 #define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
 #define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
 #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
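For orientation, the blitter command defines above pack a client field in the top bits (2 = 2D/BLT), an opcode below it, and a length field in the low bits that stores the total dword count minus two, which is why COLOR_BLT_CMD ends in (5-2). A userspace sketch decoding COLOR_BLT_CMD under that reading (the exact field widths are an assumption drawn from the defines themselves):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cmd = (2u << 29) | (0x40u << 22) | (5 - 2); /* COLOR_BLT_CMD */

	printf("client=%u opcode=0x%02x total=%u dwords\n",
	       cmd >> 29,		/* 2 = 2D/blitter client */
	       (cmd >> 22) & 0x7f,	/* 0x40 = COLOR_BLT */
	       (cmd & 0xff) + 2);	/* length field stores dwords - 2 */
	return 0;
}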
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index a66955037e4e..eee79e1c3222 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1123,7 +1123,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 	}
 }
 
-static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
 {
 	DRM_DEBUG_KMS("Falling back to manually reading VBT from "
 		      "VBIOS ROM for %s\n",
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 2efaf8e8d9c4..9212e6504e0f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 		goto out;
 	}
 
+	drm_modeset_acquire_init(&ctx, 0);
+
 	/* for pre-945g platforms use load detect */
 	if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
 		if (intel_crt_detect_ddc(connector))
 			status = connector_status_connected;
 		else
 			status = intel_crt_load_detect(crt);
-		intel_release_load_detect_pipe(connector, &tmp, &ctx);
+		intel_release_load_detect_pipe(connector, &tmp);
 	} else
 		status = connector_status_unknown;
 
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
 out:
 	intel_display_power_put(dev_priv, power_domain);
 	return status;
@@ -799,7 +804,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
 	.destroy = intel_encoder_destroy,
 };
 
-static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
 {
 	DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
 	return 1;
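Dropping __init from the DMI callbacks in this file (and in intel_bios.c above) is a section fix rather than a cleanup: __init code is discarded once boot finishes, but these callbacks are invoked through dmi_check_system() from detect and setup paths that can run much later, so keeping the annotation risks jumping into freed .init.text. The shape of the bug, as a sketch (demo_dmi_callback and the table are hypothetical):

#include <linux/dmi.h>
#include <linux/printk.h>

/* With __init this would land in .init.text, which the kernel frees
 * after boot; a later dmi_check_system() call would then branch into
 * freed memory. Hence the annotation is removed. */
static int demo_dmi_callback(const struct dmi_system_id *id)
{
	pr_info("quirk applies to %s\n", id->ident);
	return 1;
}

static const struct dmi_system_id demo_quirks[] = {
	{
		.callback = demo_dmi_callback,
		.ident = "Example Machine",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		},
	},
	{ }	/* terminator */
};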
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 018fb7222f60..d8324c69fa86 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2233,6 +2233,15 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	if (need_vtd_wa(dev) && alignment < 256 * 1024)
 		alignment = 256 * 1024;
 
+	/*
+	 * Global gtt pte registers are special registers which actually forward
+	 * writes to a chunk of system memory. Which means that there is no risk
+	 * that the register values disappear as soon as we call
+	 * intel_runtime_pm_put(), so it is correct to wrap only the
+	 * pin/unpin/fence and not more.
+	 */
+	intel_runtime_pm_get(dev_priv);
+
 	dev_priv->mm.interruptible = false;
 	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 	if (ret)
@@ -2250,12 +2259,14 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	i915_gem_object_pin_fence(obj);
 
 	dev_priv->mm.interruptible = true;
+	intel_runtime_pm_put(dev_priv);
 	return 0;
 
 err_unpin:
 	i915_gem_object_unpin_from_display_plane(obj);
 err_interruptible:
 	dev_priv->mm.interruptible = true;
+	intel_runtime_pm_put(dev_priv);
 	return ret;
 }
 
@@ -4188,10 +4199,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
 
 	intel_disable_pipe(dev_priv, pipe);
-
-	if (intel_crtc->config.dp_encoder_is_mst)
-		intel_ddi_set_vc_payload_alloc(crtc, false);
-
 	ironlake_pfit_disable(intel_crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4256,6 +4263,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
 	intel_disable_pipe(dev_priv, pipe);
 
+	if (intel_crtc->config.dp_encoder_is_mst)
+		intel_ddi_set_vc_payload_alloc(crtc, false);
+
 	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
 	ironlake_pfit_disable(intel_crtc);
@@ -8240,6 +8250,15 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
 		goto fail_locked;
 	}
 
+	/*
+	 * Global gtt pte registers are special registers which actually
+	 * forward writes to a chunk of system memory. Which means that
+	 * there is no risk that the register values disappear as soon
+	 * as we call intel_runtime_pm_put(), so it is correct to wrap
+	 * only the pin/unpin/fence and not more.
+	 */
+	intel_runtime_pm_get(dev_priv);
+
 	/* Note that the w/a also requires 2 PTE of padding following
 	 * the bo. We currently fill all unused PTE with the shadow
 	 * page and so we should always have valid PTE following the
@@ -8252,16 +8271,20 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
 		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
+			intel_runtime_pm_put(dev_priv);
 			goto fail_locked;
 		}
 
 		ret = i915_gem_object_put_fence(obj);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to release fence for cursor");
+			intel_runtime_pm_put(dev_priv);
 			goto fail_unpin;
 		}
 
 		addr = i915_gem_obj_ggtt_offset(obj);
+
+		intel_runtime_pm_put(dev_priv);
 	} else {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
 		ret = i915_gem_object_attach_phys(obj, align);
@@ -8462,8 +8485,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 		      connector->base.id, connector->name,
 		      encoder->base.id, encoder->name);
 
-	drm_modeset_acquire_init(ctx, 0);
-
 retry:
 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
 	if (ret)
@@ -8502,10 +8523,14 @@ retry:
 		i++;
 		if (!(encoder->possible_crtcs & (1 << i)))
 			continue;
-		if (!possible_crtc->enabled) {
-			crtc = possible_crtc;
-			break;
-		}
+		if (possible_crtc->enabled)
+			continue;
+		/* This can occur when applying the pipe A quirk on resume. */
+		if (to_intel_crtc(possible_crtc)->new_enabled)
+			continue;
+
+		crtc = possible_crtc;
+		break;
 	}
 
 	/*
@@ -8574,15 +8599,11 @@ fail_unlock:
 		goto retry;
 	}
 
-	drm_modeset_drop_locks(ctx);
-	drm_modeset_acquire_fini(ctx);
-
 	return false;
 }
 
 void intel_release_load_detect_pipe(struct drm_connector *connector,
-				    struct intel_load_detect_pipe *old,
-				    struct drm_modeset_acquire_ctx *ctx)
+				    struct intel_load_detect_pipe *old)
 {
 	struct intel_encoder *intel_encoder =
 		intel_attached_encoder(connector);
@@ -8606,17 +8627,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 			drm_framebuffer_unreference(old->release_fb);
 		}
 
-		goto unlock;
 		return;
 	}
 
 	/* Switch crtc and encoder back off if necessary */
 	if (old->dpms_mode != DRM_MODE_DPMS_ON)
 		connector->funcs->dpms(connector, old->dpms_mode);
-
-unlock:
-	drm_modeset_drop_locks(ctx);
-	drm_modeset_acquire_fini(ctx);
 }
 
 static int i9xx_pll_refclk(struct drm_device *dev,
@@ -11700,8 +11716,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	};
 	const struct drm_rect clip = {
 		/* integer pixels */
-		.x2 = intel_crtc->config.pipe_src_w,
-		.y2 = intel_crtc->config.pipe_src_h,
+		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
 	};
 	bool visible;
 	int ret;
@@ -12488,6 +12504,9 @@ static struct intel_quirk intel_quirks[] = {
 	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
 	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
 
+	/* Acer C720 Chromebook (Core i3 4005U) */
+	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
 	/* Toshiba CB35 Chromebook (Celeron 2955U) */
 	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 
@@ -12659,7 +12678,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
 	struct intel_connector *connector;
 	struct drm_connector *crt = NULL;
 	struct intel_load_detect_pipe load_detect_temp;
-	struct drm_modeset_acquire_ctx ctx;
+	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
 
 	/* We can't just switch on the pipe A, we need to set things up with a
 	 * proper mode and output configuration. As a gross hack, enable pipe A
@@ -12676,10 +12695,8 @@ static void intel_enable_pipe_a(struct drm_device *dev)
 	if (!crt)
 		return;
 
-	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
-		intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
-
-
+	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
+		intel_release_load_detect_pipe(crt, &load_detect_temp);
 }
 
 static bool
@@ -13112,7 +13129,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	 * experience fancy races otherwise.
 	 */
 	drm_irq_uninstall(dev);
-	cancel_work_sync(&dev_priv->hotplug_work);
+	intel_hpd_cancel_work(dev_priv);
 	dev_priv->pm._irqs_disabled = true;
 
 	/*
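The intel_runtime_pm_get()/put() pairs added in this file keep the device awake only across the pin/unpin/fence register writes; as the in-diff comments note, global GTT PTEs forward to system memory, so the written values survive the put. The same bracketing expressed with the generic runtime-PM API, as a sketch (demo_touch_registers is illustrative):

#include <linux/pm_runtime.h>

static int demo_touch_registers(struct device *dev)
{
	int ret;

	/* Wake the device, or bump its usage count if already awake. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return ret;
	}

	/* ... program registers while the device is guaranteed awake ... */

	pm_runtime_put(dev);	/* allow autosuspend again */
	return 0;
}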
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ee3942f0b068..fdff1d420c14 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1631,6 +1631,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 
 	pipe_config->adjusted_mode.flags |= flags;
 
+	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+	    tmp & DP_COLOR_RANGE_16_235)
+		pipe_config->limited_color_range = true;
+
 	pipe_config->has_dp_encoder = true;
 
 	intel_dp_get_m_n(crtc, pipe_config);
@@ -3553,6 +3557,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
3553 if (WARN_ON(!intel_encoder->base.crtc)) 3557 if (WARN_ON(!intel_encoder->base.crtc))
3554 return; 3558 return;
3555 3559
3560 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3561 return;
3562
3556 /* Try to read receiver status if the link appears to be up */ 3563 /* Try to read receiver status if the link appears to be up */
3557 if (!intel_dp_get_link_status(intel_dp, link_status)) { 3564 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3558 return; 3565 return;
@@ -3658,24 +3665,12 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
3658 return intel_dp_detect_dpcd(intel_dp); 3665 return intel_dp_detect_dpcd(intel_dp);
3659} 3666}
3660 3667
3661static enum drm_connector_status 3668static int g4x_digital_port_connected(struct drm_device *dev,
3662g4x_dp_detect(struct intel_dp *intel_dp) 3669 struct intel_digital_port *intel_dig_port)
3663{ 3670{
3664 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3665 struct drm_i915_private *dev_priv = dev->dev_private; 3671 struct drm_i915_private *dev_priv = dev->dev_private;
3666 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3667 uint32_t bit; 3672 uint32_t bit;
3668 3673
3669 /* Can't disconnect eDP, but you can close the lid... */
3670 if (is_edp(intel_dp)) {
3671 enum drm_connector_status status;
3672
3673 status = intel_panel_detect(dev);
3674 if (status == connector_status_unknown)
3675 status = connector_status_connected;
3676 return status;
3677 }
3678
3679 if (IS_VALLEYVIEW(dev)) { 3674 if (IS_VALLEYVIEW(dev)) {
3680 switch (intel_dig_port->port) { 3675 switch (intel_dig_port->port) {
3681 case PORT_B: 3676 case PORT_B:
@@ -3688,7 +3683,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
3688 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; 3683 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
3689 break; 3684 break;
3690 default: 3685 default:
3691 return connector_status_unknown; 3686 return -EINVAL;
3692 } 3687 }
3693 } else { 3688 } else {
3694 switch (intel_dig_port->port) { 3689 switch (intel_dig_port->port) {
@@ -3702,11 +3697,36 @@ g4x_dp_detect(struct intel_dp *intel_dp)
3702 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 3697 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
3703 break; 3698 break;
3704 default: 3699 default:
3705 return connector_status_unknown; 3700 return -EINVAL;
3706 } 3701 }
3707 } 3702 }
3708 3703
3709 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) 3704 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
3705 return 0;
3706 return 1;
3707}
3708
3709static enum drm_connector_status
3710g4x_dp_detect(struct intel_dp *intel_dp)
3711{
3712 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3713 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3714 int ret;
3715
3716 /* Can't disconnect eDP, but you can close the lid... */
3717 if (is_edp(intel_dp)) {
3718 enum drm_connector_status status;
3719
3720 status = intel_panel_detect(dev);
3721 if (status == connector_status_unknown)
3722 status = connector_status_connected;
3723 return status;
3724 }
3725
3726 ret = g4x_digital_port_connected(dev, intel_dig_port);
3727 if (ret == -EINVAL)
3728 return connector_status_unknown;
3729 else if (ret == 0)
3710 return connector_status_disconnected; 3730 return connector_status_disconnected;
3711 3731
3712 return intel_dp_detect_dpcd(intel_dp); 3732 return intel_dp_detect_dpcd(intel_dp);
@@ -4003,6 +4023,16 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4003 kfree(intel_dig_port); 4023 kfree(intel_dig_port);
4004} 4024}
4005 4025
4026static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4027{
4028 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4029
4030 if (!is_edp(intel_dp))
4031 return;
4032
4033 edp_panel_vdd_off_sync(intel_dp);
4034}
4035
4006static void intel_dp_encoder_reset(struct drm_encoder *encoder) 4036static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4007{ 4037{
4008 intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder)); 4038 intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
@@ -4037,18 +4067,30 @@ bool
4037intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) 4067intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4038{ 4068{
4039 struct intel_dp *intel_dp = &intel_dig_port->dp; 4069 struct intel_dp *intel_dp = &intel_dig_port->dp;
4070 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4040 struct drm_device *dev = intel_dig_port->base.base.dev; 4071 struct drm_device *dev = intel_dig_port->base.base.dev;
4041 struct drm_i915_private *dev_priv = dev->dev_private; 4072 struct drm_i915_private *dev_priv = dev->dev_private;
4042 int ret; 4073 enum intel_display_power_domain power_domain;
4074 bool ret = true;
4075
4043 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) 4076 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4044 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; 4077 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4045 4078
4046 DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, 4079 DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
4047 long_hpd ? "long" : "short"); 4080 long_hpd ? "long" : "short");
4048 4081
4082 power_domain = intel_display_port_power_domain(intel_encoder);
4083 intel_display_power_get(dev_priv, power_domain);
4084
4049 if (long_hpd) { 4085 if (long_hpd) {
4050 if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) 4086
4051 goto mst_fail; 4087 if (HAS_PCH_SPLIT(dev)) {
4088 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4089 goto mst_fail;
4090 } else {
4091 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4092 goto mst_fail;
4093 }
4052 4094
4053 if (!intel_dp_get_dpcd(intel_dp)) { 4095 if (!intel_dp_get_dpcd(intel_dp)) {
4054 goto mst_fail; 4096 goto mst_fail;
@@ -4061,8 +4103,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4061 4103
4062 } else { 4104 } else {
4063 if (intel_dp->is_mst) { 4105 if (intel_dp->is_mst) {
4064 ret = intel_dp_check_mst_status(intel_dp); 4106 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4065 if (ret == -EINVAL)
4066 goto mst_fail; 4107 goto mst_fail;
4067 } 4108 }
4068 4109
@@ -4076,7 +4117,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4076 drm_modeset_unlock(&dev->mode_config.connection_mutex); 4117 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4077 } 4118 }
4078 } 4119 }
4079 return false; 4120 ret = false;
4121 goto put_power;
4080mst_fail: 4122mst_fail:
4081 /* if we were in MST mode, and device is not there get out of MST mode */ 4123 /* if we were in MST mode, and device is not there get out of MST mode */
4082 if (intel_dp->is_mst) { 4124 if (intel_dp->is_mst) {
@@ -4084,7 +4126,10 @@ mst_fail:
4084 intel_dp->is_mst = false; 4126 intel_dp->is_mst = false;
4085 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); 4127 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4086 } 4128 }
4087 return true; 4129put_power:
4130 intel_display_power_put(dev_priv, power_domain);
4131
4132 return ret;
4088} 4133}
4089 4134
4090/* Return which DP Port should be selected for Transcoder DP control */ 4135/* Return which DP Port should be selected for Transcoder DP control */
@@ -4722,6 +4767,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4722 intel_encoder->disable = intel_disable_dp; 4767 intel_encoder->disable = intel_disable_dp;
4723 intel_encoder->get_hw_state = intel_dp_get_hw_state; 4768 intel_encoder->get_hw_state = intel_dp_get_hw_state;
4724 intel_encoder->get_config = intel_dp_get_config; 4769 intel_encoder->get_config = intel_dp_get_config;
4770 intel_encoder->suspend = intel_dp_encoder_suspend;
4725 if (IS_CHERRYVIEW(dev)) { 4771 if (IS_CHERRYVIEW(dev)) {
4726 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 4772 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
4727 intel_encoder->pre_enable = chv_pre_enable_dp; 4773 intel_encoder->pre_enable = chv_pre_enable_dp;
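The reworked intel_dp_hpd_pulse() takes a display power reference up front and routes every exit, success or mst_fail, through one release point. A standalone sketch of the goto-based bracketing, with hypothetical power_get()/power_put() stand-ins for the i915 power-domain calls:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the i915 power-domain get/put calls. */
static void power_get(void) { puts("power: get"); }
static void power_put(void) { puts("power: put"); }

static bool handle_hpd(bool long_pulse, bool port_connected)
{
	bool ret = true;		/* default: caller should do a full probe */

	power_get();			/* hardware must stay awake from here on */

	if (long_pulse && !port_connected)
		goto put_power;		/* failure paths still release the ref */

	/* ... short/long pulse handling ... */
	ret = false;			/* handled entirely here */
put_power:
	power_put();			/* the single release point */
	return ret;
}

int main(void)
{
	printf("ret=%d\n", handle_hpd(true, false));
	return 0;
}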
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4b2664bd5b81..b8c8bbd8e5f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -153,6 +153,12 @@ struct intel_encoder {
153 * be set correctly before calling this function. */ 153 * be set correctly before calling this function. */
154 void (*get_config)(struct intel_encoder *, 154 void (*get_config)(struct intel_encoder *,
155 struct intel_crtc_config *pipe_config); 155 struct intel_crtc_config *pipe_config);
156 /*
157 * Called during system suspend after all pending requests for the
158 * encoder are flushed (for example for DP AUX transactions) and
159 * device interrupts are disabled.
160 */
161 void (*suspend)(struct intel_encoder *);
156 int crtc_mask; 162 int crtc_mask;
157 enum hpd_pin hpd_pin; 163 enum hpd_pin hpd_pin;
158}; 164};
@@ -830,8 +836,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
830 struct intel_load_detect_pipe *old, 836 struct intel_load_detect_pipe *old,
831 struct drm_modeset_acquire_ctx *ctx); 837 struct drm_modeset_acquire_ctx *ctx);
832void intel_release_load_detect_pipe(struct drm_connector *connector, 838void intel_release_load_detect_pipe(struct drm_connector *connector,
833 struct intel_load_detect_pipe *old, 839 struct intel_load_detect_pipe *old);
834 struct drm_modeset_acquire_ctx *ctx);
835int intel_pin_and_fence_fb_obj(struct drm_device *dev, 840int intel_pin_and_fence_fb_obj(struct drm_device *dev,
836 struct drm_i915_gem_object *obj, 841 struct drm_i915_gem_object *obj,
837 struct intel_engine_cs *pipelined); 842 struct intel_engine_cs *pipelined);
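The new ->suspend() hook added to struct intel_encoder is optional, so callers must tolerate encoders that leave it NULL (only DP sets it in this series). A small illustrative sketch of that optional-callback convention; the names here are made up, not the i915 call site:

#include <stddef.h>
#include <stdio.h>

struct encoder {
	const char *name;
	void (*suspend)(struct encoder *);	/* optional, may be NULL */
};

static void edp_suspend(struct encoder *e)
{
	printf("%s: dropping panel VDD\n", e->name);
}

static void suspend_all(struct encoder *encs, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (encs[i].suspend)		/* skip encoders without the hook */
			encs[i].suspend(&encs[i]);
}

int main(void)
{
	struct encoder encs[] = {
		{ .name = "eDP", .suspend = edp_suspend },
		{ .name = "HDMI", .suspend = NULL },
	};
	suspend_all(encs, sizeof(encs) / sizeof(encs[0]));
	return 0;
}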
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f9151f6641d9..5a9de21637b7 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -712,7 +712,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
712 struct intel_crtc_config *pipe_config) 712 struct intel_crtc_config *pipe_config)
713{ 713{
714 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 714 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
715 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 715 struct drm_device *dev = encoder->base.dev;
716 struct drm_i915_private *dev_priv = dev->dev_private;
716 u32 tmp, flags = 0; 717 u32 tmp, flags = 0;
717 int dotclock; 718 int dotclock;
718 719
@@ -731,9 +732,13 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
731 if (tmp & HDMI_MODE_SELECT_HDMI) 732 if (tmp & HDMI_MODE_SELECT_HDMI)
732 pipe_config->has_hdmi_sink = true; 733 pipe_config->has_hdmi_sink = true;
733 734
734 if (tmp & HDMI_MODE_SELECT_HDMI) 735 if (tmp & SDVO_AUDIO_ENABLE)
735 pipe_config->has_audio = true; 736 pipe_config->has_audio = true;
736 737
738 if (!HAS_PCH_SPLIT(dev) &&
739 tmp & HDMI_COLOR_RANGE_16_235)
740 pipe_config->limited_color_range = true;
741
737 pipe_config->adjusted_mode.flags |= flags; 742 pipe_config->adjusted_mode.flags |= flags;
738 743
739 if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc) 744 if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 881361c0f27e..fdf40267249c 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -538,7 +538,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
538 .destroy = intel_encoder_destroy, 538 .destroy = intel_encoder_destroy,
539}; 539};
540 540
541static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) 541static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
542{ 542{
543 DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); 543 DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
544 return 1; 544 return 1;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 59b028f0b1e8..8e374449c6b5 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -801,7 +801,7 @@ static void pch_enable_backlight(struct intel_connector *connector)
801 801
802 cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); 802 cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
803 if (cpu_ctl2 & BLM_PWM_ENABLE) { 803 if (cpu_ctl2 & BLM_PWM_ENABLE) {
804 WARN(1, "cpu backlight already enabled\n"); 804 DRM_DEBUG_KMS("cpu backlight already enabled\n");
805 cpu_ctl2 &= ~BLM_PWM_ENABLE; 805 cpu_ctl2 &= ~BLM_PWM_ENABLE;
806 I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2); 806 I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
807 } 807 }
@@ -845,7 +845,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
845 845
846 ctl = I915_READ(BLC_PWM_CTL); 846 ctl = I915_READ(BLC_PWM_CTL);
847 if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { 847 if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
848 WARN(1, "backlight already enabled\n"); 848 DRM_DEBUG_KMS("backlight already enabled\n");
849 I915_WRITE(BLC_PWM_CTL, 0); 849 I915_WRITE(BLC_PWM_CTL, 0);
850 } 850 }
851 851
@@ -876,7 +876,7 @@ static void i965_enable_backlight(struct intel_connector *connector)
876 876
877 ctl2 = I915_READ(BLC_PWM_CTL2); 877 ctl2 = I915_READ(BLC_PWM_CTL2);
878 if (ctl2 & BLM_PWM_ENABLE) { 878 if (ctl2 & BLM_PWM_ENABLE) {
879 WARN(1, "backlight already enabled\n"); 879 DRM_DEBUG_KMS("backlight already enabled\n");
880 ctl2 &= ~BLM_PWM_ENABLE; 880 ctl2 &= ~BLM_PWM_ENABLE;
881 I915_WRITE(BLC_PWM_CTL2, ctl2); 881 I915_WRITE(BLC_PWM_CTL2, ctl2);
882 } 882 }
@@ -910,7 +910,7 @@ static void vlv_enable_backlight(struct intel_connector *connector)
910 910
911 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); 911 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
912 if (ctl2 & BLM_PWM_ENABLE) { 912 if (ctl2 & BLM_PWM_ENABLE) {
913 WARN(1, "backlight already enabled\n"); 913 DRM_DEBUG_KMS("backlight already enabled\n");
914 ctl2 &= ~BLM_PWM_ENABLE; 914 ctl2 &= ~BLM_PWM_ENABLE;
915 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2); 915 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
916 } 916 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 16371a444426..47a126a0493f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1363 1363
1364/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ 1364/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1365#define I830_BATCH_LIMIT (256*1024) 1365#define I830_BATCH_LIMIT (256*1024)
1366#define I830_TLB_ENTRIES (2)
1367#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1366static int 1368static int
1367i830_dispatch_execbuffer(struct intel_engine_cs *ring, 1369i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1368 u64 offset, u32 len, 1370 u64 offset, u32 len,
1369 unsigned flags) 1371 unsigned flags)
1370{ 1372{
1373 u32 cs_offset = ring->scratch.gtt_offset;
1371 int ret; 1374 int ret;
1372 1375
1373 if (flags & I915_DISPATCH_PINNED) { 1376 ret = intel_ring_begin(ring, 6);
1374 ret = intel_ring_begin(ring, 4); 1377 if (ret)
1375 if (ret) 1378 return ret;
1376 return ret;
1377 1379
1378 intel_ring_emit(ring, MI_BATCH_BUFFER); 1380 /* Evict the invalid PTE TLBs */
1379 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); 1381 intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
1380 intel_ring_emit(ring, offset + len - 8); 1382 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
1381 intel_ring_emit(ring, MI_NOOP); 1383 intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
1382 intel_ring_advance(ring); 1384 intel_ring_emit(ring, cs_offset);
1383 } else { 1385 intel_ring_emit(ring, 0xdeadbeef);
1384 u32 cs_offset = ring->scratch.gtt_offset; 1386 intel_ring_emit(ring, MI_NOOP);
1387 intel_ring_advance(ring);
1385 1388
1389 if ((flags & I915_DISPATCH_PINNED) == 0) {
1386 if (len > I830_BATCH_LIMIT) 1390 if (len > I830_BATCH_LIMIT)
1387 return -ENOSPC; 1391 return -ENOSPC;
1388 1392
1389 ret = intel_ring_begin(ring, 9+3); 1393 ret = intel_ring_begin(ring, 6 + 2);
1390 if (ret) 1394 if (ret)
1391 return ret; 1395 return ret;
1392 /* Blit the batch (which now has all relocs applied) to the stable batch 1396
1393 * scratch bo area (so that the CS never stumbles over its tlb 1397 /* Blit the batch (which now has all relocs applied) to the
1394 * invalidation bug) ... */ 1398 * stable batch scratch bo area (so that the CS never
1395 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | 1399 * stumbles over its tlb invalidation bug) ...
1396 XY_SRC_COPY_BLT_WRITE_ALPHA | 1400 */
1397 XY_SRC_COPY_BLT_WRITE_RGB); 1401 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
1398 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); 1402 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
1399 intel_ring_emit(ring, 0); 1403 intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
1400 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1401 intel_ring_emit(ring, cs_offset); 1404 intel_ring_emit(ring, cs_offset);
1402 intel_ring_emit(ring, 0);
1403 intel_ring_emit(ring, 4096); 1405 intel_ring_emit(ring, 4096);
1404 intel_ring_emit(ring, offset); 1406 intel_ring_emit(ring, offset);
1407
1405 intel_ring_emit(ring, MI_FLUSH); 1408 intel_ring_emit(ring, MI_FLUSH);
1409 intel_ring_emit(ring, MI_NOOP);
1410 intel_ring_advance(ring);
1406 1411
1407 /* ... and execute it. */ 1412 /* ... and execute it. */
1408 intel_ring_emit(ring, MI_BATCH_BUFFER); 1413 offset = cs_offset;
1409 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1410 intel_ring_emit(ring, cs_offset + len - 8);
1411 intel_ring_advance(ring);
1412 } 1414 }
1413 1415
1416 ret = intel_ring_begin(ring, 4);
1417 if (ret)
1418 return ret;
1419
1420 intel_ring_emit(ring, MI_BATCH_BUFFER);
1421 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1422 intel_ring_emit(ring, offset + len - 8);
1423 intel_ring_emit(ring, MI_NOOP);
1424 intel_ring_advance(ring);
1425
1414 return 0; 1426 return 0;
1415} 1427}
1416 1428
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2200 2212
2201 /* Workaround batchbuffer to combat CS tlb bug. */ 2213 /* Workaround batchbuffer to combat CS tlb bug. */
2202 if (HAS_BROKEN_CS_TLB(dev)) { 2214 if (HAS_BROKEN_CS_TLB(dev)) {
2203 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); 2215 obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
2204 if (obj == NULL) { 2216 if (obj == NULL) {
2205 DRM_ERROR("Failed to allocate batch bo\n"); 2217 DRM_ERROR("Failed to allocate batch bo\n");
2206 return -ENOMEM; 2218 return -ENOMEM;
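The workaround BO is now sized by I830_WA_SIZE instead of the bare batch limit. Since two TLB entries of 4096 bytes are far smaller than the 256 KiB batch limit, the max() still resolves to 256 KiB, but the macro documents that the buffer must cover both the TLB-evicting blit and the copied batch. A standalone check of the arithmetic, with a plain max() in place of the kernel's type-checked macro:

#include <stdio.h>

#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define max(a, b) ((a) > (b) ? (a) : (b))	/* plain sketch of kernel max() */
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)

int main(void)
{
	printf("TLB scratch: %d bytes\n", I830_TLB_ENTRIES * 4096);	/* 8192 */
	printf("batch limit: %d bytes\n", I830_BATCH_LIMIT);		/* 262144 */
	printf("wa bo size:  %d bytes\n", I830_WA_SIZE);		/* 262144 */
	return 0;
}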
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index e211eef4b7e4..c14341ca3ef9 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
854 struct drm_device *dev = encoder->base.dev; 854 struct drm_device *dev = encoder->base.dev;
855 struct drm_i915_private *dev_priv = dev->dev_private; 855 struct drm_i915_private *dev_priv = dev->dev_private;
856 856
857 /* Prevents vblank waits from timing out in intel_tv_detect_type() */
858 intel_wait_for_vblank(encoder->base.dev,
859 to_intel_crtc(encoder->base.crtc)->pipe);
860
857 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); 861 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
858} 862}
859 863
@@ -1311,6 +1315,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1311{ 1315{
1312 struct drm_display_mode mode; 1316 struct drm_display_mode mode;
1313 struct intel_tv *intel_tv = intel_attached_tv(connector); 1317 struct intel_tv *intel_tv = intel_attached_tv(connector);
1318 enum drm_connector_status status;
1314 int type; 1319 int type;
1315 1320
1316 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", 1321 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
@@ -1323,16 +1328,24 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1323 struct intel_load_detect_pipe tmp; 1328 struct intel_load_detect_pipe tmp;
1324 struct drm_modeset_acquire_ctx ctx; 1329 struct drm_modeset_acquire_ctx ctx;
1325 1330
1331 drm_modeset_acquire_init(&ctx, 0);
1332
1326 if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { 1333 if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
1327 type = intel_tv_detect_type(intel_tv, connector); 1334 type = intel_tv_detect_type(intel_tv, connector);
1328 intel_release_load_detect_pipe(connector, &tmp, &ctx); 1335 intel_release_load_detect_pipe(connector, &tmp);
1336 status = type < 0 ?
1337 connector_status_disconnected :
1338 connector_status_connected;
1329 } else 1339 } else
1330 return connector_status_unknown; 1340 status = connector_status_unknown;
1341
1342 drm_modeset_drop_locks(&ctx);
1343 drm_modeset_acquire_fini(&ctx);
1331 } else 1344 } else
1332 return connector->status; 1345 return connector->status;
1333 1346
1334 if (type < 0) 1347 if (status != connector_status_connected)
1335 return connector_status_disconnected; 1348 return status;
1336 1349
1337 intel_tv->type = type; 1350 intel_tv->type = type;
1338 intel_tv_find_better_format(connector); 1351 intel_tv_find_better_format(connector);
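intel_tv_detect() now owns its acquire context explicitly: init before load detection, capture the status, then drop the locks and fini, instead of returning early with the context still live. A stubbed sketch of that init/use/drop/fini ordering; the helpers only mimic drm_modeset_acquire_init(), drm_modeset_drop_locks() and drm_modeset_acquire_fini():

#include <stdio.h>

struct ctx { int locks; };

/* Stubs mirroring the drm_modeset_* lifecycle calls. */
static void acquire_init(struct ctx *c) { c->locks = 0; }
static void drop_locks(struct ctx *c)   { c->locks = 0; }
static void acquire_fini(struct ctx *c) { (void)c; }
static int  load_detect(struct ctx *c)  { c->locks = 1; return 1; }

static int detect(void)
{
	struct ctx c;
	int status;

	acquire_init(&c);
	if (load_detect(&c))
		status = 1;	/* capture status instead of returning early */
	else
		status = 0;
	drop_locks(&c);		/* always unwind the locks... */
	acquire_fini(&c);	/* ...before tearing the context down */
	return status;
}

int main(void)
{
	printf("connected=%d\n", detect());
	return 0;
}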
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index a125a7e32742..c6c9b02e0ada 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -258,28 +258,30 @@ static void set_hdmi_pdev(struct drm_device *dev,
258 priv->hdmi_pdev = pdev; 258 priv->hdmi_pdev = pdev;
259} 259}
260 260
261#ifdef CONFIG_OF
262static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
263{
264 int gpio = of_get_named_gpio(of_node, name, 0);
265 if (gpio < 0) {
266 char name2[32];
267 snprintf(name2, sizeof(name2), "%s-gpio", name);
268 gpio = of_get_named_gpio(of_node, name2, 0);
269 if (gpio < 0) {
270 dev_err(dev, "failed to get gpio: %s (%d)\n",
271 name, gpio);
272 gpio = -1;
273 }
274 }
275 return gpio;
276}
277#endif
278
261static int hdmi_bind(struct device *dev, struct device *master, void *data) 279static int hdmi_bind(struct device *dev, struct device *master, void *data)
262{ 280{
263 static struct hdmi_platform_config config = {}; 281 static struct hdmi_platform_config config = {};
264#ifdef CONFIG_OF 282#ifdef CONFIG_OF
265 struct device_node *of_node = dev->of_node; 283 struct device_node *of_node = dev->of_node;
266 284
267 int get_gpio(const char *name)
268 {
269 int gpio = of_get_named_gpio(of_node, name, 0);
270 if (gpio < 0) {
271 char name2[32];
272 snprintf(name2, sizeof(name2), "%s-gpio", name);
273 gpio = of_get_named_gpio(of_node, name2, 0);
274 if (gpio < 0) {
275 dev_err(dev, "failed to get gpio: %s (%d)\n",
276 name, gpio);
277 gpio = -1;
278 }
279 }
280 return gpio;
281 }
282
283 if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) { 285 if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
284 static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"}; 286 static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
285 static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"}; 287 static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
@@ -312,12 +314,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
312 } 314 }
313 315
314 config.mmio_name = "core_physical"; 316 config.mmio_name = "core_physical";
315 config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk"); 317 config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
316 config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data"); 318 config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
317 config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd"); 319 config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
318 config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en"); 320 config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
319 config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel"); 321 config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
320 config.mux_lpm_gpio = get_gpio("qcom,hdmi-tx-mux-lpm"); 322 config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
321 323
322#else 324#else
323 static const char *hpd_clk_names[] = { 325 static const char *hpd_clk_names[] = {
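Hoisting get_gpio() out of hdmi_bind() replaces a GCC-only nested function (clang does not support the extension) with an ordinary static helper; the variables it used to capture, dev and of_node, become explicit parameters. The shape of the refactor, reduced to a standalone sketch:

#include <stdio.h>

/* Before: a nested function implicitly capturing its enclosing scope
 * (a GCC extension). After: a static helper with explicit parameters. */
static int get_value(const char *dev, const char *name)
{
	printf("%s: looking up %s\n", dev, name);
	return 42;	/* stand-in for the of_get_named_gpio() lookup */
}

static int bind(const char *dev)
{
	int a = get_value(dev, "clk");	/* every former capture is passed in */
	int b = get_value(dev, "data");
	return a + b;
}

int main(void)
{
	printf("%d\n", bind("hdmi"));
	return 0;
}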
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 902d7685d441..f408b69486a8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -15,19 +15,25 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#ifdef CONFIG_COMMON_CLK
18#include <linux/clk.h> 19#include <linux/clk.h>
19#include <linux/clk-provider.h> 20#include <linux/clk-provider.h>
21#endif
20 22
21#include "hdmi.h" 23#include "hdmi.h"
22 24
23struct hdmi_phy_8960 { 25struct hdmi_phy_8960 {
24 struct hdmi_phy base; 26 struct hdmi_phy base;
25 struct hdmi *hdmi; 27 struct hdmi *hdmi;
28#ifdef CONFIG_COMMON_CLK
26 struct clk_hw pll_hw; 29 struct clk_hw pll_hw;
27 struct clk *pll; 30 struct clk *pll;
28 unsigned long pixclk; 31 unsigned long pixclk;
32#endif
29}; 33};
30#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base) 34#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
35
36#ifdef CONFIG_COMMON_CLK
31#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw) 37#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
32 38
33/* 39/*
@@ -374,7 +380,7 @@ static struct clk_init_data pll_init = {
374 .parent_names = hdmi_pll_parents, 380 .parent_names = hdmi_pll_parents,
375 .num_parents = ARRAY_SIZE(hdmi_pll_parents), 381 .num_parents = ARRAY_SIZE(hdmi_pll_parents),
376}; 382};
377 383#endif
378 384
379/* 385/*
380 * HDMI Phy: 386 * HDMI Phy:
@@ -480,12 +486,15 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
480{ 486{
481 struct hdmi_phy_8960 *phy_8960; 487 struct hdmi_phy_8960 *phy_8960;
482 struct hdmi_phy *phy = NULL; 488 struct hdmi_phy *phy = NULL;
483 int ret, i; 489 int ret;
490#ifdef CONFIG_COMMON_CLK
491 int i;
484 492
485 /* sanity check: */ 493 /* sanity check: */
486 for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++) 494 for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
487 if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate)) 495 if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
488 return ERR_PTR(-EINVAL); 496 return ERR_PTR(-EINVAL);
497#endif
489 498
490 phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL); 499 phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
491 if (!phy_8960) { 500 if (!phy_8960) {
@@ -499,6 +508,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
499 508
500 phy_8960->hdmi = hdmi; 509 phy_8960->hdmi = hdmi;
501 510
511#ifdef CONFIG_COMMON_CLK
502 phy_8960->pll_hw.init = &pll_init; 512 phy_8960->pll_hw.init = &pll_init;
503 phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw); 513 phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
504 if (IS_ERR(phy_8960->pll)) { 514 if (IS_ERR(phy_8960->pll)) {
@@ -506,6 +516,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
506 phy_8960->pll = NULL; 516 phy_8960->pll = NULL;
507 goto fail; 517 goto fail;
508 } 518 }
519#endif
509 520
510 return phy; 521 return phy;
511 522
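The PLL-related members and code of the 8960 PHY now live behind CONFIG_COMMON_CLK, so a kernel built without the common clock framework still compiles and links this file. A minimal sketch of keeping the data and the code that touches it under one guard (a local macro stands in for the Kconfig symbol):

#include <stdio.h>

#define CONFIG_COMMON_CLK 1	/* flip to 0 to mimic the !COMMON_CLK build */

struct phy {
	int id;
#if CONFIG_COMMON_CLK
	int pll;		/* only exists when the clk framework does */
#endif
};

static void phy_init(struct phy *p)
{
	p->id = 8960;
#if CONFIG_COMMON_CLK
	p->pll = 1;		/* guarded together with the member above */
#endif
}

int main(void)
{
	struct phy p;
	phy_init(&p);
	printf("phy %d ready\n", p.id);
	return 0;
}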
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 74cebb51e8c2..c6c80ea28c35 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -397,6 +397,7 @@ static void mdp4_crtc_prepare(struct drm_crtc *crtc)
397 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 397 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
398 DBG("%s", mdp4_crtc->name); 398 DBG("%s", mdp4_crtc->name);
399 /* make sure we hold a ref to mdp clks while setting up mode: */ 399 /* make sure we hold a ref to mdp clks while setting up mode: */
400 drm_crtc_vblank_get(crtc);
400 mdp4_enable(get_kms(crtc)); 401 mdp4_enable(get_kms(crtc));
401 mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 402 mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
402} 403}
@@ -407,6 +408,7 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc)
407 crtc_flush(crtc); 408 crtc_flush(crtc);
408 /* drop the ref to mdp clk's that we got in prepare: */ 409 /* drop the ref to mdp clk's that we got in prepare: */
409 mdp4_disable(get_kms(crtc)); 410 mdp4_disable(get_kms(crtc));
411 drm_crtc_vblank_put(crtc);
410} 412}
411 413
412static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 414static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b447c01ad89c..fcf95680413d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -52,7 +52,7 @@ module_param(reglog, bool, 0600);
52#define reglog 0 52#define reglog 0
53#endif 53#endif
54 54
55static char *vram; 55static char *vram = "16m";
56MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)"); 56MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
57module_param(vram, charp, 0); 57module_param(vram, charp, 0);
58 58
@@ -974,12 +974,11 @@ static int msm_pdev_probe(struct platform_device *pdev)
974 974
975 for (i = 0; i < ARRAY_SIZE(devnames); i++) { 975 for (i = 0; i < ARRAY_SIZE(devnames); i++) {
976 struct device *dev; 976 struct device *dev;
977 int ret;
978 977
979 dev = bus_find_device_by_name(&platform_bus_type, 978 dev = bus_find_device_by_name(&platform_bus_type,
980 NULL, devnames[i]); 979 NULL, devnames[i]);
981 if (!dev) { 980 if (!dev) {
982 dev_info(master, "still waiting for %s\n", devnames[i]); 981 dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
983 return -EPROBE_DEFER; 982 return -EPROBE_DEFER;
984 } 983 }
985 984
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 9c5221ce391a..ab5bfd2d0ebf 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -143,7 +143,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
143 ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr); 143 ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
144 if (ret) { 144 if (ret) {
145 dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret); 145 dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
146 goto fail; 146 goto fail_unlock;
147 } 147 }
148 148
149 fbi = framebuffer_alloc(0, dev->dev); 149 fbi = framebuffer_alloc(0, dev->dev);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 099af483fdf0..7acdaa5688b7 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -27,8 +27,8 @@ struct msm_iommu {
27static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, 27static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
28 unsigned long iova, int flags, void *arg) 28 unsigned long iova, int flags, void *arg)
29{ 29{
30 DBG("*** fault: iova=%08lx, flags=%d", iova, flags); 30 pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
31 return -ENOSYS; 31 return 0;
32} 32}
33 33
34static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt) 34static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
index 8701968a9743..30a2911878f8 100644
--- a/drivers/gpu/drm/nouveau/core/core/parent.c
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -86,7 +86,7 @@ nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size)
86 sclass = nv_parent(parent)->sclass; 86 sclass = nv_parent(parent)->sclass;
87 while (sclass) { 87 while (sclass) {
88 if (++nr < size) 88 if (++nr < size)
89 lclass[nr] = sclass->oclass->handle; 89 lclass[nr] = sclass->oclass->handle & 0xffff;
90 sclass = sclass->sclass; 90 sclass = sclass->sclass;
91 } 91 }
92 92
@@ -96,7 +96,7 @@ nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size)
96 if (engine && (oclass = engine->sclass)) { 96 if (engine && (oclass = engine->sclass)) {
97 while (oclass->ofuncs) { 97 while (oclass->ofuncs) {
98 if (++nr < size) 98 if (++nr < size)
99 lclass[nr] = oclass->handle; 99 lclass[nr] = oclass->handle & 0xffff;
100 oclass++; 100 oclass++;
101 } 101 }
102 } 102 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index 0a44459844e3..05a278bab247 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -200,7 +200,6 @@ nvc0_bar_init(struct nouveau_object *object)
200 200
201 nv_mask(priv, 0x000200, 0x00000100, 0x00000000); 201 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
202 nv_mask(priv, 0x000200, 0x00000100, 0x00000100); 202 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
203 nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
204 203
205 nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12); 204 nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
206 if (priv->bar[0].mem) 205 if (priv->bar[0].mem)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index b19a2b3c1081..32f28dc73ef2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -60,6 +60,7 @@ nvc0_fb_init(struct nouveau_object *object)
60 60
61 if (priv->r100c10_page) 61 if (priv->r100c10_page)
62 nv_wr32(priv, 0x100c10, priv->r100c10 >> 8); 62 nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
63 nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
63 return 0; 64 return 0;
64} 65}
65 66
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
index b54b582e72c4..d5d65285efe5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
@@ -98,6 +98,7 @@ static int
98gf100_ltc_init(struct nouveau_object *object) 98gf100_ltc_init(struct nouveau_object *object)
99{ 99{
100 struct nvkm_ltc_priv *priv = (void *)object; 100 struct nvkm_ltc_priv *priv = (void *)object;
101 u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
101 int ret; 102 int ret;
102 103
103 ret = nvkm_ltc_init(priv); 104 ret = nvkm_ltc_init(priv);
@@ -107,6 +108,7 @@ gf100_ltc_init(struct nouveau_object *object)
107 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ 108 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
108 nv_wr32(priv, 0x17e8d8, priv->ltc_nr); 109 nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
109 nv_wr32(priv, 0x17e8d4, priv->tag_base); 110 nv_wr32(priv, 0x17e8d4, priv->tag_base);
111 nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
110 return 0; 112 return 0;
111} 113}
112 114
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
index ea716569745d..b39b5d0eb8f9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
@@ -28,6 +28,7 @@ static int
28gk104_ltc_init(struct nouveau_object *object) 28gk104_ltc_init(struct nouveau_object *object)
29{ 29{
30 struct nvkm_ltc_priv *priv = (void *)object; 30 struct nvkm_ltc_priv *priv = (void *)object;
31 u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
31 int ret; 32 int ret;
32 33
33 ret = nvkm_ltc_init(priv); 34 ret = nvkm_ltc_init(priv);
@@ -37,6 +38,7 @@ gk104_ltc_init(struct nouveau_object *object)
37 nv_wr32(priv, 0x17e8d8, priv->ltc_nr); 38 nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
38 nv_wr32(priv, 0x17e000, priv->ltc_nr); 39 nv_wr32(priv, 0x17e000, priv->ltc_nr);
39 nv_wr32(priv, 0x17e8d4, priv->tag_base); 40 nv_wr32(priv, 0x17e8d4, priv->tag_base);
41 nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
40 return 0; 42 return 0;
41} 43}
42 44
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
index 4761b2e9af00..a4de64289762 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
@@ -98,6 +98,7 @@ static int
98gm107_ltc_init(struct nouveau_object *object) 98gm107_ltc_init(struct nouveau_object *object)
99{ 99{
100 struct nvkm_ltc_priv *priv = (void *)object; 100 struct nvkm_ltc_priv *priv = (void *)object;
101 u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
101 int ret; 102 int ret;
102 103
103 ret = nvkm_ltc_init(priv); 104 ret = nvkm_ltc_init(priv);
@@ -106,6 +107,7 @@ gm107_ltc_init(struct nouveau_object *object)
106 107
107 nv_wr32(priv, 0x17e27c, priv->ltc_nr); 108 nv_wr32(priv, 0x17e27c, priv->ltc_nr);
108 nv_wr32(priv, 0x17e278, priv->tag_base); 109 nv_wr32(priv, 0x17e278, priv->tag_base);
110 nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
109 return 0; 111 return 0;
110} 112}
111 113
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 279206997e5c..622424692b3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -46,7 +46,6 @@ static struct nouveau_dsm_priv {
46 bool dsm_detected; 46 bool dsm_detected;
47 bool optimus_detected; 47 bool optimus_detected;
48 acpi_handle dhandle; 48 acpi_handle dhandle;
49 acpi_handle other_handle;
50 acpi_handle rom_handle; 49 acpi_handle rom_handle;
51} nouveau_dsm_priv; 50} nouveau_dsm_priv;
52 51
@@ -222,10 +221,9 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
222 if (!dhandle) 221 if (!dhandle)
223 return false; 222 return false;
224 223
225 if (!acpi_has_method(dhandle, "_DSM")) { 224 if (!acpi_has_method(dhandle, "_DSM"))
226 nouveau_dsm_priv.other_handle = dhandle;
227 return false; 225 return false;
228 } 226
229 if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102, 227 if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
230 1 << NOUVEAU_DSM_POWER)) 228 1 << NOUVEAU_DSM_POWER))
231 retval |= NOUVEAU_DSM_HAS_MUX; 229 retval |= NOUVEAU_DSM_HAS_MUX;
@@ -301,16 +299,6 @@ static bool nouveau_dsm_detect(void)
301 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 299 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
302 acpi_method_name); 300 acpi_method_name);
303 nouveau_dsm_priv.dsm_detected = true; 301 nouveau_dsm_priv.dsm_detected = true;
304 /*
305 * On some systems hotplug events are generated for the device
306 * being switched off when _DSM is executed. They cause ACPI
307 * hotplug to trigger and attempt to remove the device from
308 * the system, which causes it to break down. Prevent that from
309 * happening by setting the no_hotplug flag for the involved
310 * ACPI device objects.
311 */
312 acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
313 acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
314 ret = true; 302 ret = true;
315 } 303 }
316 304
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 250a5e88c751..9c3af96a7153 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -627,6 +627,7 @@ int nouveau_pmops_suspend(struct device *dev)
627 627
628 pci_save_state(pdev); 628 pci_save_state(pdev);
629 pci_disable_device(pdev); 629 pci_disable_device(pdev);
630 pci_ignore_hotplug(pdev);
630 pci_set_power_state(pdev, PCI_D3hot); 631 pci_set_power_state(pdev, PCI_D3hot);
631 return 0; 632 return 0;
632} 633}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 18d55d447248..c7592ec8ecb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -108,7 +108,16 @@ void
108nouveau_vga_fini(struct nouveau_drm *drm) 108nouveau_vga_fini(struct nouveau_drm *drm)
109{ 109{
110 struct drm_device *dev = drm->dev; 110 struct drm_device *dev = drm->dev;
111 bool runtime = false;
112
113 if (nouveau_runtime_pm == 1)
114 runtime = true;
115 if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
116 runtime = true;
117
111 vga_switcheroo_unregister_client(dev->pdev); 118 vga_switcheroo_unregister_client(dev->pdev);
119 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
120 vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
112 vga_client_register(dev->pdev, NULL, NULL, NULL); 121 vga_client_register(dev->pdev, NULL, NULL, NULL);
113} 122}
114 123
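nouveau_vga_fini() now recomputes the same runtime-PM decision made at probe time: forced on when nouveau_runtime_pm == 1, auto-enabled (-1) when Optimus or the v1 DSM mux is present, and the switcheroo PM domain is torn down only in the runtime, v1-DSM-without-Optimus case. The predicate as a standalone check:

#include <stdbool.h>
#include <stdio.h>

static bool runtime_enabled(int param, bool optimus, bool v1_dsm)
{
	if (param == 1)
		return true;			/* explicitly forced on */
	if (param == -1 && (optimus || v1_dsm))
		return true;			/* auto: only on hybrid machines */
	return false;
}

int main(void)
{
	bool optimus = false, v1_dsm = true;	/* a v1-DSM, non-Optimus laptop */
	bool runtime = runtime_enabled(-1, optimus, v1_dsm);

	/* fini tears the PM domain down exactly when setup registered it */
	printf("unregister pm ops: %d\n", runtime && v1_dsm && !optimus);
	return 0;
}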
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 0013ad0db9ef..f77b7135ee4c 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o \ 76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ 77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ 78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
79 si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ 79 si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \
80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ 80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ 81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ 82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index b1e11f8434e2..ac14b67621d3 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -405,16 +405,13 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
405 u8 msg[DP_DPCD_SIZE]; 405 u8 msg[DP_DPCD_SIZE];
406 int ret; 406 int ret;
407 407
408 char dpcd_hex_dump[DP_DPCD_SIZE * 3];
409
410 ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, 408 ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
411 DP_DPCD_SIZE); 409 DP_DPCD_SIZE);
412 if (ret > 0) { 410 if (ret > 0) {
413 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); 411 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
414 412
415 hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd), 413 DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
416 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); 414 dig_connector->dpcd);
417 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
418 415
419 radeon_dp_probe_oui(radeon_connector); 416 radeon_dp_probe_oui(radeon_connector);
420 417
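The DPCD dump switches to the kernel's %*ph printk extension, which formats a small buffer as space-separated hex bytes and removes the on-stack hex_dump_to_buffer() staging array. Standard printf has no %*ph, so a userspace approximation of the output looks like:

#include <stdio.h>

static void print_hex(const unsigned char *buf, int len)
{
	/* roughly what "%*ph" expands to: space-separated hex bytes */
	for (int i = 0; i < len; i++)
		printf("%02x%s", buf[i], i + 1 < len ? " " : "\n");
}

int main(void)
{
	unsigned char dpcd[] = { 0x12, 0x14, 0xc2, 0x44 };
	print_hex(dpcd, sizeof(dpcd));
	return 0;
}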
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 022561e28707..d416bb2ff48d 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -869,6 +869,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
869 WREG32_SMC(CG_THERMAL_CTRL, tmp); 869 WREG32_SMC(CG_THERMAL_CTRL, tmp);
870#endif 870#endif
871 871
872 rdev->pm.dpm.thermal.min_temp = low_temp;
873 rdev->pm.dpm.thermal.max_temp = high_temp;
874
872 return 0; 875 return 0;
873} 876}
874 877
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b625646bf3e2..3d546c606b43 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3483,7 +3483,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
3483 u32 mc_shared_chmap, mc_arb_ramcfg; 3483 u32 mc_shared_chmap, mc_arb_ramcfg;
3484 u32 hdp_host_path_cntl; 3484 u32 hdp_host_path_cntl;
3485 u32 tmp; 3485 u32 tmp;
3486 int i, j, k; 3486 int i, j;
3487 3487
3488 switch (rdev->family) { 3488 switch (rdev->family) {
3489 case CHIP_BONAIRE: 3489 case CHIP_BONAIRE:
@@ -3544,6 +3544,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
3544 (rdev->pdev->device == 0x130B) || 3544 (rdev->pdev->device == 0x130B) ||
3545 (rdev->pdev->device == 0x130E) || 3545 (rdev->pdev->device == 0x130E) ||
3546 (rdev->pdev->device == 0x1315) || 3546 (rdev->pdev->device == 0x1315) ||
3547 (rdev->pdev->device == 0x1318) ||
3547 (rdev->pdev->device == 0x131B)) { 3548 (rdev->pdev->device == 0x131B)) {
3548 rdev->config.cik.max_cu_per_sh = 4; 3549 rdev->config.cik.max_cu_per_sh = 4;
3549 rdev->config.cik.max_backends_per_se = 1; 3550 rdev->config.cik.max_backends_per_se = 1;
@@ -3672,12 +3673,11 @@ static void cik_gpu_init(struct radeon_device *rdev)
3672 rdev->config.cik.max_sh_per_se, 3673 rdev->config.cik.max_sh_per_se,
3673 rdev->config.cik.max_backends_per_se); 3674 rdev->config.cik.max_backends_per_se);
3674 3675
3676 rdev->config.cik.active_cus = 0;
3675 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { 3677 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
3676 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { 3678 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
3677 for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) { 3679 rdev->config.cik.active_cus +=
3678 rdev->config.cik.active_cus += 3680 hweight32(cik_get_cu_active_bitmap(rdev, i, j));
3679 hweight32(cik_get_cu_active_bitmap(rdev, i, j));
3680 }
3681 } 3681 }
3682 } 3682 }
3683 3683
@@ -3801,7 +3801,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3801 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 3801 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3802 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2)); 3802 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
3803 radeon_ring_write(ring, 0xDEADBEEF); 3803 radeon_ring_write(ring, 0xDEADBEEF);
3804 radeon_ring_unlock_commit(rdev, ring); 3804 radeon_ring_unlock_commit(rdev, ring, false);
3805 3805
3806 for (i = 0; i < rdev->usec_timeout; i++) { 3806 for (i = 0; i < rdev->usec_timeout; i++) {
3807 tmp = RREG32(scratch); 3807 tmp = RREG32(scratch);
@@ -3920,6 +3920,17 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
3920 radeon_ring_write(ring, 0); 3920 radeon_ring_write(ring, 0);
3921} 3921}
3922 3922
3923/**
3924 * cik_semaphore_ring_emit - emit a semaphore on the CP ring
3925 *
3926 * @rdev: radeon_device pointer
3927 * @ring: radeon ring buffer object
3928 * @semaphore: radeon semaphore object
3929 * @emit_wait: Is this a semaphore wait?
3930 *
3931 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
3932 * from running ahead of semaphore waits.
3933 */
3923bool cik_semaphore_ring_emit(struct radeon_device *rdev, 3934bool cik_semaphore_ring_emit(struct radeon_device *rdev,
3924 struct radeon_ring *ring, 3935 struct radeon_ring *ring,
3925 struct radeon_semaphore *semaphore, 3936 struct radeon_semaphore *semaphore,
@@ -3932,6 +3943,12 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
3932 radeon_ring_write(ring, lower_32_bits(addr)); 3943 radeon_ring_write(ring, lower_32_bits(addr));
3933 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); 3944 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
3934 3945
3946 if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
3947 /* Prevent the PFP from running ahead of the semaphore wait */
3948 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3949 radeon_ring_write(ring, 0x0);
3950 }
3951
3935 return true; 3952 return true;
3936} 3953}
3937 3954
@@ -4004,7 +4021,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
4004 return r; 4021 return r;
4005 } 4022 }
4006 4023
4007 radeon_ring_unlock_commit(rdev, ring); 4024 radeon_ring_unlock_commit(rdev, ring, false);
4008 radeon_semaphore_free(rdev, &sem, *fence); 4025 radeon_semaphore_free(rdev, &sem, *fence);
4009 4026
4010 return r; 4027 return r;
@@ -4103,7 +4120,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
4103 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2); 4120 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
4104 ib.ptr[2] = 0xDEADBEEF; 4121 ib.ptr[2] = 0xDEADBEEF;
4105 ib.length_dw = 3; 4122 ib.length_dw = 3;
4106 r = radeon_ib_schedule(rdev, &ib, NULL); 4123 r = radeon_ib_schedule(rdev, &ib, NULL, false);
4107 if (r) { 4124 if (r) {
4108 radeon_scratch_free(rdev, scratch); 4125 radeon_scratch_free(rdev, scratch);
4109 radeon_ib_free(rdev, &ib); 4126 radeon_ib_free(rdev, &ib);
@@ -4324,7 +4341,7 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
4324 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 4341 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
4325 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ 4342 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
4326 4343
4327 radeon_ring_unlock_commit(rdev, ring); 4344 radeon_ring_unlock_commit(rdev, ring, false);
4328 4345
4329 return 0; 4346 return 0;
4330} 4347}
@@ -4786,7 +4803,7 @@ struct bonaire_mqd
4786 */ 4803 */
4787static int cik_cp_compute_resume(struct radeon_device *rdev) 4804static int cik_cp_compute_resume(struct radeon_device *rdev)
4788{ 4805{
4789 int r, i, idx; 4806 int r, i, j, idx;
4790 u32 tmp; 4807 u32 tmp;
4791 bool use_doorbell = true; 4808 bool use_doorbell = true;
4792 u64 hqd_gpu_addr; 4809 u64 hqd_gpu_addr;
@@ -4905,7 +4922,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
4905 mqd->queue_state.cp_hqd_pq_wptr= 0; 4922 mqd->queue_state.cp_hqd_pq_wptr= 0;
4906 if (RREG32(CP_HQD_ACTIVE) & 1) { 4923 if (RREG32(CP_HQD_ACTIVE) & 1) {
4907 WREG32(CP_HQD_DEQUEUE_REQUEST, 1); 4924 WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
4908 for (i = 0; i < rdev->usec_timeout; i++) { 4925 for (j = 0; j < rdev->usec_timeout; j++) {
4909 if (!(RREG32(CP_HQD_ACTIVE) & 1)) 4926 if (!(RREG32(CP_HQD_ACTIVE) & 1))
4910 break; 4927 break;
4911 udelay(1); 4928 udelay(1);
@@ -5732,20 +5749,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
5732 WREG32(0x15D8, 0); 5749 WREG32(0x15D8, 0);
5733 WREG32(0x15DC, 0); 5750 WREG32(0x15DC, 0);
5734 5751
5735 /* empty context1-15 */ 5752 /* restore context1-15 */
5736 /* FIXME start with 4G, once using 2 level pt switch to full
5737 * vm size space
5738 */
5739 /* set vm size, must be a multiple of 4 */ 5753 /* set vm size, must be a multiple of 4 */
5740 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); 5754 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
5741 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); 5755 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
5742 for (i = 1; i < 16; i++) { 5756 for (i = 1; i < 16; i++) {
5743 if (i < 8) 5757 if (i < 8)
5744 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), 5758 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
5745 rdev->gart.table_addr >> 12); 5759 rdev->vm_manager.saved_table_addr[i]);
5746 else 5760 else
5747 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), 5761 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
5748 rdev->gart.table_addr >> 12); 5762 rdev->vm_manager.saved_table_addr[i]);
5749 } 5763 }
5750 5764
5751 /* enable context1-15 */ 5765 /* enable context1-15 */
@@ -5810,6 +5824,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
5810 */ 5824 */
5811static void cik_pcie_gart_disable(struct radeon_device *rdev) 5825static void cik_pcie_gart_disable(struct radeon_device *rdev)
5812{ 5826{
5827 unsigned i;
5828
5829 for (i = 1; i < 16; ++i) {
5830 uint32_t reg;
5831 if (i < 8)
5832 reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
5833 else
5834 reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
5835 rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
5836 }
5837
5813 /* Disable all tables */ 5838 /* Disable all tables */
5814 WREG32(VM_CONTEXT0_CNTL, 0); 5839 WREG32(VM_CONTEXT0_CNTL, 0);
5815 WREG32(VM_CONTEXT1_CNTL, 0); 5840 WREG32(VM_CONTEXT1_CNTL, 0);
@@ -5958,14 +5983,14 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5958 5983
5959 /* update SH_MEM_* regs */ 5984 /* update SH_MEM_* regs */
5960 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5985 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5961 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5986 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
5962 WRITE_DATA_DST_SEL(0))); 5987 WRITE_DATA_DST_SEL(0)));
5963 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); 5988 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
5964 radeon_ring_write(ring, 0); 5989 radeon_ring_write(ring, 0);
5965 radeon_ring_write(ring, VMID(vm->id)); 5990 radeon_ring_write(ring, VMID(vm->id));
5966 5991
5967 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); 5992 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
5968 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5993 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
5969 WRITE_DATA_DST_SEL(0))); 5994 WRITE_DATA_DST_SEL(0)));
5970 radeon_ring_write(ring, SH_MEM_BASES >> 2); 5995 radeon_ring_write(ring, SH_MEM_BASES >> 2);
5971 radeon_ring_write(ring, 0); 5996 radeon_ring_write(ring, 0);
@@ -5976,7 +6001,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5976 radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */ 6001 radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
5977 6002
5978 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6003 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5979 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 6004 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
5980 WRITE_DATA_DST_SEL(0))); 6005 WRITE_DATA_DST_SEL(0)));
5981 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); 6006 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
5982 radeon_ring_write(ring, 0); 6007 radeon_ring_write(ring, 0);
@@ -5987,7 +6012,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5987 6012
5988 /* bits 0-15 are the VM contexts0-15 */ 6013 /* bits 0-15 are the VM contexts0-15 */
5989 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6014 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5990 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 6015 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
5991 WRITE_DATA_DST_SEL(0))); 6016 WRITE_DATA_DST_SEL(0)));
5992 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 6017 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
5993 radeon_ring_write(ring, 0); 6018 radeon_ring_write(ring, 0);
@@ -7726,17 +7751,17 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
7726 wptr = RREG32(IH_RB_WPTR); 7751 wptr = RREG32(IH_RB_WPTR);
7727 7752
7728 if (wptr & RB_OVERFLOW) { 7753 if (wptr & RB_OVERFLOW) {
7754 wptr &= ~RB_OVERFLOW;
7729 /* When a ring buffer overflow happens, start parsing interrupts 7755 /* When a ring buffer overflow happens, start parsing interrupts
7730 * from the last vector not overwritten (wptr + 16). Hopefully 7756 * from the last vector not overwritten (wptr + 16). Hopefully
7731 * this should allow us to catch up. 7757 * this should allow us to catch up.
7732 */ 7758 */
7733 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", 7759 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
7734 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); 7760 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
7735 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; 7761 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
7736 tmp = RREG32(IH_RB_CNTL); 7762 tmp = RREG32(IH_RB_CNTL);
7737 tmp |= IH_WPTR_OVERFLOW_CLEAR; 7763 tmp |= IH_WPTR_OVERFLOW_CLEAR;
7738 WREG32(IH_RB_CNTL, tmp); 7764 WREG32(IH_RB_CNTL, tmp);
7739 wptr &= ~RB_OVERFLOW;
7740 } 7765 }
7741 return (wptr & rdev->ih.ptr_mask); 7766 return (wptr & rdev->ih.ptr_mask);
7742} 7767}
@@ -8226,6 +8251,7 @@ restart_ih:
8226 /* wptr/rptr are in bytes! */ 8251 /* wptr/rptr are in bytes! */
8227 rptr += 16; 8252 rptr += 16;
8228 rptr &= rdev->ih.ptr_mask; 8253 rptr &= rdev->ih.ptr_mask;
8254 WREG32(IH_RB_RPTR, rptr);
8229 } 8255 }
8230 if (queue_hotplug) 8256 if (queue_hotplug)
8231 schedule_work(&rdev->hotplug_work); 8257 schedule_work(&rdev->hotplug_work);
@@ -8234,7 +8260,6 @@ restart_ih:
8234 if (queue_thermal) 8260 if (queue_thermal)
8235 schedule_work(&rdev->pm.dpm.thermal.work); 8261 schedule_work(&rdev->pm.dpm.thermal.work);
8236 rdev->ih.rptr = rptr; 8262 rdev->ih.rptr = rptr;
8237 WREG32(IH_RB_RPTR, rdev->ih.rptr);
8238 atomic_set(&rdev->ih.lock, 0); 8263 atomic_set(&rdev->ih.lock, 0);
8239 8264
8240 /* make sure wptr hasn't changed while processing */ 8265 /* make sure wptr hasn't changed while processing */
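
The companion change in the cik_irq_process() hunks moves the IH_RB_RPTR register write into the drain loop, so the hardware sees the read pointer advance after every consumed vector instead of only once at the end; that shrinks the window in which the ring can falsely appear full. A minimal model of the reordering, where publish() stands in for the WREG32(IH_RB_RPTR, ...) of the real code:

    #include <stdint.h>
    #include <stdio.h>

    #define PTR_MASK 0xFFFFu   /* illustrative */

    static void publish(uint32_t rptr)   /* models WREG32(IH_RB_RPTR, ...) */
    {
        printf("publish rptr=%u\n", rptr);
    }

    static void drain(uint32_t rptr, uint32_t wptr)
    {
        while (rptr != wptr) {
            /* ... decode one 16-byte vector at rptr ... */
            rptr = (rptr + 16) & PTR_MASK;
            publish(rptr);   /* now inside the loop, per the patch */
        }
    }

    int main(void) { drain(0, 64); return 0; }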
@@ -9538,6 +9563,9 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
9538 int ret, i; 9563 int ret, i;
9539 u16 tmp16; 9564 u16 tmp16;
9540 9565
9566 if (pci_is_root_bus(rdev->pdev->bus))
9567 return;
9568
9541 if (radeon_pcie_gen2 == 0) 9569 if (radeon_pcie_gen2 == 0)
9542 return; 9570 return;
9543 9571
@@ -9764,7 +9792,8 @@ static void cik_program_aspm(struct radeon_device *rdev)
9764 if (orig != data) 9792 if (orig != data)
9765 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data); 9793 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
9766 9794
9767 if (!disable_clkreq) { 9795 if (!disable_clkreq &&
9796 !pci_is_root_bus(rdev->pdev->bus)) {
9768 struct pci_dev *root = rdev->pdev->bus->self; 9797 struct pci_dev *root = rdev->pdev->bus->self;
9769 u32 lnkcap; 9798 u32 lnkcap;
9770 9799
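
Both cik_pcie_gen3_enable() and cik_program_aspm() now bail out when the GPU sits directly on a PCI root bus: with no upstream bridge, rdev->pdev->bus->self is NULL and the code that reads the bridge's link capabilities would dereference it. A toy model of the guard pattern; the struct shapes are invented, and the real pci_is_root_bus() tests bus->parent rather than bus->self, but the control flow is the same.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins for the PCI topology; only the shape matters here. */
    struct pci_dev;
    struct pci_bus { struct pci_dev *self; };   /* upstream bridge or NULL */
    struct pci_dev { struct pci_bus *bus; };

    static bool pci_is_root_bus(const struct pci_bus *bus)
    {
        return bus->self == NULL;   /* root bus has no parent bridge */
    }

    static void tune_pcie_link(struct pci_dev *gpu)
    {
        if (pci_is_root_bus(gpu->bus)) {
            puts("root bus: skip link tuning");
            return;
        }
        /* gpu->bus->self is guaranteed non-NULL past the guard */
        puts("program upstream bridge");
    }

    int main(void)
    {
        struct pci_bus root = { .self = NULL };
        struct pci_dev gpu = { .bus = &root };

        tune_pcie_link(&gpu);
        return 0;
    }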
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index bcf480510ac2..c4ffa54b1e3d 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -489,13 +489,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
489{ 489{
490 int r; 490 int r;
491 491
492 /* Reset dma */
493 WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
494 RREG32(SRBM_SOFT_RESET);
495 udelay(50);
496 WREG32(SRBM_SOFT_RESET, 0);
497 RREG32(SRBM_SOFT_RESET);
498
499 r = cik_sdma_load_microcode(rdev); 492 r = cik_sdma_load_microcode(rdev);
500 if (r) 493 if (r)
501 return r; 494 return r;
@@ -596,7 +589,7 @@ int cik_copy_dma(struct radeon_device *rdev,
596 return r; 589 return r;
597 } 590 }
598 591
599 radeon_ring_unlock_commit(rdev, ring); 592 radeon_ring_unlock_commit(rdev, ring, false);
600 radeon_semaphore_free(rdev, &sem, *fence); 593 radeon_semaphore_free(rdev, &sem, *fence);
601 594
602 return r; 595 return r;
@@ -638,7 +631,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
638 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr)); 631 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
639 radeon_ring_write(ring, 1); /* number of DWs to follow */ 632 radeon_ring_write(ring, 1); /* number of DWs to follow */
640 radeon_ring_write(ring, 0xDEADBEEF); 633 radeon_ring_write(ring, 0xDEADBEEF);
641 radeon_ring_unlock_commit(rdev, ring); 634 radeon_ring_unlock_commit(rdev, ring, false);
642 635
643 for (i = 0; i < rdev->usec_timeout; i++) { 636 for (i = 0; i < rdev->usec_timeout; i++) {
644 tmp = readl(ptr); 637 tmp = readl(ptr);
@@ -695,7 +688,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
695 ib.ptr[4] = 0xDEADBEEF; 688 ib.ptr[4] = 0xDEADBEEF;
696 ib.length_dw = 5; 689 ib.length_dw = 5;
697 690
698 r = radeon_ib_schedule(rdev, &ib, NULL); 691 r = radeon_ib_schedule(rdev, &ib, NULL, false);
699 if (r) { 692 if (r) {
700 radeon_ib_free(rdev, &ib); 693 radeon_ib_free(rdev, &ib);
701 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 694 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
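
Two themes in the cik_sdma.c hunks recur through the rest of the series. First, the SRBM soft reset is dropped from the resume path, so resume no longer clobbers engine state with a redundant reset. Second, radeon_ring_unlock_commit() and radeon_ib_schedule() grow a bool hdp_flush argument: kernel-internal paths such as ring and IB tests pass false, while userspace command submission passes true so the HDP cache is flushed before the GPU reads the freshly written commands. A standalone model of threading such a flag through a commit helper:

    #include <stdbool.h>
    #include <stdio.h>

    static void hdp_flush(void)          /* models the HDP cache flush */
    {
        puts("flush HDP");
    }

    /* Commit queued packets; flush only when the caller asked for it. */
    static void ring_commit(bool do_hdp_flush)
    {
        if (do_hdp_flush)
            hdp_flush();
        puts("bump wptr");
    }

    int main(void)
    {
        ring_commit(false);   /* kernel-internal test submission */
        ring_commit(true);    /* userspace CS submission */
        return 0;
    }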
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 4fedd14e670a..e50807c29f69 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2869,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
2869 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2869 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2870 radeon_ring_write(ring, 0); 2870 radeon_ring_write(ring, 0);
2871 radeon_ring_write(ring, 0); 2871 radeon_ring_write(ring, 0);
2872 radeon_ring_unlock_commit(rdev, ring); 2872 radeon_ring_unlock_commit(rdev, ring, false);
2873 2873
2874 cp_me = 0xff; 2874 cp_me = 0xff;
2875 WREG32(CP_ME_CNTL, cp_me); 2875 WREG32(CP_ME_CNTL, cp_me);
@@ -2912,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
2912 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 2912 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2913 radeon_ring_write(ring, 0x00000010); /* */ 2913 radeon_ring_write(ring, 0x00000010); /* */
2914 2914
2915 radeon_ring_unlock_commit(rdev, ring); 2915 radeon_ring_unlock_commit(rdev, ring, false);
2916 2916
2917 return 0; 2917 return 0;
2918} 2918}
@@ -4749,17 +4749,17 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
4749 wptr = RREG32(IH_RB_WPTR); 4749 wptr = RREG32(IH_RB_WPTR);
4750 4750
4751 if (wptr & RB_OVERFLOW) { 4751 if (wptr & RB_OVERFLOW) {
4752 wptr &= ~RB_OVERFLOW;
4752 /* When a ring buffer overflow happen start parsing interrupt 4753 /* When a ring buffer overflow happen start parsing interrupt
4753 * from the last not overwritten vector (wptr + 16). Hopefully 4754 * from the last not overwritten vector (wptr + 16). Hopefully
4754 * this should allow us to catchup. 4755 * this should allow us to catchup.
4755 */ 4756 */
4756 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", 4757 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
4757 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); 4758 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
4758 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; 4759 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
4759 tmp = RREG32(IH_RB_CNTL); 4760 tmp = RREG32(IH_RB_CNTL);
4760 tmp |= IH_WPTR_OVERFLOW_CLEAR; 4761 tmp |= IH_WPTR_OVERFLOW_CLEAR;
4761 WREG32(IH_RB_CNTL, tmp); 4762 WREG32(IH_RB_CNTL, tmp);
4762 wptr &= ~RB_OVERFLOW;
4763 } 4763 }
4764 return (wptr & rdev->ih.ptr_mask); 4764 return (wptr & rdev->ih.ptr_mask);
4765} 4765}
@@ -5137,6 +5137,7 @@ restart_ih:
5137 /* wptr/rptr are in bytes! */ 5137 /* wptr/rptr are in bytes! */
5138 rptr += 16; 5138 rptr += 16;
5139 rptr &= rdev->ih.ptr_mask; 5139 rptr &= rdev->ih.ptr_mask;
5140 WREG32(IH_RB_RPTR, rptr);
5140 } 5141 }
5141 if (queue_hotplug) 5142 if (queue_hotplug)
5142 schedule_work(&rdev->hotplug_work); 5143 schedule_work(&rdev->hotplug_work);
@@ -5145,7 +5146,6 @@ restart_ih:
5145 if (queue_thermal && rdev->pm.dpm_enabled) 5146 if (queue_thermal && rdev->pm.dpm_enabled)
5146 schedule_work(&rdev->pm.dpm.thermal.work); 5147 schedule_work(&rdev->pm.dpm.thermal.work);
5147 rdev->ih.rptr = rptr; 5148 rdev->ih.rptr = rptr;
5148 WREG32(IH_RB_RPTR, rdev->ih.rptr);
5149 atomic_set(&rdev->ih.lock, 0); 5149 atomic_set(&rdev->ih.lock, 0);
5150 5150
5151 /* make sure wptr hasn't changed while processing */ 5151 /* make sure wptr hasn't changed while processing */
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 478caefe0fef..afaba388c36d 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -155,7 +155,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
155 return r; 155 return r;
156 } 156 }
157 157
158 radeon_ring_unlock_commit(rdev, ring); 158 radeon_ring_unlock_commit(rdev, ring, false);
159 radeon_semaphore_free(rdev, &sem, *fence); 159 radeon_semaphore_free(rdev, &sem, *fence);
160 160
161 return r; 161 return r;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 9ef8c38f2d66..67cb472d188c 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -33,6 +33,8 @@
33#define KV_MINIMUM_ENGINE_CLOCK 800 33#define KV_MINIMUM_ENGINE_CLOCK 800
34#define SMC_RAM_END 0x40000 34#define SMC_RAM_END 0x40000
35 35
36static int kv_enable_nb_dpm(struct radeon_device *rdev,
37 bool enable);
36static void kv_init_graphics_levels(struct radeon_device *rdev); 38static void kv_init_graphics_levels(struct radeon_device *rdev);
37static int kv_calculate_ds_divider(struct radeon_device *rdev); 39static int kv_calculate_ds_divider(struct radeon_device *rdev);
38static int kv_calculate_nbps_level_settings(struct radeon_device *rdev); 40static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
@@ -1295,6 +1297,9 @@ void kv_dpm_disable(struct radeon_device *rdev)
1295{ 1297{
1296 kv_smc_bapm_enable(rdev, false); 1298 kv_smc_bapm_enable(rdev, false);
1297 1299
1300 if (rdev->family == CHIP_MULLINS)
1301 kv_enable_nb_dpm(rdev, false);
1302
1298 /* powerup blocks */ 1303 /* powerup blocks */
1299 kv_dpm_powergate_acp(rdev, false); 1304 kv_dpm_powergate_acp(rdev, false);
1300 kv_dpm_powergate_samu(rdev, false); 1305 kv_dpm_powergate_samu(rdev, false);
@@ -1438,14 +1443,14 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
1438 return kv_enable_uvd_dpm(rdev, !gate); 1443 return kv_enable_uvd_dpm(rdev, !gate);
1439} 1444}
1440 1445
1441static u8 kv_get_vce_boot_level(struct radeon_device *rdev) 1446static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
1442{ 1447{
1443 u8 i; 1448 u8 i;
1444 struct radeon_vce_clock_voltage_dependency_table *table = 1449 struct radeon_vce_clock_voltage_dependency_table *table =
1445 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1450 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1446 1451
1447 for (i = 0; i < table->count; i++) { 1452 for (i = 0; i < table->count; i++) {
1448 if (table->entries[i].evclk >= 0) /* XXX */ 1453 if (table->entries[i].evclk >= evclk)
1449 break; 1454 break;
1450 } 1455 }
1451 1456
@@ -1468,7 +1473,7 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,
1468 if (pi->caps_stable_p_state) 1473 if (pi->caps_stable_p_state)
1469 pi->vce_boot_level = table->count - 1; 1474 pi->vce_boot_level = table->count - 1;
1470 else 1475 else
1471 pi->vce_boot_level = kv_get_vce_boot_level(rdev); 1476 pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);
1472 1477
1473 ret = kv_copy_bytes_to_smc(rdev, 1478 ret = kv_copy_bytes_to_smc(rdev,
1474 pi->dpm_table_start + 1479 pi->dpm_table_start +
@@ -1769,15 +1774,24 @@ static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
1769 return ret; 1774 return ret;
1770} 1775}
1771 1776
1772static int kv_enable_nb_dpm(struct radeon_device *rdev) 1777static int kv_enable_nb_dpm(struct radeon_device *rdev,
1778 bool enable)
1773{ 1779{
1774 struct kv_power_info *pi = kv_get_pi(rdev); 1780 struct kv_power_info *pi = kv_get_pi(rdev);
1775 int ret = 0; 1781 int ret = 0;
1776 1782
1777 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { 1783 if (enable) {
1778 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable); 1784 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
1779 if (ret == 0) 1785 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
1780 pi->nb_dpm_enabled = true; 1786 if (ret == 0)
1787 pi->nb_dpm_enabled = true;
1788 }
1789 } else {
1790 if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
1791 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
1792 if (ret == 0)
1793 pi->nb_dpm_enabled = false;
1794 }
1781 } 1795 }
1782 1796
1783 return ret; 1797 return ret;
@@ -1864,7 +1878,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
1864 } 1878 }
1865 kv_update_sclk_t(rdev); 1879 kv_update_sclk_t(rdev);
1866 if (rdev->family == CHIP_MULLINS) 1880 if (rdev->family == CHIP_MULLINS)
1867 kv_enable_nb_dpm(rdev); 1881 kv_enable_nb_dpm(rdev, true);
1868 } 1882 }
1869 } else { 1883 } else {
1870 if (pi->enable_dpm) { 1884 if (pi->enable_dpm) {
@@ -1889,7 +1903,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
1889 } 1903 }
1890 kv_update_acp_boot_level(rdev); 1904 kv_update_acp_boot_level(rdev);
1891 kv_update_sclk_t(rdev); 1905 kv_update_sclk_t(rdev);
1892 kv_enable_nb_dpm(rdev); 1906 kv_enable_nb_dpm(rdev, true);
1893 } 1907 }
1894 } 1908 }
1895 1909
@@ -2726,7 +2740,10 @@ int kv_dpm_init(struct radeon_device *rdev)
2726 pi->caps_sclk_ds = true; 2740 pi->caps_sclk_ds = true;
2727 pi->enable_auto_thermal_throttling = true; 2741 pi->enable_auto_thermal_throttling = true;
2728 pi->disable_nb_ps3_in_battery = false; 2742 pi->disable_nb_ps3_in_battery = false;
2729 pi->bapm_enable = true; 2743 if (radeon_bapm == 0)
2744 pi->bapm_enable = false;
2745 else
2746 pi->bapm_enable = true;
2730 pi->voltage_drop_t = 0; 2747 pi->voltage_drop_t = 0;
2731 pi->caps_sclk_throttle_low_notification = false; 2748 pi->caps_sclk_throttle_low_notification = false;
2732 pi->caps_fps = false; /* true? */ 2749 pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 327b85f7fd0d..3faee58946dd 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1271,7 +1271,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
1271 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); 1271 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
1272 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn); 1272 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
1273 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), 1273 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
1274 rdev->gart.table_addr >> 12); 1274 rdev->vm_manager.saved_table_addr[i]);
1275 } 1275 }
1276 1276
1277 /* enable context1-7 */ 1277 /* enable context1-7 */
@@ -1303,6 +1303,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
1303 1303
1304static void cayman_pcie_gart_disable(struct radeon_device *rdev) 1304static void cayman_pcie_gart_disable(struct radeon_device *rdev)
1305{ 1305{
1306 unsigned i;
1307
1308 for (i = 1; i < 8; ++i) {
1309 rdev->vm_manager.saved_table_addr[i] = RREG32(
1310 VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
1311 }
1312
1306 /* Disable all tables */ 1313 /* Disable all tables */
1307 WREG32(VM_CONTEXT0_CNTL, 0); 1314 WREG32(VM_CONTEXT0_CNTL, 0);
1308 WREG32(VM_CONTEXT1_CNTL, 0); 1315 WREG32(VM_CONTEXT1_CNTL, 0);
@@ -1505,7 +1512,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
1505 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 1512 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1506 radeon_ring_write(ring, 0); 1513 radeon_ring_write(ring, 0);
1507 radeon_ring_write(ring, 0); 1514 radeon_ring_write(ring, 0);
1508 radeon_ring_unlock_commit(rdev, ring); 1515 radeon_ring_unlock_commit(rdev, ring, false);
1509 1516
1510 cayman_cp_enable(rdev, true); 1517 cayman_cp_enable(rdev, true);
1511 1518
@@ -1547,7 +1554,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
1547 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 1554 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1548 radeon_ring_write(ring, 0x00000010); /* */ 1555 radeon_ring_write(ring, 0x00000010); /* */
1549 1556
1550 radeon_ring_unlock_commit(rdev, ring); 1557 radeon_ring_unlock_commit(rdev, ring, false);
1551 1558
1552 /* XXX init other rings */ 1559 /* XXX init other rings */
1553 1560
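
In ni.c, cayman_pcie_gart_disable() now snapshots the per-context page-directory base registers into vm_manager.saved_table_addr[] before the tables are turned off, and cayman_pcie_gart_enable() programs those saved values back instead of unconditionally rewriting rdev->gart.table_addr, so VM contexts come back from a suspend/resume or reset with their original directories. A standalone save/restore model with invented register storage:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_VM 8

    static uint32_t regs[NUM_VM];    /* models the PD base registers */
    static uint32_t saved[NUM_VM];   /* models saved_table_addr[] */

    static void gart_disable(void)
    {
        for (unsigned i = 1; i < NUM_VM; ++i)
            saved[i] = regs[i];      /* snapshot before teardown */
        /* ... disable all contexts ... */
    }

    static void gart_enable(void)
    {
        for (unsigned i = 1; i < NUM_VM; ++i)
            regs[i] = saved[i];      /* restore, do not overwrite */
    }

    int main(void)
    {
        regs[1] = 0x1234;
        gart_disable();
        regs[1] = 0;         /* lost across reset */
        gart_enable();
        printf("ctx1 PD = 0x%X\n", regs[1]);
        return 0;
    }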
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 8a3e6221cece..f26f0a9fb522 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
191 u32 reg_offset, wb_offset; 191 u32 reg_offset, wb_offset;
192 int i, r; 192 int i, r;
193 193
194 /* Reset dma */
195 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
196 RREG32(SRBM_SOFT_RESET);
197 udelay(50);
198 WREG32(SRBM_SOFT_RESET, 0);
199
200 for (i = 0; i < 2; i++) { 194 for (i = 0; i < 2; i++) {
201 if (i == 0) { 195 if (i == 0) {
202 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 196 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 04b5940b8923..b0098e792e62 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -821,6 +821,20 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
821 return RREG32(RADEON_CRTC2_CRNT_FRAME); 821 return RREG32(RADEON_CRTC2_CRNT_FRAME);
822} 822}
823 823
824/**
825 * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
826 * rdev: radeon device structure
827 * ring: ring buffer struct for emitting packets
828 */
829static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
830{
831 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
832 radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
833 RADEON_HDP_READ_BUFFER_INVALIDATE);
834 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
835 radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
836}
837
824/* Who ever call radeon_fence_emit should call ring_lock and ask 838/* Who ever call radeon_fence_emit should call ring_lock and ask
825 * for enough space (today caller are ib schedule and buffer move) */ 839 * for enough space (today caller are ib schedule and buffer move) */
826void r100_fence_ring_emit(struct radeon_device *rdev, 840void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -925,7 +939,7 @@ int r100_copy_blit(struct radeon_device *rdev,
925 if (fence) { 939 if (fence) {
926 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); 940 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
927 } 941 }
928 radeon_ring_unlock_commit(rdev, ring); 942 radeon_ring_unlock_commit(rdev, ring, false);
929 return r; 943 return r;
930} 944}
931 945
@@ -958,7 +972,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
958 RADEON_ISYNC_ANY3D_IDLE2D | 972 RADEON_ISYNC_ANY3D_IDLE2D |
959 RADEON_ISYNC_WAIT_IDLEGUI | 973 RADEON_ISYNC_WAIT_IDLEGUI |
960 RADEON_ISYNC_CPSCRATCH_IDLEGUI); 974 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
961 radeon_ring_unlock_commit(rdev, ring); 975 radeon_ring_unlock_commit(rdev, ring, false);
962} 976}
963 977
964 978
@@ -1056,20 +1070,6 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
1056 (void)RREG32(RADEON_CP_RB_WPTR); 1070 (void)RREG32(RADEON_CP_RB_WPTR);
1057} 1071}
1058 1072
1059/**
1060 * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
1061 * rdev: radeon device structure
1062 * ring: ring buffer struct for emitting packets
1063 */
1064void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
1065{
1066 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
1067 radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
1068 RADEON_HDP_READ_BUFFER_INVALIDATE);
1069 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
1070 radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
1071}
1072
1073static void r100_cp_load_microcode(struct radeon_device *rdev) 1073static void r100_cp_load_microcode(struct radeon_device *rdev)
1074{ 1074{
1075 const __be32 *fw_data; 1075 const __be32 *fw_data;
@@ -3638,7 +3638,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3638 } 3638 }
3639 radeon_ring_write(ring, PACKET0(scratch, 0)); 3639 radeon_ring_write(ring, PACKET0(scratch, 0));
3640 radeon_ring_write(ring, 0xDEADBEEF); 3640 radeon_ring_write(ring, 0xDEADBEEF);
3641 radeon_ring_unlock_commit(rdev, ring); 3641 radeon_ring_unlock_commit(rdev, ring, false);
3642 for (i = 0; i < rdev->usec_timeout; i++) { 3642 for (i = 0; i < rdev->usec_timeout; i++) {
3643 tmp = RREG32(scratch); 3643 tmp = RREG32(scratch);
3644 if (tmp == 0xDEADBEEF) { 3644 if (tmp == 0xDEADBEEF) {
@@ -3700,7 +3700,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3700 ib.ptr[6] = PACKET2(0); 3700 ib.ptr[6] = PACKET2(0);
3701 ib.ptr[7] = PACKET2(0); 3701 ib.ptr[7] = PACKET2(0);
3702 ib.length_dw = 8; 3702 ib.length_dw = 8;
3703 r = radeon_ib_schedule(rdev, &ib, NULL); 3703 r = radeon_ib_schedule(rdev, &ib, NULL, false);
3704 if (r) { 3704 if (r) {
3705 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3705 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3706 goto free_ib; 3706 goto free_ib;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 58f0473aa73f..67780374a652 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -121,7 +121,7 @@ int r200_copy_dma(struct radeon_device *rdev,
121 if (fence) { 121 if (fence) {
122 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); 122 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
123 } 123 }
124 radeon_ring_unlock_commit(rdev, ring); 124 radeon_ring_unlock_commit(rdev, ring, false);
125 return r; 125 return r;
126} 126}
127 127
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 75b30338c226..1bc4704034ce 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -295,7 +295,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
295 radeon_ring_write(ring, 295 radeon_ring_write(ring,
296 R300_GEOMETRY_ROUND_NEAREST | 296 R300_GEOMETRY_ROUND_NEAREST |
297 R300_COLOR_ROUND_NEAREST); 297 R300_COLOR_ROUND_NEAREST);
298 radeon_ring_unlock_commit(rdev, ring); 298 radeon_ring_unlock_commit(rdev, ring, false);
299} 299}
300 300
301static void r300_errata(struct radeon_device *rdev) 301static void r300_errata(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 802b19220a21..2828605aef3f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -219,7 +219,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
219 radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1)); 219 radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
220 radeon_ring_write(ring, rdev->config.r300.resync_scratch); 220 radeon_ring_write(ring, rdev->config.r300.resync_scratch);
221 radeon_ring_write(ring, 0xDEADBEEF); 221 radeon_ring_write(ring, 0xDEADBEEF);
222 radeon_ring_unlock_commit(rdev, ring); 222 radeon_ring_unlock_commit(rdev, ring, false);
223} 223}
224 224
225static void r420_cp_errata_fini(struct radeon_device *rdev) 225static void r420_cp_errata_fini(struct radeon_device *rdev)
@@ -232,7 +232,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev)
232 radeon_ring_lock(rdev, ring, 8); 232 radeon_ring_lock(rdev, ring, 8);
233 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); 233 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
234 radeon_ring_write(ring, R300_RB3D_DC_FINISH); 234 radeon_ring_write(ring, R300_RB3D_DC_FINISH);
235 radeon_ring_unlock_commit(rdev, ring); 235 radeon_ring_unlock_commit(rdev, ring, false);
236 radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); 236 radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
237} 237}
238 238
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c70a504d96af..ea5c9af722ef 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1812,7 +1812,6 @@ static void r600_gpu_init(struct radeon_device *rdev)
1812{ 1812{
1813 u32 tiling_config; 1813 u32 tiling_config;
1814 u32 ramcfg; 1814 u32 ramcfg;
1815 u32 cc_rb_backend_disable;
1816 u32 cc_gc_shader_pipe_config; 1815 u32 cc_gc_shader_pipe_config;
1817 u32 tmp; 1816 u32 tmp;
1818 int i, j; 1817 int i, j;
@@ -1939,29 +1938,20 @@ static void r600_gpu_init(struct radeon_device *rdev)
1939 } 1938 }
1940 tiling_config |= BANK_SWAPS(1); 1939 tiling_config |= BANK_SWAPS(1);
1941 1940
1942 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1943 tmp = R6XX_MAX_BACKENDS -
1944 r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
1945 if (tmp < rdev->config.r600.max_backends) {
1946 rdev->config.r600.max_backends = tmp;
1947 }
1948
1949 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; 1941 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
1950 tmp = R6XX_MAX_PIPES -
1951 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
1952 if (tmp < rdev->config.r600.max_pipes) {
1953 rdev->config.r600.max_pipes = tmp;
1954 }
1955 tmp = R6XX_MAX_SIMDS -
1956 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1957 if (tmp < rdev->config.r600.max_simds) {
1958 rdev->config.r600.max_simds = tmp;
1959 }
1960 tmp = rdev->config.r600.max_simds - 1942 tmp = rdev->config.r600.max_simds -
1961 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); 1943 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1962 rdev->config.r600.active_simds = tmp; 1944 rdev->config.r600.active_simds = tmp;
1963 1945
1964 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; 1946 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1947 tmp = 0;
1948 for (i = 0; i < rdev->config.r600.max_backends; i++)
1949 tmp |= (1 << i);
1950 /* if all the backends are disabled, fix it up here */
1951 if ((disabled_rb_mask & tmp) == tmp) {
1952 for (i = 0; i < rdev->config.r600.max_backends; i++)
1953 disabled_rb_mask &= ~(1 << i);
1954 }
1965 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; 1955 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1966 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends, 1956 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1967 R6XX_MAX_BACKENDS, disabled_rb_mask); 1957 R6XX_MAX_BACKENDS, disabled_rb_mask);
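
The r600_gpu_init() hunk stops second-guessing max_backends/max_pipes/max_simds from the harvest fuses and instead sanitizes disabled_rb_mask directly: if the fuse mask claims that every backend the configuration expects is disabled (which would leave the remap helper with nothing to enable), those bits are cleared so the configured backends stay usable. The fix-up in isolation:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fixup_rb_mask(uint32_t disabled_rb_mask,
                                  unsigned max_backends)
    {
        uint32_t tmp = 0;
        unsigned i;

        for (i = 0; i < max_backends; i++)
            tmp |= 1u << i;
        /* if all the expected backends read as disabled, fix it up here */
        if ((disabled_rb_mask & tmp) == tmp)
            disabled_rb_mask &= ~tmp;
        return disabled_rb_mask;
    }

    int main(void)
    {
        printf("0x%X\n", fixup_rb_mask(0xFu, 4));   /* -> 0x0 */
        printf("0x%X\n", fixup_rb_mask(0x3u, 4));   /* unchanged: 0x3 */
        return 0;
    }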
@@ -2547,7 +2537,7 @@ int r600_cp_start(struct radeon_device *rdev)
2547 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2537 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2548 radeon_ring_write(ring, 0); 2538 radeon_ring_write(ring, 0);
2549 radeon_ring_write(ring, 0); 2539 radeon_ring_write(ring, 0);
2550 radeon_ring_unlock_commit(rdev, ring); 2540 radeon_ring_unlock_commit(rdev, ring, false);
2551 2541
2552 cp_me = 0xff; 2542 cp_me = 0xff;
2553 WREG32(R_0086D8_CP_ME_CNTL, cp_me); 2543 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2683,7 +2673,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2683 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2673 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2684 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2674 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2685 radeon_ring_write(ring, 0xDEADBEEF); 2675 radeon_ring_write(ring, 0xDEADBEEF);
2686 radeon_ring_unlock_commit(rdev, ring); 2676 radeon_ring_unlock_commit(rdev, ring, false);
2687 for (i = 0; i < rdev->usec_timeout; i++) { 2677 for (i = 0; i < rdev->usec_timeout; i++) {
2688 tmp = RREG32(scratch); 2678 tmp = RREG32(scratch);
2689 if (tmp == 0xDEADBEEF) 2679 if (tmp == 0xDEADBEEF)
@@ -2753,6 +2743,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
2753 } 2743 }
2754} 2744}
2755 2745
2746/**
2747 * r600_semaphore_ring_emit - emit a semaphore on the CP ring
2748 *
2749 * @rdev: radeon_device pointer
2750 * @ring: radeon ring buffer object
2751 * @semaphore: radeon semaphore object
 2752 * @emit_wait: Is this a semaphore wait?
2753 *
2754 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
2755 * from running ahead of semaphore waits.
2756 */
2756bool r600_semaphore_ring_emit(struct radeon_device *rdev, 2757bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2757 struct radeon_ring *ring, 2758 struct radeon_ring *ring,
2758 struct radeon_semaphore *semaphore, 2759 struct radeon_semaphore *semaphore,
@@ -2768,6 +2769,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2768 radeon_ring_write(ring, lower_32_bits(addr)); 2769 radeon_ring_write(ring, lower_32_bits(addr));
2769 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2770 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2770 2771
2772 /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
2773 if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
2774 /* Prevent the PFP from running ahead of the semaphore wait */
2775 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2776 radeon_ring_write(ring, 0x0);
2777 }
2778
2771 return true; 2779 return true;
2772} 2780}
2773 2781
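
r600_semaphore_ring_emit() now follows every semaphore wait with a PFP_SYNC_ME packet on Evergreen and newer (the define added to r600d.h notes the packet exists from r7xx on, but it is only emitted for CHIP_CEDAR+ here). Without it the prefetch parser can race ahead of the micro engine and fetch indirect buffers before the semaphore is actually signalled. A standalone sketch of appending the two extra dwords; the PACKET3 header encoding below is illustrative:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PACKET3_PFP_SYNC_ME 0x42
    /* Illustrative PACKET3 header layout: type 3, opcode, dword count. */
    #define PACKET3(op, n) \
        ((3u << 30) | (((op) & 0xFFu) << 8) | ((n) & 0x3FFFu))

    static void emit_semaphore_wait(uint32_t *ring, unsigned *w, bool newer)
    {
        /* ... the MEM_SEMAPHORE packet itself would be emitted here ... */
        if (newer) {
            /* keep the PFP from running ahead of the semaphore wait */
            ring[(*w)++] = PACKET3(PACKET3_PFP_SYNC_ME, 0);
            ring[(*w)++] = 0x0;
        }
    }

    int main(void)
    {
        uint32_t ring[8];
        unsigned w = 0;

        emit_semaphore_wait(ring, &w, true);
        printf("emitted %u dwords, hdr=0x%08X\n", w, ring[0]);
        return 0;
    }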
@@ -2845,7 +2853,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
2845 return r; 2853 return r;
2846 } 2854 }
2847 2855
2848 radeon_ring_unlock_commit(rdev, ring); 2856 radeon_ring_unlock_commit(rdev, ring, false);
2849 radeon_semaphore_free(rdev, &sem, *fence); 2857 radeon_semaphore_free(rdev, &sem, *fence);
2850 2858
2851 return r; 2859 return r;
@@ -3165,7 +3173,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3165 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 3173 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3166 ib.ptr[2] = 0xDEADBEEF; 3174 ib.ptr[2] = 0xDEADBEEF;
3167 ib.length_dw = 3; 3175 ib.length_dw = 3;
3168 r = radeon_ib_schedule(rdev, &ib, NULL); 3176 r = radeon_ib_schedule(rdev, &ib, NULL, false);
3169 if (r) { 3177 if (r) {
3170 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3178 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3171 goto free_ib; 3179 goto free_ib;
@@ -3784,17 +3792,17 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3784 wptr = RREG32(IH_RB_WPTR); 3792 wptr = RREG32(IH_RB_WPTR);
3785 3793
3786 if (wptr & RB_OVERFLOW) { 3794 if (wptr & RB_OVERFLOW) {
3795 wptr &= ~RB_OVERFLOW;
3787 /* When a ring buffer overflow happen start parsing interrupt 3796 /* When a ring buffer overflow happen start parsing interrupt
3788 * from the last not overwritten vector (wptr + 16). Hopefully 3797 * from the last not overwritten vector (wptr + 16). Hopefully
3789 * this should allow us to catchup. 3798 * this should allow us to catchup.
3790 */ 3799 */
3791 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", 3800 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
3792 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); 3801 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3793 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; 3802 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3794 tmp = RREG32(IH_RB_CNTL); 3803 tmp = RREG32(IH_RB_CNTL);
3795 tmp |= IH_WPTR_OVERFLOW_CLEAR; 3804 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3796 WREG32(IH_RB_CNTL, tmp); 3805 WREG32(IH_RB_CNTL, tmp);
3797 wptr &= ~RB_OVERFLOW;
3798 } 3806 }
3799 return (wptr & rdev->ih.ptr_mask); 3807 return (wptr & rdev->ih.ptr_mask);
3800} 3808}
@@ -4040,6 +4048,7 @@ restart_ih:
4040 /* wptr/rptr are in bytes! */ 4048 /* wptr/rptr are in bytes! */
4041 rptr += 16; 4049 rptr += 16;
4042 rptr &= rdev->ih.ptr_mask; 4050 rptr &= rdev->ih.ptr_mask;
4051 WREG32(IH_RB_RPTR, rptr);
4043 } 4052 }
4044 if (queue_hotplug) 4053 if (queue_hotplug)
4045 schedule_work(&rdev->hotplug_work); 4054 schedule_work(&rdev->hotplug_work);
@@ -4048,7 +4057,6 @@ restart_ih:
4048 if (queue_thermal && rdev->pm.dpm_enabled) 4057 if (queue_thermal && rdev->pm.dpm_enabled)
4049 schedule_work(&rdev->pm.dpm.thermal.work); 4058 schedule_work(&rdev->pm.dpm.thermal.work);
4050 rdev->ih.rptr = rptr; 4059 rdev->ih.rptr = rptr;
4051 WREG32(IH_RB_RPTR, rdev->ih.rptr);
4052 atomic_set(&rdev->ih.lock, 0); 4060 atomic_set(&rdev->ih.lock, 0);
4053 4061
4054 /* make sure wptr hasn't changed while processing */ 4062 /* make sure wptr hasn't changed while processing */
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 4969cef44a19..a908daa006d2 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev)
124 u32 rb_bufsz; 124 u32 rb_bufsz;
125 int r; 125 int r;
126 126
127 /* Reset dma */
128 if (rdev->family >= CHIP_RV770)
129 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
130 else
131 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
132 RREG32(SRBM_SOFT_RESET);
133 udelay(50);
134 WREG32(SRBM_SOFT_RESET, 0);
135
136 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0); 127 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
137 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); 128 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
138 129
@@ -261,7 +252,7 @@ int r600_dma_ring_test(struct radeon_device *rdev,
261 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); 252 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
262 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); 253 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
263 radeon_ring_write(ring, 0xDEADBEEF); 254 radeon_ring_write(ring, 0xDEADBEEF);
264 radeon_ring_unlock_commit(rdev, ring); 255 radeon_ring_unlock_commit(rdev, ring, false);
265 256
266 for (i = 0; i < rdev->usec_timeout; i++) { 257 for (i = 0; i < rdev->usec_timeout; i++) {
267 tmp = readl(ptr); 258 tmp = readl(ptr);
@@ -368,7 +359,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
368 ib.ptr[3] = 0xDEADBEEF; 359 ib.ptr[3] = 0xDEADBEEF;
369 ib.length_dw = 4; 360 ib.length_dw = 4;
370 361
371 r = radeon_ib_schedule(rdev, &ib, NULL); 362 r = radeon_ib_schedule(rdev, &ib, NULL, false);
372 if (r) { 363 if (r) {
373 radeon_ib_free(rdev, &ib); 364 radeon_ib_free(rdev, &ib);
374 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 365 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
@@ -493,7 +484,7 @@ int r600_copy_dma(struct radeon_device *rdev,
493 return r; 484 return r;
494 } 485 }
495 486
496 radeon_ring_unlock_commit(rdev, ring); 487 radeon_ring_unlock_commit(rdev, ring, false);
497 radeon_semaphore_free(rdev, &sem, *fence); 488 radeon_semaphore_free(rdev, &sem, *fence);
498 489
499 return r; 490 return r;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index f94e7a9afe75..31e1052ad3e3 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -44,13 +44,6 @@
44#define R6XX_MAX_PIPES 8 44#define R6XX_MAX_PIPES 8
45#define R6XX_MAX_PIPES_MASK 0xff 45#define R6XX_MAX_PIPES_MASK 0xff
46 46
47/* PTE flags */
48#define PTE_VALID (1 << 0)
49#define PTE_SYSTEM (1 << 1)
50#define PTE_SNOOPED (1 << 2)
51#define PTE_READABLE (1 << 5)
52#define PTE_WRITEABLE (1 << 6)
53
54/* tiling bits */ 47/* tiling bits */
55#define ARRAY_LINEAR_GENERAL 0x00000000 48#define ARRAY_LINEAR_GENERAL 0x00000000
56#define ARRAY_LINEAR_ALIGNED 0x00000001 49#define ARRAY_LINEAR_ALIGNED 0x00000001
@@ -1597,6 +1590,7 @@
1597 */ 1590 */
1598# define PACKET3_CP_DMA_CMD_SAIC (1 << 28) 1591# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
1599# define PACKET3_CP_DMA_CMD_DAIC (1 << 29) 1592# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
1593#define PACKET3_PFP_SYNC_ME 0x42 /* r7xx+ only */
1600#define PACKET3_SURFACE_SYNC 0x43 1594#define PACKET3_SURFACE_SYNC 0x43
1601# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) 1595# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
1602# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */ 1596# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9e1732eb402c..3247bfd14410 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -105,6 +105,8 @@ extern int radeon_vm_size;
105extern int radeon_vm_block_size; 105extern int radeon_vm_block_size;
106extern int radeon_deep_color; 106extern int radeon_deep_color;
107extern int radeon_use_pflipirq; 107extern int radeon_use_pflipirq;
108extern int radeon_bapm;
109extern int radeon_backlight;
108 110
109/* 111/*
110 * Copy from radeon_drv.h so we don't have to include both and have conflicting 112 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -914,6 +916,8 @@ struct radeon_vm_manager {
914 u64 vram_base_offset; 916 u64 vram_base_offset;
915 /* is vm enabled? */ 917 /* is vm enabled? */
916 bool enabled; 918 bool enabled;
919 /* for hw to save the PD addr on suspend/resume */
920 uint32_t saved_table_addr[RADEON_NUM_VM];
917}; 921};
918 922
919/* 923/*
@@ -967,7 +971,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
967 unsigned size); 971 unsigned size);
968void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); 972void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
969int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 973int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
970 struct radeon_ib *const_ib); 974 struct radeon_ib *const_ib, bool hdp_flush);
971int radeon_ib_pool_init(struct radeon_device *rdev); 975int radeon_ib_pool_init(struct radeon_device *rdev);
972void radeon_ib_pool_fini(struct radeon_device *rdev); 976void radeon_ib_pool_fini(struct radeon_device *rdev);
973int radeon_ib_ring_tests(struct radeon_device *rdev); 977int radeon_ib_ring_tests(struct radeon_device *rdev);
@@ -977,8 +981,10 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
977void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); 981void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
978int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); 982int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
979int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); 983int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
980void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); 984void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
981void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); 985 bool hdp_flush);
986void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
987 bool hdp_flush);
982void radeon_ring_undo(struct radeon_ring *ring); 988void radeon_ring_undo(struct radeon_ring *ring);
983void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); 989void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
984int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 990int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index eeeeabe09758..2dd5847f9b98 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -185,7 +185,6 @@ static struct radeon_asic_ring r100_gfx_ring = {
185 .get_rptr = &r100_gfx_get_rptr, 185 .get_rptr = &r100_gfx_get_rptr,
186 .get_wptr = &r100_gfx_get_wptr, 186 .get_wptr = &r100_gfx_get_wptr,
187 .set_wptr = &r100_gfx_set_wptr, 187 .set_wptr = &r100_gfx_set_wptr,
188 .hdp_flush = &r100_ring_hdp_flush,
189}; 188};
190 189
191static struct radeon_asic r100_asic = { 190static struct radeon_asic r100_asic = {
@@ -332,7 +331,6 @@ static struct radeon_asic_ring r300_gfx_ring = {
332 .get_rptr = &r100_gfx_get_rptr, 331 .get_rptr = &r100_gfx_get_rptr,
333 .get_wptr = &r100_gfx_get_wptr, 332 .get_wptr = &r100_gfx_get_wptr,
334 .set_wptr = &r100_gfx_set_wptr, 333 .set_wptr = &r100_gfx_set_wptr,
335 .hdp_flush = &r100_ring_hdp_flush,
336}; 334};
337 335
338static struct radeon_asic r300_asic = { 336static struct radeon_asic r300_asic = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 275a5dc01780..7756bc1e1cd3 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -148,8 +148,7 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
148 struct radeon_ring *ring); 148 struct radeon_ring *ring);
149void r100_gfx_set_wptr(struct radeon_device *rdev, 149void r100_gfx_set_wptr(struct radeon_device *rdev,
150 struct radeon_ring *ring); 150 struct radeon_ring *ring);
151void r100_ring_hdp_flush(struct radeon_device *rdev, 151
152 struct radeon_ring *ring);
153/* 152/*
154 * r200,rv250,rs300,rv280 153 * r200,rv250,rs300,rv280
155 */ 154 */
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 92b2d8dd4735..e74c7e387dde 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
447 } 447 }
448 } 448 }
449 449
450 /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
451 if ((dev->pdev->device == 0x9805) &&
452 (dev->pdev->subsystem_vendor == 0x1734) &&
453 (dev->pdev->subsystem_device == 0x11bd)) {
454 if (*connector_type == DRM_MODE_CONNECTOR_VGA)
455 return false;
456 }
450 457
451 return true; 458 return true;
452} 459}
@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
2281 (controller->ucFanParameters & 2288 (controller->ucFanParameters &
2282 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 2289 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2283 rdev->pm.int_thermal_type = THERMAL_TYPE_KV; 2290 rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
2284 } else if ((controller->ucType == 2291 } else if (controller->ucType ==
2285 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || 2292 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
2286 (controller->ucType == 2293 DRM_INFO("External GPIO thermal controller %s fan control\n",
2287 ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) || 2294 (controller->ucFanParameters &
2288 (controller->ucType == 2295 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2289 ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { 2296 rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
2290 DRM_INFO("Special thermal controller config\n"); 2297 } else if (controller->ucType ==
2298 ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
2299 DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
2300 (controller->ucFanParameters &
2301 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2302 rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
2303 } else if (controller->ucType ==
2304 ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
2305 DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
2306 (controller->ucFanParameters &
2307 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2308 rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
2291 } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { 2309 } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
2292 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", 2310 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
2293 pp_lib_thermal_controller_names[controller->ucType], 2311 pp_lib_thermal_controller_names[controller->ucType],
2294 controller->ucI2cAddress >> 1, 2312 controller->ucI2cAddress >> 1,
2295 (controller->ucFanParameters & 2313 (controller->ucFanParameters &
2296 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 2314 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2315 rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
2297 i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); 2316 i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
2298 rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); 2317 rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
2299 if (rdev->pm.i2c_bus) { 2318 if (rdev->pm.i2c_bus) {
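
The thermal-controller hunk splits the old combined branch, which lumped EXTERNAL_GPIO, ADT7473 and EMC2103 together as a "special config", into one branch per controller, so each type logs its own name and, crucially, records the matching rdev->pm.int_thermal_type; the generic I2C fallback likewise now sets THERMAL_TYPE_EXTERNAL. A compact model of the mapping, with invented controller ids standing in for the ATOM_PP_* values:

    #include <stdio.h>

    /* Controller ids are illustrative, not the real ATOM_PP_* values. */
    enum ctl { CTL_EXT_GPIO = 1, CTL_ADT7473_INT, CTL_EMC2103_INT };
    enum thermal { TH_EXT_GPIO, TH_ADT7473_INT, TH_EMC2103_INT, TH_EXTERNAL };

    static enum thermal classify(int uc_type, const char **name)
    {
        switch (uc_type) {
        case CTL_EXT_GPIO:
            *name = "External GPIO";
            return TH_EXT_GPIO;
        case CTL_ADT7473_INT:
            *name = "ADT7473 with internal";
            return TH_ADT7473_INT;
        case CTL_EMC2103_INT:
            *name = "EMC2103 with internal";
            return TH_EMC2103_INT;
        default:
            *name = "external (i2c)";
            return TH_EXTERNAL;
        }
    }

    int main(void)
    {
        const char *name;
        enum thermal t = classify(CTL_ADT7473_INT, &name);

        printf("%s -> %d\n", name, t);
        return 0;
    }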
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index a9fb0d016d38..8bc7d0bbd3c8 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,7 +33,6 @@ static struct radeon_atpx_priv {
33 bool atpx_detected; 33 bool atpx_detected;
34 /* handle for device - and atpx */ 34 /* handle for device - and atpx */
35 acpi_handle dhandle; 35 acpi_handle dhandle;
36 acpi_handle other_handle;
37 struct radeon_atpx atpx; 36 struct radeon_atpx atpx;
38} radeon_atpx_priv; 37} radeon_atpx_priv;
39 38
@@ -453,10 +452,9 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
453 return false; 452 return false;
454 453
455 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); 454 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
456 if (ACPI_FAILURE(status)) { 455 if (ACPI_FAILURE(status))
457 radeon_atpx_priv.other_handle = dhandle;
458 return false; 456 return false;
459 } 457
460 radeon_atpx_priv.dhandle = dhandle; 458 radeon_atpx_priv.dhandle = dhandle;
461 radeon_atpx_priv.atpx.handle = atpx_handle; 459 radeon_atpx_priv.atpx.handle = atpx_handle;
462 return true; 460 return true;
@@ -540,16 +538,6 @@ static bool radeon_atpx_detect(void)
540 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", 538 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
541 acpi_method_name); 539 acpi_method_name);
542 radeon_atpx_priv.atpx_detected = true; 540 radeon_atpx_priv.atpx_detected = true;
543 /*
544 * On some systems hotplug events are generated for the device
545 * being switched off when ATPX is executed. They cause ACPI
546 * hotplug to trigger and attempt to remove the device from
547 * the system, which causes it to break down. Prevent that from
548 * happening by setting the no_hotplug flag for the involved
549 * ACPI device objects.
550 */
551 acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
552 acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
553 return true; 541 return true;
554 } 542 }
555 return false; 543 return false;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ee712c199b25..83f382e8e40e 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -132,7 +132,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
132 * the buffers used for read only, which doubles the range 132 * the buffers used for read only, which doubles the range
133 * to 0 to 31. 32 is reserved for the kernel driver. 133 * to 0 to 31. 32 is reserved for the kernel driver.
134 */ 134 */
135 priority = (r->flags & 0xf) * 2 + !!r->write_domain; 135 priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
136 + !!r->write_domain;
136 137
137 /* the first reloc of an UVD job is the msg and that must be in 138 /* the first reloc of an UVD job is the msg and that must be in
138 VRAM, also but everything into VRAM on AGP cards to avoid 139 VRAM, also but everything into VRAM on AGP cards to avoid
@@ -450,7 +451,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
450 radeon_vce_note_usage(rdev); 451 radeon_vce_note_usage(rdev);
451 452
452 radeon_cs_sync_rings(parser); 453 radeon_cs_sync_rings(parser);
453 r = radeon_ib_schedule(rdev, &parser->ib, NULL); 454 r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
454 if (r) { 455 if (r) {
455 DRM_ERROR("Failed to schedule IB !\n"); 456 DRM_ERROR("Failed to schedule IB !\n");
456 } 457 }
@@ -541,9 +542,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
541 542
542 if ((rdev->family >= CHIP_TAHITI) && 543 if ((rdev->family >= CHIP_TAHITI) &&
543 (parser->chunk_const_ib_idx != -1)) { 544 (parser->chunk_const_ib_idx != -1)) {
544 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib); 545 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
545 } else { 546 } else {
546 r = radeon_ib_schedule(rdev, &parser->ib, NULL); 547 r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
547 } 548 }
548 549
549out: 550out:
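
radeon_cs.c shows both halves of the series' buffer changes: the magic 0xf mask on relocation flags is replaced by the named RADEON_RELOC_PRIO_MASK when computing buffer priority (masked flags times two, plus one when the buffer is written), and the user CS paths call radeon_ib_schedule() with hdp_flush = true while the kernel-internal tests elsewhere pass false. The priority arithmetic on its own:

    #include <stdint.h>
    #include <stdio.h>

    #define RELOC_PRIO_MASK 0xFu   /* named mask, per the patch */

    /* Read-only buffers land on even priorities, written ones on odd,
     * doubling the usable range to 0..31; 32 stays reserved for the
     * kernel driver, as the surrounding comment explains. */
    static unsigned reloc_priority(uint32_t flags, uint32_t write_domain)
    {
        return (flags & RELOC_PRIO_MASK) * 2 + !!write_domain;
    }

    int main(void)
    {
        printf("%u %u\n", reloc_priority(7, 0), reloc_priority(7, 1));
        return 0;
    }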
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c8ea050c8fa4..12c8329644c4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -123,6 +123,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
123 * https://bugzilla.kernel.org/show_bug.cgi?id=51381 123 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
124 */ 124 */
125 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX }, 125 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
126 /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
127 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
128 */
129 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
126 /* macbook pro 8.2 */ 130 /* macbook pro 8.2 */
127 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, 131 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
128 { 0, 0, 0, 0, 0 }, 132 { 0, 0, 0, 0, 0 },
@@ -1393,7 +1397,7 @@ int radeon_device_init(struct radeon_device *rdev,
1393 1397
1394 r = radeon_init(rdev); 1398 r = radeon_init(rdev);
1395 if (r) 1399 if (r)
1396 return r; 1400 goto failed;
1397 1401
1398 r = radeon_ib_ring_tests(rdev); 1402 r = radeon_ib_ring_tests(rdev);
1399 if (r) 1403 if (r)
@@ -1413,7 +1417,7 @@ int radeon_device_init(struct radeon_device *rdev,
1413 radeon_agp_disable(rdev); 1417 radeon_agp_disable(rdev);
1414 r = radeon_init(rdev); 1418 r = radeon_init(rdev);
1415 if (r) 1419 if (r)
1416 return r; 1420 goto failed;
1417 } 1421 }
1418 1422
1419 if ((radeon_testing & 1)) { 1423 if ((radeon_testing & 1)) {
@@ -1435,6 +1439,11 @@ int radeon_device_init(struct radeon_device *rdev,
1435 DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n"); 1439 DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1436 } 1440 }
1437 return 0; 1441 return 0;
1442
1443failed:
1444 if (runtime)
1445 vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1446 return r;
1438} 1447}
1439 1448
1440static void radeon_debugfs_remove_files(struct radeon_device *rdev); 1449static void radeon_debugfs_remove_files(struct radeon_device *rdev);
@@ -1455,6 +1464,8 @@ void radeon_device_fini(struct radeon_device *rdev)
1455 radeon_bo_evict_vram(rdev); 1464 radeon_bo_evict_vram(rdev);
1456 radeon_fini(rdev); 1465 radeon_fini(rdev);
1457 vga_switcheroo_unregister_client(rdev->pdev); 1466 vga_switcheroo_unregister_client(rdev->pdev);
1467 if (rdev->flags & RADEON_IS_PX)
1468 vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1458 vga_client_register(rdev->pdev, NULL, NULL, NULL); 1469 vga_client_register(rdev->pdev, NULL, NULL, NULL);
1459 if (rdev->rio_mem) 1470 if (rdev->rio_mem)
1460 pci_iounmap(rdev->pdev, rdev->rio_mem); 1471 pci_iounmap(rdev->pdev, rdev->rio_mem);
@@ -1680,8 +1691,8 @@ int radeon_gpu_reset(struct radeon_device *rdev)
1680 radeon_save_bios_scratch_regs(rdev); 1691 radeon_save_bios_scratch_regs(rdev);
1681 /* block TTM */ 1692 /* block TTM */
1682 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1693 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1683 radeon_pm_suspend(rdev);
1684 radeon_suspend(rdev); 1694 radeon_suspend(rdev);
1695 radeon_hpd_fini(rdev);
1685 1696
1686 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1697 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1687 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], 1698 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
@@ -1726,9 +1737,39 @@ retry:
1726 } 1737 }
1727 } 1738 }
1728 1739
1729 radeon_pm_resume(rdev); 1740 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
1741 /* do dpm late init */
1742 r = radeon_pm_late_init(rdev);
1743 if (r) {
1744 rdev->pm.dpm_enabled = false;
1745 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1746 }
1747 } else {
1748 /* resume old pm late */
1749 radeon_pm_resume(rdev);
1750 }
1751
1752 /* init dig PHYs, disp eng pll */
1753 if (rdev->is_atom_bios) {
1754 radeon_atom_encoder_init(rdev);
1755 radeon_atom_disp_eng_pll_init(rdev);
1756 /* turn on the BL */
1757 if (rdev->mode_info.bl_encoder) {
1758 u8 bl_level = radeon_get_backlight_level(rdev,
1759 rdev->mode_info.bl_encoder);
1760 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1761 bl_level);
1762 }
1763 }
1764 /* reset hpd state */
1765 radeon_hpd_init(rdev);
1766
1730 drm_helper_resume_force_mode(rdev->ddev); 1767 drm_helper_resume_force_mode(rdev->ddev);
1731 1768
1769 /* set the power state here in case we are a PX system or headless */
1770 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1771 radeon_pm_compute_clocks(rdev);
1772
1732 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1773 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1733 if (r) { 1774 if (r) {
1734 /* bad news, how to tell it to userspace ? */ 1775 /* bad news, how to tell it to userspace ? */
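
The radeon_device.c changes tighten two paths. radeon_device_init() now unwinds the vga_switcheroo runtime-PM hooks on failure instead of leaking them, and radeon_device_fini() drops them for PX systems. radeon_gpu_reset() no longer wraps the reset in a blanket radeon_pm_suspend()/radeon_pm_resume(); instead it tears down HPD before the reset and, once the rings are restarted, redoes DPM late init (disabling DPM on failure), re-initializes the ATOM encoders and display PLLs, restores the panel backlight level, re-arms HPD, and finally recomputes clocks for PX or headless systems. A condensed, stubbed model of that post-reset ordering:

    #include <stdbool.h>
    #include <stdio.h>

    static bool dpm_late_init(void)    { puts("dpm late init"); return true; }
    static void legacy_pm_resume(void) { puts("legacy pm resume"); }
    static void encoders_init(void)    { puts("encoders + disp PLLs"); }
    static void backlight_restore(void){ puts("restore backlight"); }
    static void hpd_init(void)         { puts("re-arm HPD"); }
    static void compute_clocks(void)   { puts("compute clocks"); }

    static void post_reset(bool use_dpm)
    {
        bool dpm_ok = false;

        if (use_dpm)
            dpm_ok = dpm_late_init();   /* disable DPM if this fails */
        else
            legacy_pm_resume();

        encoders_init();
        backlight_restore();
        hpd_init();
        if (use_dpm && dpm_ok)
            compute_clocks();           /* PX or headless systems */
    }

    int main(void) { post_reset(true); return 0; }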
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 092d067f93e1..f9d17b29b343 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -83,7 +83,7 @@
83 * CIK: 1D and linear tiling modes contain valid PIPE_CONFIG 83 * CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
84 * 2.39.0 - Add INFO query for number of active CUs 84 * 2.39.0 - Add INFO query for number of active CUs
85 * 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting 85 * 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
86 * CS to GPU 86 * CS to GPU on >= r600
87 */ 87 */
88#define KMS_DRIVER_MAJOR 2 88#define KMS_DRIVER_MAJOR 2
89#define KMS_DRIVER_MINOR 40 89#define KMS_DRIVER_MINOR 40
@@ -180,6 +180,8 @@ int radeon_vm_size = 8;
180int radeon_vm_block_size = -1; 180int radeon_vm_block_size = -1;
181int radeon_deep_color = 0; 181int radeon_deep_color = 0;
182int radeon_use_pflipirq = 2; 182int radeon_use_pflipirq = 2;
183int radeon_bapm = -1;
184int radeon_backlight = -1;
183 185
184MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 186MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
185module_param_named(no_wb, radeon_no_wb, int, 0444); 187module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -259,6 +261,12 @@ module_param_named(deep_color, radeon_deep_color, int, 0444);
259MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))"); 261MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
260module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444); 262module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
261 263
264MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
265module_param_named(bapm, radeon_bapm, int, 0444);
266
267MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)");
268module_param_named(backlight, radeon_backlight, int, 0444);
269
262static struct pci_device_id pciidlist[] = { 270static struct pci_device_id pciidlist[] = {
263 radeon_PCI_IDS 271 radeon_PCI_IDS
264}; 272};
@@ -436,6 +444,7 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
436 ret = radeon_suspend_kms(drm_dev, false, false); 444 ret = radeon_suspend_kms(drm_dev, false, false);
437 pci_save_state(pdev); 445 pci_save_state(pdev);
438 pci_disable_device(pdev); 446 pci_disable_device(pdev);
447 pci_ignore_hotplug(pdev);
439 pci_set_power_state(pdev, PCI_D3cold); 448 pci_set_power_state(pdev, PCI_D3cold);
440 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; 449 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
441 450
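
radeon_drv.c declares the two new module parameters from radeon.h (bapm and backlight, both tri-state: 1 enable, 0 disable, -1 auto) and calls pci_ignore_hotplug() in runtime suspend before dropping to D3cold. That replaces the ACPI no_hotplug workaround deleted from radeon_atpx_handler.c above, keeping spurious hotplug removal events from tearing the device down while it is switched off. A stubbed model of the suspend ordering, where the step names mirror the real calls:

    #include <stdio.h>

    static void suspend_kms(void)    { puts("radeon_suspend_kms"); }
    static void save_state(void)     { puts("pci_save_state"); }
    static void disable_device(void) { puts("pci_disable_device"); }
    static void ignore_hotplug(void) { puts("pci_ignore_hotplug"); }
    static void set_d3cold(void)     { puts("pci_set_power_state D3cold"); }

    /* Order matters: mark the device ignore-hotplug before cutting power,
     * so the surprise removal that D3cold can trigger is suppressed. */
    int main(void)
    {
        suspend_kms();
        save_state();
        disable_device();
        ignore_hotplug();
        set_d3cold();
        return 0;
    }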
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3c2094c25b53..15edf23b465c 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -158,10 +158,43 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
 	return ret;
 }
 
+static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
+					 struct drm_connector *connector)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	bool use_bl = false;
+
+	if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)))
+		return;
+
+	if (radeon_backlight == 0) {
+		return;
+	} else if (radeon_backlight == 1) {
+		use_bl = true;
+	} else if (radeon_backlight == -1) {
+		/* Quirks */
+		/* Amilo Xi 2550 only works with acpi bl */
+		if ((rdev->pdev->device == 0x9583) &&
+		    (rdev->pdev->subsystem_vendor == 0x1734) &&
+		    (rdev->pdev->subsystem_device == 0x1107))
+			use_bl = false;
+		else
+			use_bl = true;
+	}
+
+	if (use_bl) {
+		if (rdev->is_atom_bios)
+			radeon_atom_backlight_init(radeon_encoder, connector);
+		else
+			radeon_legacy_backlight_init(radeon_encoder, connector);
+		rdev->mode_info.bl_encoder = radeon_encoder;
+	}
+}
+
 void
 radeon_link_encoder_connector(struct drm_device *dev)
 {
-	struct radeon_device *rdev = dev->dev_private;
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
 	struct drm_encoder *encoder;
@@ -174,13 +207,8 @@ radeon_link_encoder_connector(struct drm_device *dev)
 			radeon_encoder = to_radeon_encoder(encoder);
 			if (radeon_encoder->devices & radeon_connector->devices) {
 				drm_mode_connector_attach_encoder(connector, encoder);
-				if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-					if (rdev->is_atom_bios)
-						radeon_atom_backlight_init(radeon_encoder, connector);
-					else
-						radeon_legacy_backlight_init(radeon_encoder, connector);
-					rdev->mode_info.bl_encoder = radeon_encoder;
-				}
+				if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+					radeon_encoder_add_backlight(radeon_encoder, connector);
 			}
 		}
 	}
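The -1 (auto) path above resolves to a PCI subsystem-ID quirk. The same predicate as a standalone sketch, with the IDs taken from the hunk (the helper name is illustrative):

#include <linux/pci.h>

/* Fujitsu Siemens Amilo Xi 2550 (RV630 0x9583, subsystem 0x1734:0x1107)
 * only works with the ACPI backlight interface, so the native backlight
 * is skipped for it.
 */
static bool demo_needs_acpi_backlight(struct pci_dev *pdev)
{
	return pdev->device == 0x9583 &&
	       pdev->subsystem_vendor == 0x1734 &&
	       pdev->subsystem_device == 0x1107;
}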
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 65b0c213488d..5bf2c0a05827 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -107,6 +107,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
  * @rdev: radeon_device pointer
  * @ib: IB object to schedule
  * @const_ib: Const IB to schedule (SI only)
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
  * Schedule an IB on the associated ring (all asics).
  * Returns 0 on success, error on failure.
@@ -122,7 +123,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
  * to SI there was just a DE IB.
  */
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
-		       struct radeon_ib *const_ib)
+		       struct radeon_ib *const_ib, bool hdp_flush)
 {
 	struct radeon_ring *ring = &rdev->ring[ib->ring];
 	int r = 0;
@@ -176,7 +177,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 	if (ib->vm)
 		radeon_vm_fence(rdev, ib->vm, ib->fence);
 
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, hdp_flush);
 	return 0;
 }
 
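The new hdp_flush flag lets each caller say whether the Host Data Path cache must be flushed when the IB is committed: user command submissions need it so the GPU sees buffers the CPU just wrote, while kernel-internal IBs pass false. A sketch of that caller policy (the wrapper is illustrative; radeon_ib_schedule() is the function patched above):

static int demo_submit_ib(struct radeon_device *rdev, struct radeon_ib *ib,
			  bool from_userspace)
{
	/* Flush the HDP cache only for user submissions; VM updates,
	 * ring tests and other kernel-internal IBs skip the flush.
	 */
	return radeon_ib_schedule(rdev, ib, NULL, from_userspace);
}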
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 23314be49480..164898b0010c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -460,10 +460,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
 	struct radeon_device *rdev = ddev->dev_private;
 	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
-	if ((rdev->flags & RADEON_IS_PX) &&
-	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return snprintf(buf, PAGE_SIZE, "off\n");
-
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
 			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -477,11 +473,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
-	/* Can't set dpm state when the card is off */
-	if ((rdev->flags & RADEON_IS_PX) &&
-	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
-
 	mutex_lock(&rdev->pm.mutex);
 	if (strncmp("battery", buf, strlen("battery")) == 0)
 		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -495,7 +486,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
 		goto fail;
 	}
 	mutex_unlock(&rdev->pm.mutex);
-	radeon_pm_compute_clocks(rdev);
+
+	/* Can't set dpm state when the card is off */
+	if (!(rdev->flags & RADEON_IS_PX) ||
+	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
+		radeon_pm_compute_clocks(rdev);
+
 fail:
 	return count;
 }
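Moving the power check after the state update means a sysfs write while a PX laptop's dGPU is powered down still records the requested state; only the immediate clock recomputation is skipped. The predicate as a standalone sketch (helper name illustrative):

static bool demo_card_accessible(struct radeon_device *rdev,
				 struct drm_device *ddev)
{
	/* Non-PX boards are always reachable; PX boards only while
	 * switched on.
	 */
	return !(rdev->flags & RADEON_IS_PX) ||
	       ddev->switch_power_state == DRM_SWITCH_POWER_ON;
}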
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 5b4e0cf231a0..d65607902537 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -177,16 +177,18 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
  *
  * @rdev: radeon_device pointer
  * @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
  * Update the wptr (write pointer) to tell the GPU to
  * execute new commands on the ring buffer (all asics).
  */
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+			bool hdp_flush)
 {
 	/* If we are emitting the HDP flush via the ring buffer, we need to
 	 * do it before padding.
 	 */
-	if (rdev->asic->ring[ring->idx]->hdp_flush)
+	if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
 		rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
 	/* We pad to match fetch size */
 	while (ring->wptr & ring->align_mask) {
@@ -196,7 +198,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
 	/* If we are emitting the HDP flush via MMIO, we need to do it after
 	 * all CPU writes to VRAM finished.
 	 */
-	if (rdev->asic->mmio_hdp_flush)
+	if (hdp_flush && rdev->asic->mmio_hdp_flush)
 		rdev->asic->mmio_hdp_flush(rdev);
 	radeon_ring_set_wptr(rdev, ring);
 }
@@ -207,12 +209,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
  *
  * @rdev: radeon_device pointer
  * @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
  * Call radeon_ring_commit() then unlock the ring (all asics).
  */
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+			       bool hdp_flush)
 {
-	radeon_ring_commit(rdev, ring);
+	radeon_ring_commit(rdev, ring, hdp_flush);
 	mutex_unlock(&rdev->ring_lock);
 }
 
@@ -372,7 +376,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
 		radeon_ring_write(ring, data[i]);
 	}
 
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	kfree(data);
 	return 0;
 }
@@ -400,9 +404,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
 	/* Allocate ring buffer */
 	if (ring->ring_obj == NULL) {
 		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_GTT,
-				     (rdev->flags & RADEON_IS_PCIE) ?
-				     RADEON_GEM_GTT_WC : 0,
+				     RADEON_GEM_DOMAIN_GTT, 0,
 				     NULL, &ring->ring_obj);
 		if (r) {
 			dev_err(rdev->dev, "(%d) ring create failed\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index dbd6bcde92de..abd6753a570a 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,7 +34,7 @@
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore)
 {
-	uint32_t *cpu_addr;
+	uint64_t *cpu_addr;
 	int i, r;
 
 	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
@@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 			continue;
 		}
 
-		radeon_ring_commit(rdev, &rdev->ring[i]);
+		radeon_ring_commit(rdev, &rdev->ring[i], false);
 		radeon_fence_note_sync(fence, ring);
 
 		semaphore->gpu_addr += 8;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 5adf4207453d..17bc3dced9f1 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -288,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
 			return r;
 		}
 		radeon_fence_emit(rdev, fence, ring->idx);
-		radeon_ring_unlock_commit(rdev, ring);
+		radeon_ring_unlock_commit(rdev, ring, false);
 	}
 	return 0;
 }
@@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringA);
+	radeon_ring_unlock_commit(rdev, ringA, false);
 
 	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
 	if (r)
@@ -325,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringA);
+	radeon_ring_unlock_commit(rdev, ringA, false);
 
 	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
 	if (r)
@@ -344,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringB);
+	radeon_ring_unlock_commit(rdev, ringB, false);
 
 	r = radeon_fence_wait(fence1, false);
 	if (r) {
@@ -365,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringB);
+	radeon_ring_unlock_commit(rdev, ringB, false);
 
 	r = radeon_fence_wait(fence2, false);
 	if (r) {
@@ -408,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringA);
+	radeon_ring_unlock_commit(rdev, ringA, false);
 
 	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
 	if (r)
@@ -420,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringB);
+	radeon_ring_unlock_commit(rdev, ringB, false);
 	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
 	if (r)
 		goto out_cleanup;
@@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringC);
+	radeon_ring_unlock_commit(rdev, ringC, false);
 
 	for (i = 0; i < 30; ++i) {
 		mdelay(100);
@@ -468,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringC);
+	radeon_ring_unlock_commit(rdev, ringC, false);
 
 	mdelay(1000);
 
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6bf55ec85b62..341848a14376 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -646,7 +646,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
 		ib.ptr[i] = PACKET2(0);
 	ib.length_dw = 16;
 
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r)
 		goto err;
 	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index f9b70a43aa52..c7190aadbd89 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
 	for (i = ib.length_dw; i < ib_size_dw; ++i)
 		ib.ptr[i] = 0x0;
 
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 	}
@@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
 	for (i = ib.length_dw; i < ib_size_dw; ++i)
 		ib.ptr[i] = 0x0;
 
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 	}
@@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		return r;
 	}
 	radeon_ring_write(ring, VCE_CMD_END);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		if (vce_v1_0_get_rptr(rdev, ring) != rptr)
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ccae4d9dc3de..088ffdc2f577 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -420,7 +420,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 	radeon_asic_vm_pad_ib(rdev, &ib);
 	WARN_ON(ib.length_dw > 64);
 
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r)
 		goto error;
 
@@ -483,6 +483,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 		/* add a clone of the bo_va to clear the old address */
 		struct radeon_bo_va *tmp;
 		tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+		if (!tmp) {
+			mutex_unlock(&vm->mutex);
+			return -ENOMEM;
+		}
 		tmp->it.start = bo_va->it.start;
 		tmp->it.last = bo_va->it.last;
 		tmp->vm = vm;
@@ -693,7 +697,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 	radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
 	radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
 	WARN_ON(ib.length_dw > ndw);
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
 		return r;
@@ -957,7 +961,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 	WARN_ON(ib.length_dw > ndw);
 
 	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
 		return r;
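The added NULL check in radeon_vm_bo_set_addr() also has to release vm->mutex, which the caller took before the allocation; a bare return would leak the lock. A sketch of the allocate-under-lock pattern (function and sizes illustrative):

#include <linux/mutex.h>
#include <linux/slab.h>

static int demo_set_addr(struct mutex *lock)
{
	void *tmp;

	mutex_lock(lock);
	tmp = kzalloc(16, GFP_KERNEL);
	if (!tmp) {
		mutex_unlock(lock);	/* drop the lock on every exit path */
		return -ENOMEM;
	}
	/* ... use tmp under the lock ... */
	mutex_unlock(lock);
	kfree(tmp);
	return 0;
}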
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 6c1fc339d228..c5799f16aa4b 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -221,9 +221,9 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
 	entry = (lower_32_bits(addr) & PAGE_MASK) |
 		((upper_32_bits(addr) & 0xff) << 4);
 	if (flags & RADEON_GART_PAGE_READ)
-		addr |= RS400_PTE_READABLE;
+		entry |= RS400_PTE_READABLE;
 	if (flags & RADEON_GART_PAGE_WRITE)
-		addr |= RS400_PTE_WRITEABLE;
+		entry |= RS400_PTE_WRITEABLE;
 	if (!(flags & RADEON_GART_PAGE_SNOOP))
 		entry |= RS400_PTE_UNSNOOPED;
 	entry = cpu_to_le32(entry);
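The rs400 change fixes a wrong-variable bug: the permission bits were OR'd into the DMA address instead of the page-table entry being assembled, so GART pages lost their read/write flags. A user-space model of the corrected encoding (bit positions and flag values illustrative):

#include <stdint.h>

#define DEMO_PTE_READABLE	(1u << 0)	/* illustrative */
#define DEMO_PTE_WRITEABLE	(1u << 1)	/* illustrative */

static uint32_t demo_build_pte(uint64_t addr, unsigned int flags)
{
	uint32_t entry = (uint32_t)(addr & ~0xfffull) |
			 (uint32_t)(((addr >> 32) & 0xff) << 4);

	if (flags & 1)
		entry |= DEMO_PTE_READABLE;	/* into entry, not addr */
	if (flags & 2)
		entry |= DEMO_PTE_WRITEABLE;
	return entry;
}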
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 3e21e869015f..8a477bf1fdb3 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
 	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
 	radeon_ring_write(ring, PACKET0(0x20C8, 0));
 	radeon_ring_write(ring, 0);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 int rv515_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 2983f17ea1b3..d9f5ce715c9b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1177,7 +1177,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 	u32 hdp_host_path_cntl;
 	u32 sq_dyn_gpr_size_simd_ab_0;
 	u32 gb_tiling_config = 0;
-	u32 cc_rb_backend_disable = 0;
 	u32 cc_gc_shader_pipe_config = 0;
 	u32 mc_arb_ramcfg;
 	u32 db_debug4, tmp;
@@ -1311,21 +1310,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 		WREG32(SPI_CONFIG_CNTL, 0);
 	}
 
-	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-	tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
-	if (tmp < rdev->config.rv770.max_backends) {
-		rdev->config.rv770.max_backends = tmp;
-	}
-
 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
-	tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
-	if (tmp < rdev->config.rv770.max_pipes) {
-		rdev->config.rv770.max_pipes = tmp;
-	}
-	tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
-	if (tmp < rdev->config.rv770.max_simds) {
-		rdev->config.rv770.max_simds = tmp;
-	}
 	tmp = rdev->config.rv770.max_simds -
 		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
 	rdev->config.rv770.active_simds = tmp;
@@ -1348,6 +1333,14 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 	rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
 
 	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+	tmp = 0;
+	for (i = 0; i < rdev->config.rv770.max_backends; i++)
+		tmp |= (1 << i);
+	/* if all the backends are disabled, fix it up here */
+	if ((disabled_rb_mask & tmp) == tmp) {
+		for (i = 0; i < rdev->config.rv770.max_backends; i++)
+			disabled_rb_mask &= ~(1 << i);
+	}
 	tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
 	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
 					R7XX_MAX_BACKENDS, disabled_rb_mask);
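The rv770 fixup guards against a fused-off register claiming every render backend is disabled, which would leave the backend remap below with nothing to work with. A standalone model of the mask repair (function name illustrative):

#include <stdint.h>

static uint32_t demo_fixup_rb_mask(uint32_t disabled, unsigned int max_backends)
{
	uint32_t all = (max_backends >= 32) ? ~0u
					    : (1u << max_backends) - 1;

	/* if all the backends are disabled, re-enable them all */
	if ((disabled & all) == all)
		disabled &= ~all;
	return disabled;
}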
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index bbf2e076ee45..74426ac2bb5c 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	radeon_semaphore_free(rdev, &sem, *fence);
 
 	return r;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 011779bd2b3d..3a0b973e8a96 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev)
 	u32 sx_debug_1;
 	u32 hdp_host_path_cntl;
 	u32 tmp;
-	int i, j, k;
+	int i, j;
 
 	switch (rdev->family) {
 	case CHIP_TAHITI:
@@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev)
 			     rdev->config.si.max_sh_per_se,
 			     rdev->config.si.max_cu_per_sh);
 
+	rdev->config.si.active_cus = 0;
 	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
 		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
-			for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
-				rdev->config.si.active_cus +=
-					hweight32(si_get_cu_active_bitmap(rdev, i, j));
-			}
+			rdev->config.si.active_cus +=
+				hweight32(si_get_cu_active_bitmap(rdev, i, j));
 		}
 	}
 
@@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
 	radeon_ring_write(ring, 0xc000);
 	radeon_ring_write(ring, 0xe000);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 
 	si_cp_enable(rdev, true);
 
@@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
 	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
 
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 
 	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
 		ring = &rdev->ring[i];
@@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev)
 		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
 		radeon_ring_write(ring, 0);
 
-		radeon_ring_unlock_commit(rdev, ring);
+		radeon_ring_unlock_commit(rdev, ring, false);
 	}
 
 	return 0;
@@ -4291,10 +4290,10 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
-			       rdev->gart.table_addr >> 12);
+			       rdev->vm_manager.saved_table_addr[i]);
 		else
 			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
-			       rdev->gart.table_addr >> 12);
+			       rdev->vm_manager.saved_table_addr[i]);
 	}
 
 	/* enable context1-15 */
@@ -4326,6 +4325,17 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
 
 static void si_pcie_gart_disable(struct radeon_device *rdev)
 {
+	unsigned i;
+
+	for (i = 1; i < 16; ++i) {
+		uint32_t reg;
+		if (i < 8)
+			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
+		else
+			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
+		rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
+	}
+
 	/* Disable all tables */
 	WREG32(VM_CONTEXT0_CNTL, 0);
 	WREG32(VM_CONTEXT1_CNTL, 0);
@@ -5028,7 +5038,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 
 	/* flush hdp cache */
 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
 				 WRITE_DATA_DST_SEL(0)));
 	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
 	radeon_ring_write(ring, 0);
@@ -5036,7 +5046,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 
 	/* bits 0-15 are the VM contexts0-15 */
 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
 				 WRITE_DATA_DST_SEL(0)));
 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
 	radeon_ring_write(ring, 0);
@@ -6306,17 +6316,17 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
 	wptr = RREG32(IH_RB_WPTR);
 
 	if (wptr & RB_OVERFLOW) {
+		wptr &= ~RB_OVERFLOW;
 		/* When a ring buffer overflow happen start parsing interrupt
 		 * from the last not overwritten vector (wptr + 16). Hopefully
 		 * this should allow us to catchup.
 		 */
-		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
-		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
@@ -6654,13 +6664,13 @@ restart_ih:
 		/* wptr/rptr are in bytes! */
 		rptr += 16;
 		rptr &= rdev->ih.ptr_mask;
+		WREG32(IH_RB_RPTR, rptr);
 	}
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
 	if (queue_thermal && rdev->pm.dpm_enabled)
 		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;
-	WREG32(IH_RB_RPTR, rdev->ih.rptr);
 	atomic_set(&rdev->ih.lock, 0);
 
 	/* make sure wptr hasn't changed while processing */
@@ -7178,6 +7188,9 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
 	int ret, i;
 	u16 tmp16;
 
+	if (pci_is_root_bus(rdev->pdev->bus))
+		return;
+
 	if (radeon_pcie_gen2 == 0)
 		return;
 
@@ -7455,7 +7468,8 @@ static void si_program_aspm(struct radeon_device *rdev)
 	if (orig != data)
 		WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
 
-	if (!disable_clkreq) {
+	if (!disable_clkreq &&
+	    !pci_is_root_bus(rdev->pdev->bus)) {
 		struct pci_dev *root = rdev->pdev->bus->self;
 		u32 lnkcap;
 
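The IH fix clears RB_OVERFLOW before the write pointer is used, so the status bit can no longer leak into the computed read pointer, and the warning now masks with ptr_mask instead of adding it. A user-space model of the corrected handling (names and bit position illustrative):

#include <stdint.h>

#define DEMO_RB_OVERFLOW	(1u << 31)	/* illustrative bit */

static uint32_t demo_get_wptr(uint32_t hw_wptr, uint32_t ptr_mask,
			      uint32_t *rptr)
{
	if (hw_wptr & DEMO_RB_OVERFLOW) {
		hw_wptr &= ~DEMO_RB_OVERFLOW;	/* strip status bit first */
		/* resume 16 bytes past the oldest surviving vector */
		*rptr = (hw_wptr + 16) & ptr_mask;
	}
	return hw_wptr & ptr_mask;
}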
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 716505129450..7c22baaf94db 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -275,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	radeon_semaphore_free(rdev, &sem, *fence);
 
 	return r;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 32e50be9c4ac..57f780053b3e 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,16 +1874,22 @@ int trinity_dpm_init(struct radeon_device *rdev)
 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
 		pi->at[i] = TRINITY_AT_DFLT;
 
-	/* There are stability issues reported on with
-	 * bapm enabled when switching between AC and battery
-	 * power. At the same time, some MSI boards hang
-	 * if it's not enabled and dpm is enabled. Just enable
-	 * it for MSI boards right now.
-	 */
-	if (rdev->pdev->subsystem_vendor == 0x1462)
-		pi->enable_bapm = true;
-	else
+	if (radeon_bapm == -1) {
+		/* There are stability issues reported on with
+		 * bapm enabled when switching between AC and battery
+		 * power. At the same time, some MSI boards hang
+		 * if it's not enabled and dpm is enabled. Just enable
+		 * it for MSI boards right now.
+		 */
+		if (rdev->pdev->subsystem_vendor == 0x1462)
+			pi->enable_bapm = true;
+		else
+			pi->enable_bapm = false;
+	} else if (radeon_bapm == 0) {
 		pi->enable_bapm = false;
+	} else {
+		pi->enable_bapm = true;
+	}
 	pi->enable_nbps_policy = true;
 	pi->enable_sclk_ds = true;
 	pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index be42c8125203..cda391347286 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -124,7 +124,7 @@ int uvd_v1_0_init(struct radeon_device *rdev)
 	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
 	radeon_ring_write(ring, 3);
 
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 
 done:
 	/* lower clocks again */
@@ -331,7 +331,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	}
 	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
 	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(UVD_CONTEXT_ID);
 		if (tmp == 0xDEADBEEF)
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 2d9d4252d598..ae8850f3e63b 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -1,6 +1,7 @@
 config DRM_STI
 	tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
 	depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
+	select RESET_CONTROLLER
 	select DRM_KMS_HELPER
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
index a7cc24917a96..223d93c3a05d 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drm_drv.c
@@ -201,8 +201,8 @@ static int sti_drm_platform_probe(struct platform_device *pdev)
 	master = platform_device_register_resndata(dev,
 						   DRIVER_NAME "__master", -1,
 						   NULL, 0, NULL, 0);
-	if (!master)
-		return -EINVAL;
+	if (IS_ERR(master))
+		return PTR_ERR(master);
 
 	platform_set_drvdata(pdev, master);
 	return 0;
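platform_device_register_resndata() reports failure with an ERR_PTR-encoded pointer, never NULL, so the old test could not trigger and the real errno was discarded. A sketch contrasting the two kernel error conventions that the sti fixes in this series keep straight (both callees are hypothetical):

#include <linux/device.h>
#include <linux/err.h>

static int demo_probe_step(struct device *dev)
{
	void *p, *q;

	p = demo_err_ptr_api(dev);	/* hypothetical: fails with ERR_PTR() */
	if (IS_ERR(p))
		return PTR_ERR(p);

	q = demo_null_api(dev);		/* hypothetical: fails with NULL,
					 * like devm_ioremap_nocache() below */
	if (!q)
		return -ENOMEM;
	return 0;
}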
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 72d957f81c05..2ae9a9b73666 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -730,16 +730,16 @@ static int sti_hda_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 	hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-	if (IS_ERR(hda->regs))
-		return PTR_ERR(hda->regs);
+	if (!hda->regs)
+		return -ENOMEM;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 					   "video-dacs-ctrl");
 	if (res) {
 		hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
 							    resource_size(res));
-		if (IS_ERR(hda->video_dacs_ctrl))
-			return PTR_ERR(hda->video_dacs_ctrl);
+		if (!hda->video_dacs_ctrl)
+			return -ENOMEM;
 	} else {
 		/* If no existing video-dacs-ctrl resource continue the probe */
 		DRM_DEBUG_DRIVER("No video-dacs-ctrl resource\n");
@@ -770,7 +770,7 @@ static int sti_hda_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static struct of_device_id hda_of_match[] = {
+static const struct of_device_id hda_of_match[] = {
 	{ .compatible = "st,stih416-hda", },
 	{ .compatible = "st,stih407-hda", },
 	{ /* end node */ }
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 284e541d970d..b22968c08d1f 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -298,7 +298,6 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
 	hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
 
 	val = frame[0xC];
-	val |= frame[0xD] << 8;
 	hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
 
 	/* Enable transmission slot for AVI infoframe
@@ -677,7 +676,7 @@ static const struct component_ops sti_hdmi_ops = {
 	.unbind = sti_hdmi_unbind,
 };
 
-static struct of_device_id hdmi_of_match[] = {
+static const struct of_device_id hdmi_of_match[] = {
 	{
 		.compatible = "st,stih416-hdmi",
 		.data = &tx3g0c55phy_ops,
@@ -713,8 +712,8 @@ static int sti_hdmi_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 	hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-	if (IS_ERR(hdmi->regs))
-		return PTR_ERR(hdmi->regs);
+	if (!hdmi->regs)
+		return -ENOMEM;
 
 	if (of_device_is_compatible(np, "st,stih416-hdmi")) {
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -725,8 +724,8 @@ static int sti_hdmi_probe(struct platform_device *pdev)
 		}
 		hdmi->syscfg = devm_ioremap_nocache(dev, res->start,
 						    resource_size(res));
-		if (IS_ERR(hdmi->syscfg))
-			return PTR_ERR(hdmi->syscfg);
+		if (!hdmi->syscfg)
+			return -ENOMEM;
 
 	}
 
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index b69e26fee76e..b8afe490356a 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -591,8 +591,8 @@ static int sti_tvout_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 	tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-	if (IS_ERR(tvout->regs))
-		return PTR_ERR(tvout->regs);
+	if (!tvout->regs)
+		return -ENOMEM;
 
 	/* get reset resources */
 	tvout->reset = devm_reset_control_get(dev, "tvout");
@@ -624,7 +624,7 @@ static int sti_tvout_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static struct of_device_id tvout_of_match[] = {
+static const struct of_device_id tvout_of_match[] = {
 	{ .compatible = "st,stih416-tvout", },
 	{ .compatible = "st,stih407-tvout", },
 	{ /* end node */ }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7bfdaa163a33..36b871686d3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -450,11 +450,11 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 					  res,
 					  id_loc - sw_context->buf_start);
 	if (unlikely(ret != 0))
-		goto out_err;
+		return ret;
 
 	ret = vmw_resource_val_add(sw_context, res, &node);
 	if (unlikely(ret != 0))
-		goto out_err;
+		return ret;
 
 	if (res_type == vmw_res_context && dev_priv->has_mob &&
 	    node->first_usage) {
@@ -468,13 +468,13 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 
 		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
 		if (unlikely(ret != 0))
-			goto out_err;
+			return ret;
 		node->staged_bindings =
 			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
 		if (node->staged_bindings == NULL) {
 			DRM_ERROR("Failed to allocate context binding "
 				  "information.\n");
-			goto out_err;
+			return -ENOMEM;
 		}
 		INIT_LIST_HEAD(&node->staged_bindings->list);
 	}
@@ -482,8 +482,7 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 	if (p_val)
 		*p_val = node;
 
-out_err:
-	return ret;
+	return 0;
 }
 
 
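The old out_err label returned ret, but the kzalloc() failure path never set ret to an error, so the function could report success with a half-initialized node. Direct returns give every failure an explicit code; a sketch of the pattern (types and the first step are illustrative):

#include <linux/slab.h>

struct demo_node {
	void *bindings;
};

static int demo_reloc_add(struct demo_node *node)
{
	int ret;

	ret = demo_step(node);	/* hypothetical first step */
	if (ret)
		return ret;	/* explicit code, no shared label */

	node->bindings = kzalloc(sizeof(*node->bindings), GFP_KERNEL);
	if (!node->bindings)
		return -ENOMEM;	/* was: goto out_err with stale ret */

	return 0;
}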
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 6ccd993e26bf..6eae14d2a3f7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -180,8 +180,9 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 	mutex_lock(&dev_priv->hw_mutex);
 
+	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
-		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+		;
 
 	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 6866448083b2..37ac7b5dbd06 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
 }
 EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
 
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
+{
+	dev->pm_domain = NULL;
+}
+EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
+
 static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index d2077f040f3e..77711623b973 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -41,6 +41,7 @@
 #include <linux/poll.h>
 #include <linux/miscdevice.h>
 #include <linux/slab.h>
+#include <linux/screen_info.h>
 
 #include <linux/uaccess.h>
 
@@ -112,10 +113,8 @@ both:
 	return 1;
 }
 
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 /* this is only used a cookie - it should not be dereferenced */
 static struct pci_dev *vga_default;
-#endif
 
 static void vga_arb_device_card_gone(struct pci_dev *pdev);
 
@@ -131,7 +130,6 @@ static struct vga_device *vgadev_find(struct pci_dev *pdev)
 }
 
 /* Returns the default VGA device (vgacon's babe) */
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 struct pci_dev *vga_default_device(void)
 {
 	return vga_default;
@@ -147,7 +145,6 @@ void vga_set_default_device(struct pci_dev *pdev)
 	pci_dev_put(vga_default);
 	vga_default = pci_dev_get(pdev);
 }
-#endif
 
 static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
 {
@@ -583,11 +580,12 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
 	/* Deal with VGA default device. Use first enabled one
 	 * by default if arch doesn't have it's own hook
 	 */
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 	if (vga_default == NULL &&
-	    ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK))
+	    ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
+		pr_info("vgaarb: setting as boot device: PCI:%s\n",
+			pci_name(pdev));
 		vga_set_default_device(pdev);
-#endif
+	}
 
 	vga_arbiter_check_bridge_sharing(vgadev);
 
@@ -621,10 +619,8 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
 		goto bail;
 	}
 
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 	if (vga_default == pdev)
 		vga_set_default_device(NULL);
-#endif
 
 	if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
 		vga_decode_count--;
@@ -1320,6 +1316,38 @@ static int __init vga_arb_device_init(void)
 	pr_info("vgaarb: loaded\n");
 
 	list_for_each_entry(vgadev, &vga_list, list) {
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+		/* Override I/O based detection done by vga_arbiter_add_pci_device()
+		 * as it may take the wrong device (e.g. on Apple system under EFI).
+		 *
+		 * Select the device owning the boot framebuffer if there is one.
+		 */
+		resource_size_t start, end;
+		int i;
+
+		/* Does firmware framebuffer belong to us? */
+		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+			if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM))
+				continue;
+
+			start = pci_resource_start(vgadev->pdev, i);
+			end = pci_resource_end(vgadev->pdev, i);
+
+			if (!start || !end)
+				continue;
+
+			if (screen_info.lfb_base < start ||
+			    (screen_info.lfb_base + screen_info.lfb_size) >= end)
+				continue;
+			if (!vga_default_device())
+				pr_info("vgaarb: setting as boot device: PCI:%s\n",
+					pci_name(vgadev->pdev));
+			else if (vgadev->pdev != vga_default_device())
+				pr_info("vgaarb: overriding boot device: PCI:%s\n",
+					pci_name(vgadev->pdev));
+			vga_set_default_device(vgadev->pdev);
+		}
+#endif
 		if (vgadev->bridge_has_one_vga)
 			pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
 		else
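The containment test above accepts a BAR only when the firmware framebuffer reported in screen_info lies entirely inside it. The same predicate as a standalone user-space model (names illustrative):

#include <stdbool.h>
#include <stdint.h>

/* True if [fb_base, fb_base + fb_size) sits inside [start, end], using
 * the same comparisons as the hunk above.
 */
static bool demo_fb_in_bar(uint64_t fb_base, uint64_t fb_size,
			   uint64_t start, uint64_t end)
{
	return fb_base >= start && fb_base + fb_size < end;
}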