Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_crtc.c  10
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c  19
-rw-r--r--  drivers/gpu/drm/drm_edid.c  26
-rw-r--r--  drivers/gpu/drm/drm_fops.c  1
-rw-r--r--  drivers/gpu/drm/drm_irq.c  21
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c  26
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  720
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c  8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  11
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c  44
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c  34
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c  159
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  186
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  210
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h  3
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c  11
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c  133
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c  4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  158
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  8
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c  94
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c  9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  43
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  77
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h  55
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c  7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.h  19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c  42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c  49
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c  7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ramht.c  71
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c  14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c  17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_temp.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c  7
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c  13
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_pm.c  9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_calc.c  16
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c  35
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c  5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c  52
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c  1
-rw-r--r--  drivers/gpu/drm/radeon/atom.c  1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c  7
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c  66
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c  2
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h  1
-rw-r--r--  drivers/gpu/drm/radeon/r100.c  4
-rw-r--r--  drivers/gpu/drm/radeon/r300.c  2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c  41
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c  2
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c  318
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h  1
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c  43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c  13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c  15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c  68
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c  19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c  382
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c  49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h  22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c  22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h  7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c  5
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c  2
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c  4
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c  4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  97
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c  81
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c  4
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c  5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  14
-rw-r--r--  drivers/gpu/stub/Kconfig  3
101 files changed, 2539 insertions(+), 1353 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6985cb1da72c..2baa6708e44c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -156,12 +156,12 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
 	{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
 	{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
-	{ DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
-	{ DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
-	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
-	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
+	{ DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
+	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
+	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
 	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
-	{ DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
+	{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
 };
 
 static struct drm_prop_enum_list drm_encoder_enum_list[] =
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index dcbeb98f195a..2d4e17a004db 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
 	struct drm_crtc *tmp;
 	int crtc_mask = 1;
 
-	WARN(!crtc, "checking null crtc?");
+	WARN(!crtc, "checking null crtc?\n");
 
 	dev = crtc->dev;
 
@@ -471,6 +471,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	int count = 0, ro, fail = 0;
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	int ret = 0;
+	int i;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -666,6 +667,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		if (ret != 0)
 			goto fail;
 	}
+	DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+	for (i = 0; i < set->num_connectors; i++) {
+		DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+			      drm_get_connector_name(set->connectors[i]));
+		set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+	}
 
 	kfree(save_connectors);
 	kfree(save_encoders);
@@ -841,7 +848,7 @@ static void output_poll_execute(struct work_struct *work)
 	struct delayed_work *delayed_work = to_delayed_work(work);
 	struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
 	struct drm_connector *connector;
-	enum drm_connector_status old_status, status;
+	enum drm_connector_status old_status;
 	bool repoll = false, changed = false;
 
 	if (!drm_kms_helper_poll)
@@ -866,8 +873,12 @@ static void output_poll_execute(struct work_struct *work)
 		    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
 			continue;
 
-		status = connector->funcs->detect(connector, false);
-		if (old_status != status)
+		connector->status = connector->funcs->detect(connector, false);
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+			      connector->base.id,
+			      drm_get_connector_name(connector),
+			      old_status, connector->status);
+		if (old_status != connector->status)
 			changed = true;
 	}
 
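Aside (not part of the patch): the output_poll_execute() change above stores the fresh detect() result in connector->status before logging and comparing, so the cached status, the debug message, and the "changed" decision all see the same value. A minimal userspace sketch of that poll step, using hypothetical stand-in types:

#include <stdio.h>

enum status { status_unknown, status_connected, status_disconnected };

struct connector {
	enum status status;	/* cached, like drm_connector::status */
};

/* stand-in for connector->funcs->detect(connector, false) */
static enum status detect(void) { return status_connected; }

static int poll_one(struct connector *c)
{
	enum status old_status = c->status;

	c->status = detect();	/* update the cache first */
	printf("status updated from %d to %d\n", old_status, c->status);
	return old_status != c->status;	/* "changed" */
}

int main(void)
{
	struct connector c = { status_unknown };
	if (poll_one(&c))
		printf("connector changed, fire hotplug event\n");
	return 0;
}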
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1a26217a530..a245d17165ae 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 			.addr	= DDC_ADDR,
 			.flags	= I2C_M_RD,
 			.len	= len,
-			.buf	= buf + start,
+			.buf	= buf,
 		}
 	};
 
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 static u8 *
 drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-	int i, j = 0;
+	int i, j = 0, valid_extensions = 0;
 	u8 *block, *new;
 
 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 
 	for (j = 1; j <= block[0x7e]; j++) {
 		for (i = 0; i < 4; i++) {
-			if (drm_do_probe_ddc_edid(adapter, block, j,
-						  EDID_LENGTH))
+			if (drm_do_probe_ddc_edid(adapter,
+				  block + (valid_extensions + 1) * EDID_LENGTH,
+				  j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + j * EDID_LENGTH))
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+				valid_extensions++;
 				break;
+			}
 		}
 		if (i == 4)
-			goto carp;
+			dev_warn(connector->dev->dev,
+				 "%s: Ignoring invalid EDID block %d.\n",
+				 drm_get_connector_name(connector), j);
+	}
+
+	if (valid_extensions != block[0x7e]) {
+		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+		block[0x7e] = valid_extensions;
+		new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+		if (!new)
+			goto out;
+		block = new;
 	}
 
 	return block;
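Aside (not part of the patch): when the new drm_do_get_edid() drops invalid extension blocks it patches two bytes of the base block — the extension count at offset 0x7e and the checksum in the final byte. Every EDID block must sum to 0 mod 256, so lowering the count by N must raise the checksum byte by N. A minimal userspace sketch of that invariant, assuming only the standard 128-byte block layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EDID_LENGTH 128

/* An EDID block is valid when its 128 bytes sum to 0 (mod 256). */
static int block_valid(const uint8_t *block)
{
	unsigned int sum = 0, i;
	for (i = 0; i < EDID_LENGTH; i++)
		sum += block[i];
	return (sum & 0xff) == 0;
}

int main(void)
{
	uint8_t block[EDID_LENGTH] = { 0 };
	int valid_extensions = 1;	/* one of two extensions was bad */

	block[0x7e] = 2;				/* extension count */
	block[EDID_LENGTH - 1] = 0x100 - block[0x7e];	/* checksum byte */
	assert(block_valid(block));

	/* The fix-up from the patch: both bytes move in lock-step,
	 * so the sum stays 0 mod 256. */
	block[EDID_LENGTH - 1] += block[0x7e] - valid_extensions;
	block[0x7e] = valid_extensions;
	assert(block_valid(block));

	printf("checksum still valid after dropping a bad extension\n");
	return 0;
}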
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index b744dad5c237..a39794bac04b 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -37,7 +37,6 @@
 #include "drmP.h"
 #include <linux/poll.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 
 /* from BKL pushdown: note that nothing else serializes idr_find() */
 DEFINE_MUTEX(drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 9d3a5030b6e1..16d5155edad1 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -585,10 +585,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 	struct timeval now;
 	unsigned long flags;
 	unsigned int seq;
+	int ret;
 
 	e = kzalloc(sizeof *e, GFP_KERNEL);
-	if (e == NULL)
-		return -ENOMEM;
+	if (e == NULL) {
+		ret = -ENOMEM;
+		goto err_put;
+	}
 
 	e->pipe = pipe;
 	e->base.pid = current->pid;
@@ -603,9 +606,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 	spin_lock_irqsave(&dev->event_lock, flags);
 
 	if (file_priv->event_space < sizeof e->event) {
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-		kfree(e);
-		return -ENOMEM;
+		ret = -EBUSY;
+		goto err_unlock;
 	}
 
 	file_priv->event_space -= sizeof e->event;
@@ -626,7 +628,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
 		e->event.tv_sec = now.tv_sec;
 		e->event.tv_usec = now.tv_usec;
-		drm_vblank_put(dev, e->pipe);
+		drm_vblank_put(dev, pipe);
 		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
 		trace_drm_vblank_event_delivered(current->pid, pipe,
@@ -638,6 +640,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	return 0;
+
+err_unlock:
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	kfree(e);
+err_put:
+	drm_vblank_put(dev, pipe);
+	return ret;
 }
 
 /**
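Aside (not part of the patch): drm_queue_vblank_event() now funnels every failure through err_unlock/err_put labels, releasing the lock, the allocation, and the vblank reference exactly once and in reverse acquisition order. A generic userspace sketch of the unwind idiom, with hypothetical resource names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the reference, allocation and lock that
 * drm_queue_vblank_event() manages. */
static int take_ref(void)  { printf("ref++\n"); return 0; }
static void drop_ref(void) { printf("ref--\n"); }
static void lock(void)     { printf("lock\n"); }
static void unlock(void)   { printf("unlock\n"); }

static int queue_event(int fail_alloc, int fail_space)
{
	void *e;
	int ret;

	if (take_ref())
		return -1;

	e = fail_alloc ? NULL : malloc(32);
	if (e == NULL) {
		ret = -12;		/* -ENOMEM */
		goto err_put;
	}

	lock();
	if (fail_space) {
		ret = -16;		/* -EBUSY */
		goto err_unlock;
	}
	/* ... queue the event here ... */
	unlock();
	free(e);			/* sketch only: the driver keeps it queued */
	return 0;

err_unlock:
	unlock();
	free(e);
err_put:
	drop_ref();
	return ret;
}

int main(void)
{
	return queue_event(0, 1) == -16 ? 0 : 1;
}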
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index af70337567ce..d3e8c540f778 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -242,7 +242,7 @@ fail:
 
 static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
 {
-	return connector_status_unknown;
+	return connector_status_connected;
 }
 
 static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7a26f4dd21ae..cb900dc83d95 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -34,6 +34,7 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include "../../../platform/x86/intel_ips.h"
 #include <linux/pci.h>
 #include <linux/vgaarb.h>
 #include <linux/acpi.h>
@@ -767,6 +768,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_BLT:
 		value = HAS_BLT(dev);
 		break;
+	case I915_PARAM_HAS_COHERENT_RINGS:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -1868,6 +1872,26 @@ out_unlock:
 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
 
 /**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+	void (*link)(void);
+
+	link = symbol_get(ips_link_to_i915_driver);
+	if (link) {
+		link();
+		symbol_put(ips_link_to_i915_driver);
+	}
+}
+
+/**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
  * @flags: startup flags
@@ -2072,6 +2096,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->mchdev_lock = &mchdev_lock;
 	spin_unlock(&mchdev_lock);
 
+	ips_ping_for_i915_load();
+
 	return 0;
 
 out_workqueue_free:
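Aside (not part of the patch): the symbol_get()/symbol_put() pair above lets i915 notify intel_ips only if that module happens to be loaded, without a hard module dependency in either direction. A userspace analogue of the same loose-coupling pattern using dlopen()/dlsym(); "libips.so" and "ips_link" are hypothetical names for illustration:

#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	/* RTLD_NOLOAD: succeed only if the library is already loaded,
	 * roughly what symbol_get() checks for a module. */
	void *h = dlopen("libips.so", RTLD_NOW | RTLD_NOLOAD);
	void (*link)(void);

	if (h) {
		link = (void (*)(void))dlsym(h, "ips_link");
		if (link)
			link();		/* other side is present: notify it */
		dlclose(h);		/* drop the reference, like symbol_put() */
	} else {
		printf("ips not loaded; nothing to do\n");
	}
	return 0;
}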
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd420760..f737960712e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
 
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
@@ -150,7 +150,8 @@ static const struct intel_device_info intel_ironlake_d_info = {
 
 static const struct intel_device_info intel_ironlake_m_info = {
 	.gen = 5, .is_mobile = 1,
-	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.has_fbc = 0, /* disabled due to buggy hardware */
 	.has_bsd_ring = 1,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285e..409826da3099 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1045,6 +1045,8 @@ void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
 			       uint32_t read_domains,
 			       uint32_t write_domain);
+int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+			      bool interruptible);
 int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
@@ -1321,6 +1323,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b5..275ec6ed43ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,8 +38,7 @@
 
 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
 
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
-						  bool pipelined);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -547,6 +546,19 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj_priv;
 	int ret = 0;
 
+	if (args->size == 0)
+		return 0;
+
+	if (!access_ok(VERIFY_WRITE,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size))
+		return -EFAULT;
+
+	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+				       args->size);
+	if (ret)
+		return -EFAULT;
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -564,23 +576,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	if (args->size == 0)
-		goto out;
-
-	if (!access_ok(VERIFY_WRITE,
-		       (char __user *)(uintptr_t)args->data_ptr,
-		       args->size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
-				       args->size);
-	if (ret) {
-		ret = -EFAULT;
-		goto out;
-	}
-
 	ret = i915_gem_object_get_pages_or_evict(obj);
 	if (ret)
 		goto out;
@@ -981,7 +976,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_pwrite *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-	int ret = 0;
+	int ret;
+
+	if (args->size == 0)
+		return 0;
+
+	if (!access_ok(VERIFY_READ,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size))
+		return -EFAULT;
+
+	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+				      args->size);
+	if (ret)
+		return -EFAULT;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
@@ -994,30 +1002,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	}
 	obj_priv = to_intel_bo(obj);
 
-
 	/* Bounds check destination. */
 	if (args->offset > obj->size || args->size > obj->size - args->offset) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (args->size == 0)
-		goto out;
-
-	if (!access_ok(VERIFY_READ,
-		       (char __user *)(uintptr_t)args->data_ptr,
-		       args->size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
-				      args->size);
-	if (ret) {
-		ret = -EFAULT;
-		goto out;
-	}
-
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
 	 * different detiling behavior between reading and writing.
@@ -2172,7 +2162,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
-	if (list_empty(&ring->gpu_write_list))
+	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
 	i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2180,7 @@ i915_gpu_idle(struct drm_device *dev)
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return 0;
 
@@ -2605,7 +2593,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
 	if (reg->gpu) {
 		int ret;
 
-		ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+		ret = i915_gem_object_flush_gpu_write_domain(obj);
 		if (ret)
 			return ret;
 
@@ -2753,8 +2741,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
-				       bool pipelined)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
@@ -2773,10 +2760,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
 					    obj->read_domains,
 					    old_write_domain);
 
-	if (pipelined)
-		return 0;
-
-	return i915_gem_object_wait_rendering(obj, true);
+	return 0;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2837,18 +2821,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret != 0)
 		return ret;
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
+		return ret;
 
 	i915_gem_object_flush_cpu_write_domain(obj);
 
-	if (write) {
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
-
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
 
@@ -2886,7 +2867,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret)
 		return ret;
 
@@ -2909,6 +2890,20 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
 	return 0;
 }
 
+int
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+			  bool interruptible)
+{
+	if (!obj->active)
+		return 0;
+
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+		i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+				    0, obj->base.write_domain);
+
+	return i915_gem_object_wait_rendering(&obj->base, interruptible);
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
@@ -2921,9 +2916,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret != 0)
 		return ret;
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
+		return ret;
 
 	i915_gem_object_flush_gtt_write_domain(obj);
 
@@ -2932,12 +2930,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 */
 	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
-	if (write) {
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
-
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
 
@@ -3108,7 +3100,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * write domain
 	 */
 	if (obj->write_domain &&
-	    obj->write_domain != obj->pending_read_domains) {
+	    (obj->write_domain != obj->pending_read_domains ||
+	     obj_priv->ring != ring)) {
 		flush_domains |= obj->write_domain;
 		invalidate_domains |=
 			obj->pending_read_domains & ~obj->write_domain;
@@ -3201,9 +3194,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	if (offset == 0 && size == obj->size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret != 0)
 		return ret;
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
+		return ret;
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're already fully in the CPU read domain, we're done. */
@@ -3250,192 +3247,230 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	return 0;
 }
 
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
 static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
-			     struct drm_file *file_priv,
-			     struct drm_i915_gem_exec_object2 *entry)
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+				   struct drm_file *file_priv,
+				   struct drm_i915_gem_exec_object2 *entry,
+				   struct drm_i915_gem_relocation_entry *reloc)
 {
 	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_gem_object *target_obj = NULL;
-	uint32_t target_handle = 0;
-	int i, ret = 0;
+	struct drm_gem_object *target_obj;
+	uint32_t target_offset;
+	int ret = -EINVAL;
 
-	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-	for (i = 0; i < entry->relocation_count; i++) {
-		struct drm_i915_gem_relocation_entry reloc;
-		uint32_t target_offset;
+	target_obj = drm_gem_object_lookup(dev, file_priv,
+					   reloc->target_handle);
+	if (target_obj == NULL)
+		return -ENOENT;
 
-		if (__copy_from_user_inatomic(&reloc,
-					      user_relocs+i,
-					      sizeof(reloc))) {
-			ret = -EFAULT;
-			break;
-		}
+	target_offset = to_intel_bo(target_obj)->gtt_offset;
 
-		if (reloc.target_handle != target_handle) {
-			drm_gem_object_unreference(target_obj);
+#if WATCH_RELOC
+	DRM_INFO("%s: obj %p offset %08x target %d "
+		 "read %08x write %08x gtt %08x "
+		 "presumed %08x delta %08x\n",
+		 __func__,
+		 obj,
+		 (int) reloc->offset,
+		 (int) reloc->target_handle,
+		 (int) reloc->read_domains,
+		 (int) reloc->write_domain,
+		 (int) target_offset,
+		 (int) reloc->presumed_offset,
+		 reloc->delta);
+#endif
 
-			target_obj = drm_gem_object_lookup(dev, file_priv,
-							   reloc.target_handle);
-			if (target_obj == NULL) {
-				ret = -ENOENT;
-				break;
-			}
+	/* The target buffer should have appeared before us in the
+	 * exec_object list, so it should have a GTT space bound by now.
+	 */
+	if (target_offset == 0) {
+		DRM_ERROR("No GTT space found for object %d\n",
+			  reloc->target_handle);
+		goto err;
+	}
 
-			target_handle = reloc.target_handle;
-		}
-		target_offset = to_intel_bo(target_obj)->gtt_offset;
+	/* Validate that the target is in a valid r/w GPU domain */
+	if (reloc->write_domain & (reloc->write_domain - 1)) {
+		DRM_ERROR("reloc with multiple write domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		goto err;
+	}
+	if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+	    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+		DRM_ERROR("reloc with read/write CPU domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		goto err;
+	}
+	if (reloc->write_domain && target_obj->pending_write_domain &&
+	    reloc->write_domain != target_obj->pending_write_domain) {
+		DRM_ERROR("Write domain conflict: "
+			  "obj %p target %d offset %d "
+			  "new %08x old %08x\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->write_domain,
+			  target_obj->pending_write_domain);
+		goto err;
+	}
 
-#if WATCH_RELOC
-		DRM_INFO("%s: obj %p offset %08x target %d "
-			 "read %08x write %08x gtt %08x "
-			 "presumed %08x delta %08x\n",
-			 __func__,
-			 obj,
-			 (int) reloc.offset,
-			 (int) reloc.target_handle,
-			 (int) reloc.read_domains,
-			 (int) reloc.write_domain,
-			 (int) target_offset,
-			 (int) reloc.presumed_offset,
-			 reloc.delta);
-#endif
+	target_obj->pending_read_domains |= reloc->read_domains;
+	target_obj->pending_write_domain |= reloc->write_domain;
 
-		/* The target buffer should have appeared before us in the
-		 * exec_object list, so it should have a GTT space bound by now.
-		 */
-		if (target_offset == 0) {
-			DRM_ERROR("No GTT space found for object %d\n",
-				  reloc.target_handle);
-			ret = -EINVAL;
-			break;
-		}
+	/* If the relocation already has the right value in it, no
+	 * more work needs to be done.
+	 */
+	if (target_offset == reloc->presumed_offset)
+		goto out;
 
-		/* Validate that the target is in a valid r/w GPU domain */
-		if (reloc.write_domain & (reloc.write_domain - 1)) {
-			DRM_ERROR("reloc with multiple write domains: "
-				  "obj %p target %d offset %d "
-				  "read %08x write %08x",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.read_domains,
-				  reloc.write_domain);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
-			DRM_ERROR("reloc with read/write CPU domains: "
-				  "obj %p target %d offset %d "
-				  "read %08x write %08x",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.read_domains,
-				  reloc.write_domain);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.write_domain && target_obj->pending_write_domain &&
-		    reloc.write_domain != target_obj->pending_write_domain) {
-			DRM_ERROR("Write domain conflict: "
-				  "obj %p target %d offset %d "
-				  "new %08x old %08x\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.write_domain,
-				  target_obj->pending_write_domain);
-			ret = -EINVAL;
-			break;
-		}
+	/* Check that the relocation address is valid... */
+	if (reloc->offset > obj->base.size - 4) {
+		DRM_ERROR("Relocation beyond object bounds: "
+			  "obj %p target %d offset %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  (int) obj->base.size);
+		goto err;
+	}
+	if (reloc->offset & 3) {
+		DRM_ERROR("Relocation not 4-byte aligned: "
+			  "obj %p target %d offset %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset);
+		goto err;
+	}
 
-		target_obj->pending_read_domains |= reloc.read_domains;
-		target_obj->pending_write_domain |= reloc.write_domain;
+	/* and points to somewhere within the target object. */
+	if (reloc->delta >= target_obj->size) {
+		DRM_ERROR("Relocation beyond target object bounds: "
+			  "obj %p target %d delta %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->delta,
+			  (int) target_obj->size);
+		goto err;
+	}
 
-		/* If the relocation already has the right value in it, no
-		 * more work needs to be done.
-		 */
-		if (target_offset == reloc.presumed_offset)
-			continue;
+	reloc->delta += target_offset;
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+		char *vaddr;
 
-		/* Check that the relocation address is valid... */
-		if (reloc.offset > obj->base.size - 4) {
-			DRM_ERROR("Relocation beyond object bounds: "
-				  "obj %p target %d offset %d size %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset, (int) obj->base.size);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.offset & 3) {
-			DRM_ERROR("Relocation not 4-byte aligned: "
-				  "obj %p target %d offset %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset);
-			ret = -EINVAL;
-			break;
-		}
+		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+		kunmap_atomic(vaddr);
+	} else {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		uint32_t __iomem *reloc_entry;
+		void __iomem *reloc_page;
 
-		/* and points to somewhere within the target object. */
-		if (reloc.delta >= target_obj->size) {
-			DRM_ERROR("Relocation beyond target object bounds: "
-				  "obj %p target %d delta %d size %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.delta, (int) target_obj->size);
-			ret = -EINVAL;
-			break;
-		}
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+		if (ret)
+			goto err;
 
-		reloc.delta += target_offset;
-		if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
-			char *vaddr;
+		/* Map the page containing the relocation we're going to perform. */
+		reloc->offset += obj->gtt_offset;
+		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+						      reloc->offset & PAGE_MASK);
+		reloc_entry = (uint32_t __iomem *)
+			(reloc_page + (reloc->offset & ~PAGE_MASK));
+		iowrite32(reloc->delta, reloc_entry);
+		io_mapping_unmap_atomic(reloc_page);
+	}
 
-			vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
-			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
-			kunmap_atomic(vaddr);
-		} else {
-			uint32_t __iomem *reloc_entry;
-			void __iomem *reloc_page;
+	/* and update the user's relocation entry */
+	reloc->presumed_offset = target_offset;
 
-			ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
-			if (ret)
-				break;
+out:
+	ret = 0;
+err:
+	drm_gem_object_unreference(target_obj);
+	return ret;
+}
 
-			/* Map the page containing the relocation we're going to perform. */
-			reloc.offset += obj->gtt_offset;
-			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-							      reloc.offset & PAGE_MASK);
-			reloc_entry = (uint32_t __iomem *)
-				(reloc_page + (reloc.offset & ~PAGE_MASK));
-			iowrite32(reloc.delta, reloc_entry);
-			io_mapping_unmap_atomic(reloc_page);
-		}
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+				    struct drm_file *file_priv,
+				    struct drm_i915_gem_exec_object2 *entry)
+{
+	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	int i, ret;
+
+	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_i915_gem_relocation_entry reloc;
+
+		if (__copy_from_user_inatomic(&reloc,
+					      user_relocs+i,
+					      sizeof(reloc)))
+			return -EFAULT;
+		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+		if (ret)
+			return ret;
 
-		/* and update the user's relocation entry */
-		reloc.presumed_offset = target_offset;
 		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
 					    &reloc.presumed_offset,
-					    sizeof(reloc.presumed_offset))) {
-			ret = -EFAULT;
-			break;
-		}
+					    sizeof(reloc.presumed_offset)))
+			return -EFAULT;
 	}
 
-	drm_gem_object_unreference(target_obj);
-	return ret;
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+					 struct drm_file *file_priv,
+					 struct drm_i915_gem_exec_object2 *entry,
+					 struct drm_i915_gem_relocation_entry *relocs)
+{
+	int i, ret;
+
+	for (i = 0; i < entry->relocation_count; i++) {
+		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
-			struct drm_file *file,
-			struct drm_gem_object **object_list,
-			struct drm_i915_gem_exec_object2 *exec_list,
-			int count)
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+			     struct drm_file *file,
+			     struct drm_gem_object **object_list,
+			     struct drm_i915_gem_exec_object2 *exec_list,
+			     int count)
+{
+	int i, ret;
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object(obj, file,
+							  &exec_list[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct drm_device *dev,
+			    struct drm_file *file,
+			    struct drm_gem_object **object_list,
+			    struct drm_i915_gem_exec_object2 *exec_list,
+			    int count)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i, retry;
@@ -3497,6 +3532,133 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
 	return 0;
 }
 
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+				  struct drm_file *file,
+				  struct drm_gem_object **object_list,
+				  struct drm_i915_gem_exec_object2 *exec_list,
+				  int count)
+{
+	struct drm_i915_gem_relocation_entry *reloc;
+	int i, total, ret;
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->in_execbuffer = false;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	total = 0;
+	for (i = 0; i < count; i++)
+		total += exec_list[i].relocation_count;
+
+	reloc = drm_malloc_ab(total, sizeof(*reloc));
+	if (reloc == NULL) {
+		mutex_lock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+		if (copy_from_user(reloc+total, user_relocs,
+				   exec_list[i].relocation_count *
+				   sizeof(*reloc))) {
+			ret = -EFAULT;
+			mutex_lock(&dev->struct_mutex);
+			goto err;
+		}
+
+		total += exec_list[i].relocation_count;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		mutex_lock(&dev->struct_mutex);
+		goto err;
+	}
+
+	ret = i915_gem_execbuffer_reserve(dev, file,
+					  object_list, exec_list,
+					  count);
+	if (ret)
+		goto err;
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+							       &exec_list[i],
+							       reloc + total);
+		if (ret)
+			goto err;
+
+		total += exec_list[i].relocation_count;
+	}
+
+	/* Leave the user relocations as are, this is the painfully slow path,
+	 * and we want to avoid the complication of dropping the lock whilst
+	 * having buffers reserved in the aperture and so causing spurious
+	 * ENOSPC for random operations.
+	 */
+
+err:
+	drm_free_large(reloc);
+	return ret;
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+				struct drm_file *file,
+				struct intel_ring_buffer *ring,
+				struct drm_gem_object **objects,
+				int count)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+	dev_priv->mm.flush_rings = 0;
+	for (i = 0; i < count; i++)
+		i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			 __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev, file,
+			       dev->invalidate_domains,
+			       dev->flush_domains,
+			       dev_priv->mm.flush_rings);
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+		/* XXX replace with semaphores */
+		if (obj->ring && ring != obj->ring) {
+			ret = i915_gem_object_wait_rendering(&obj->base, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3580,8 +3742,15 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
 	for (i = 0; i < count; i++) {
 		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
-		size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
+		int length; /* limited by fault_in_pages_readable() */
+
+		/* First check for malicious input causing overflow */
+		if (exec[i].relocation_count >
+		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+			return -EINVAL;
 
+		length = exec[i].relocation_count *
+			sizeof(struct drm_i915_gem_relocation_entry);
 		if (!access_ok(VERIFY_READ, ptr, length))
 			return -EFAULT;
 
@@ -3724,18 +3893,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	ret = i915_gem_execbuffer_pin(dev, file,
-				      object_list, exec_list,
-				      args->buffer_count);
+	ret = i915_gem_execbuffer_reserve(dev, file,
+					  object_list, exec_list,
+					  args->buffer_count);
 	if (ret)
 		goto err;
 
 	/* The objects are in their final locations, apply the relocations. */
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
-		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+	ret = i915_gem_execbuffer_relocate(dev, file,
+					   object_list, exec_list,
+					   args->buffer_count);
+	if (ret) {
+		if (ret == -EFAULT) {
+			ret = i915_gem_execbuffer_relocate_slow(dev, file,
+								object_list,
+								exec_list,
+								args->buffer_count);
+			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+		}
 		if (ret)
 			goto err;
 	}
@@ -3757,33 +3932,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as new domains are computed
-	 * for each object
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-	dev_priv->mm.flush_rings = 0;
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-
-		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj, ring);
-	}
-
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			 __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev, file,
-			       dev->invalidate_domains,
-			       dev->flush_domains,
-			       dev_priv->mm.flush_rings);
-	}
+	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+					      object_list, args->buffer_count);
+	if (ret)
+		goto err;
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4195,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 		alignment = i915_gem_get_gtt_alignment(obj);
 	if (obj_priv->gtt_offset & (alignment - 1)) {
 		WARN(obj_priv->pin_count,
-		     "bo is already pinned with incorrect alignment:"
-		     " offset=%x, req.alignment=%x\n",
+		     "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
 		     obj_priv->gtt_offset, alignment);
 		ret = i915_gem_object_unbind(obj);
 		if (ret)
@@ -4223,10 +4374,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * use this buffer rather sooner than later, so issuing the required
 	 * flush earlier is beneficial.
 	 */
-	if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+	if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
 		i915_gem_flush_ring(dev, file_priv,
 				    obj_priv->ring,
 				    0, obj->write_domain);
+	} else if (obj_priv->ring->outstanding_lazy_request) {
+		/* This ring is not being cleared by active usage,
+		 * so emit a request to do so.
+		 */
+		u32 seqno = i915_add_request(dev,
+					     NULL, NULL,
+					     obj_priv->ring);
+		if (seqno == 0)
+			ret = -ENOMEM;
+	}
 
 	/* Update the active list for the hardware's current position.
 	 * Otherwise this only updates on a delayed timer or when irqs
@@ -4856,17 +5017,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 			 struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	void *obj_addr;
-	int ret;
-	char __user *user_data;
+	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
-	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-	ret = copy_from_user(obj_addr, user_data, args->size);
-	if (ret)
-		return -EFAULT;
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
 
 	drm_agp_chipset_flush(dev);
 	return 0;
@@ -4900,9 +5068,7 @@ i915_gpu_is_active(struct drm_device *dev)
 	int lists_empty;
 
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->render_ring.active_list) &&
-		      list_empty(&dev_priv->bsd_ring.active_list) &&
-		      list_empty(&dev_priv->blt_ring.active_list);
+		      list_empty(&dev_priv->mm.active_list);
 
 	return !lists_empty;
 }
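Aside (not part of the patch): the validate_exec_list() hunk above divides before it multiplies — relocation_count is checked against INT_MAX / sizeof(entry) so that the later count * size can never wrap before reaching the access checks. A standalone sketch of the guard, with a stand-in entry type:

#include <limits.h>
#include <stdio.h>

struct reloc { unsigned int dummy[8]; };	/* stand-in entry type */

static int length_for(unsigned int count, int *length)
{
	/* First check for malicious input causing overflow */
	if (count > INT_MAX / sizeof(struct reloc))
		return -22;			/* -EINVAL */
	*length = count * sizeof(struct reloc);
	return 0;
}

int main(void)
{
	int len;

	if (length_for(100, &len) == 0)
		printf("ok: %d bytes\n", len);
	if (length_for(0x80000000u, &len) != 0)
		printf("rejected: would overflow int\n");
	return 0;
}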
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013f53fa..d8ae7d1d0cc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!lists_empty);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 25ed911a3112..cb8f43429279 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2471,6 +2471,9 @@
 # define MARIUNIT_CLOCK_GATE_DISABLE	(1 << 18)
 # define SVSMUNIT_CLOCK_GATE_DISABLE	(1 << 1)
 
+#define PCH_3DCGDIS1	0x46024
+# define VFMUNIT_CLOCK_GATE_DISABLE	(1 << 11)
+
 #define FDI_PLL_FREQ_CTL	0x46030
 #define FDI_PLL_FREQ_CHANGE_REQUEST	(1<<24)
 #define FDI_PLL_FREQ_LOCK_LIMIT_MASK	0xfff00
@@ -2588,6 +2591,13 @@
 #define ILK_DISPLAY_CHICKEN2	0x42004
 #define ILK_DPARB_GATE	(1<<22)
 #define ILK_VSDPFD_FULL	(1<<21)
+#define ILK_DISPLAY_CHICKEN_FUSES	0x42014
+#define ILK_INTERNAL_GRAPHICS_DISABLE	(1<<31)
+#define ILK_INTERNAL_DISPLAY_DISABLE	(1<<30)
+#define ILK_DISPLAY_DEBUG_DISABLE	(1<<29)
+#define ILK_HDCP_DISABLE	(1<<25)
+#define ILK_eDP_A_DISABLE	(1<<24)
+#define ILK_DESKTOP	(1<<23)
 #define ILK_DSPCLK_GATE	0x42020
 #define ILK_DPARB_CLK_GATE	(1<<5)
 /* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
@@ -3033,6 +3043,7 @@
 #define TRANS_DP_10BPC	(1<<9)
 #define TRANS_DP_6BPC	(2<<9)
 #define TRANS_DP_12BPC	(3<<9)
+#define TRANS_DP_BPC_MASK	(3<<9)
 #define TRANS_DP_VSYNC_ACTIVE_HIGH	(1<<4)
 #define TRANS_DP_VSYNC_ACTIVE_LOW	0
 #define TRANS_DP_HSYNC_ACTIVE_HIGH	(1<<3)
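
The new TRANS_DP_BPC_MASK pairs with the existing BPC values so callers can clear the whole two-bit field before programming it (the intel_display.c hunk below does exactly this before OR-ing in TRANS_DP_8BPC). A small sketch of that read-modify-write idiom, with the bit values copied from the hunk above and TRANS_DP_8BPC assumed to be the zero encoding:

#include <stdint.h>
#include <stdio.h>

#define TRANS_DP_10BPC		(1 << 9)
#define TRANS_DP_6BPC		(2 << 9)
#define TRANS_DP_12BPC		(3 << 9)
#define TRANS_DP_BPC_MASK	(3 << 9)
#define TRANS_DP_8BPC		(0 << 9)	/* assumed zero encoding */

int main(void)
{
	uint32_t temp = TRANS_DP_12BPC | (1u << 4);	/* pretend register readback */

	/* Clear the field first; OR-ing the new value alone would leave
	 * stale bits behind (12bpc | 8bpc would still read back as 12bpc). */
	temp &= ~TRANS_DP_BPC_MASK;
	temp |= TRANS_DP_8BPC;

	printf("0x%08x\n", temp);	/* bit 4 survives, BPC field now 8bpc */
	return 0;
}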
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d2d959..42729d25da58 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;
 
+	/* Cursor state */
+	dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+	dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+	dev_priv->saveCURABASE = I915_READ(CURABASE);
+	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+	if (IS_GEN2(dev))
+		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
		dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -529,6 +539,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
	I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
 
+	/* Cursor state */
+	I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+	I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+	I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+	if (IS_GEN2(dev))
+		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
	return;
 }
 
@@ -543,16 +563,6 @@ void i915_save_display(struct drm_device *dev)
	/* Don't save them in KMS mode */
	i915_save_modeset_reg(dev);
 
-	/* Cursor state */
-	dev_priv->saveCURACNTR = I915_READ(CURACNTR);
-	dev_priv->saveCURAPOS = I915_READ(CURAPOS);
-	dev_priv->saveCURABASE = I915_READ(CURABASE);
-	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
-	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
-	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
-	if (IS_GEN2(dev))
-		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
	/* CRT state */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -657,16 +667,6 @@ void i915_restore_display(struct drm_device *dev)
	/* Don't restore them in KMS mode */
	i915_restore_modeset_reg(dev);
 
-	/* Cursor state */
-	I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
-	I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
-	I915_WRITE(CURABASE, dev_priv->saveCURABASE);
-	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
-	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
-	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
-	if (IS_GEN2(dev))
-		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
	/* CRT state */
	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
	/* Clock gating state */
	intel_init_clock_gating(dev);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+	}
 
	/* Cache mode state */
	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 65c88f9ba12c..2cb8e0b9f1ee 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -190,37 +190,6 @@ out:
	kfree(output.pointer);
 }
 
-static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
-{
-	return 0;
-}
-
-static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
-				 enum vga_switcheroo_state state)
-{
-	return 0;
-}
-
-static int intel_dsm_init(void)
-{
-	return 0;
-}
-
-static int intel_dsm_get_client_id(struct pci_dev *pdev)
-{
-	if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
-		return VGA_SWITCHEROO_IGD;
-	else
-		return VGA_SWITCHEROO_DIS;
-}
-
-static struct vga_switcheroo_handler intel_dsm_handler = {
-	.switchto = intel_dsm_switchto,
-	.power_state = intel_dsm_power_state,
-	.init = intel_dsm_init,
-	.get_client_id = intel_dsm_get_client_id,
-};
-
 static bool intel_dsm_pci_probe(struct pci_dev *pdev)
 {
	acpi_handle dhandle, intel_handle;
@@ -276,11 +245,8 @@ void intel_register_dsm_handler(void)
 {
	if (!intel_dsm_detect())
		return;
-
-	vga_switcheroo_register_handler(&intel_dsm_handler);
 }
 
 void intel_unregister_dsm_handler(void)
 {
-	vga_switcheroo_unregister_handler();
 }
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c55c77043357..8df574316063 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -34,6 +34,25 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 |		\
+			   ADPA_CRT_HOTPLUG_WARMUP_10MS |		\
+			   ADPA_CRT_HOTPLUG_SAMPLE_4S |			\
+			   ADPA_CRT_HOTPLUG_VOLTAGE_50 |		\
+			   ADPA_CRT_HOTPLUG_VOLREF_325MV |		\
+			   ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+	struct intel_encoder base;
+	bool force_hotplug_required;
+};
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_crt, base);
+}
+
 static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
 {
	struct drm_device *dev = encoder->dev;
@@ -129,7 +148,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
	}
 
-	adpa = 0;
+	adpa = ADPA_HOTPLUG_BITS;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -157,53 +176,44 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 {
	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 adpa, temp;
+	u32 adpa;
	bool ret;
-	bool turn_off_dac = false;
 
-	temp = adpa = I915_READ(PCH_ADPA);
+	/* The first time through, trigger an explicit detection cycle */
+	if (crt->force_hotplug_required) {
+		bool turn_off_dac = HAS_PCH_SPLIT(dev);
+		u32 save_adpa;
 
-	if (HAS_PCH_SPLIT(dev))
-		turn_off_dac = true;
-
-	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-	if (turn_off_dac)
-		adpa &= ~ADPA_DAC_ENABLE;
-
-	/* disable HPD first */
-	I915_WRITE(PCH_ADPA, adpa);
-	(void)I915_READ(PCH_ADPA);
+		crt->force_hotplug_required = 0;
+
+		save_adpa = adpa = I915_READ(PCH_ADPA);
+		DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+		adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+		if (turn_off_dac)
+			adpa &= ~ADPA_DAC_ENABLE;
+
+		I915_WRITE(PCH_ADPA, adpa);
 
-	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
-		 ADPA_CRT_HOTPLUG_WARMUP_10MS |
-		 ADPA_CRT_HOTPLUG_SAMPLE_4S |
-		 ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
-		 ADPA_CRT_HOTPLUG_VOLREF_325MV |
-		 ADPA_CRT_HOTPLUG_ENABLE |
-		 ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
-
-	DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
-	I915_WRITE(PCH_ADPA, adpa);
-
-	if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
-		     1000))
-		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
-
-	if (turn_off_dac) {
-		/* Make sure hotplug is enabled */
-		I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
-		(void)I915_READ(PCH_ADPA);
+		if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+			     1000))
+			DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+		if (turn_off_dac) {
+			I915_WRITE(PCH_ADPA, save_adpa);
+			POSTING_READ(PCH_ADPA);
+		}
	}
 
	/* Check the status to see if both blue and green are on now */
	adpa = I915_READ(PCH_ADPA);
-	adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
-	if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) ||
-	    (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO))
+	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
		ret = true;
	else
		ret = false;
+	DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
 
	return ret;
 }
@@ -277,13 +287,12 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
 }
 
-static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
+static bool intel_crt_detect_ddc(struct intel_crt *crt)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
 
	/* CRT should always be at 0, but check anyway */
-	if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
+	if (crt->base.type != INTEL_OUTPUT_ANALOG)
		return false;
 
	if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
@@ -291,7 +300,7 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
		return true;
	}
 
-	if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
+	if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
		DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
		return true;
	}
@@ -300,9 +309,9 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
 }
 
 static enum drm_connector_status
-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
 {
-	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_encoder *encoder = &crt->base.base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -434,7 +443,7 @@ static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
	struct drm_device *dev = connector->dev;
-	struct intel_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_crt *crt = intel_attached_crt(connector);
	struct drm_crtc *crtc;
	int dpms_mode;
	enum drm_connector_status status;
@@ -443,28 +452,31 @@ intel_crt_detect(struct drm_connector *connector, bool force)
		if (intel_crt_detect_hotplug(connector)) {
			DRM_DEBUG_KMS("CRT detected via hotplug\n");
			return connector_status_connected;
-		} else
+		} else {
+			DRM_DEBUG_KMS("CRT not detected via hotplug\n");
			return connector_status_disconnected;
+		}
	}
 
-	if (intel_crt_detect_ddc(&encoder->base))
+	if (intel_crt_detect_ddc(crt))
		return connector_status_connected;
 
	if (!force)
		return connector->status;
 
	/* for pre-945g platforms use load detect */
-	if (encoder->base.crtc && encoder->base.crtc->enabled) {
-		status = intel_crt_load_detect(encoder->base.crtc, encoder);
+	crtc = crt->base.base.crtc;
+	if (crtc && crtc->enabled) {
+		status = intel_crt_load_detect(crtc, crt);
	} else {
-		crtc = intel_get_load_detect_pipe(encoder, connector,
+		crtc = intel_get_load_detect_pipe(&crt->base, connector,
						  NULL, &dpms_mode);
		if (crtc) {
-			if (intel_crt_detect_ddc(&encoder->base))
+			if (intel_crt_detect_ddc(crt))
				status = connector_status_connected;
			else
-				status = intel_crt_load_detect(crtc, encoder);
-			intel_release_load_detect_pipe(encoder,
+				status = intel_crt_load_detect(crtc, crt);
+			intel_release_load_detect_pipe(&crt->base,
						       connector, dpms_mode);
		} else
			status = connector_status_unknown;
@@ -536,17 +548,17 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
 void intel_crt_init(struct drm_device *dev)
 {
	struct drm_connector *connector;
-	struct intel_encoder *intel_encoder;
+	struct intel_crt *crt;
	struct intel_connector *intel_connector;
	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
-	if (!intel_encoder)
+	crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+	if (!crt)
		return;
 
	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
-		kfree(intel_encoder);
+		kfree(crt);
		return;
	}
@@ -554,20 +566,20 @@ void intel_crt_init(struct drm_device *dev)
	drm_connector_init(dev, &intel_connector->base,
			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
-	drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
+	drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
			 DRM_MODE_ENCODER_DAC);
 
-	intel_connector_attach_encoder(intel_connector, intel_encoder);
+	intel_connector_attach_encoder(intel_connector, &crt->base);
 
-	intel_encoder->type = INTEL_OUTPUT_ANALOG;
-	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				    (1 << INTEL_ANALOG_CLONE_BIT) |
-				    (1 << INTEL_SDVO_LVDS_CLONE_BIT);
-	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+	crt->base.type = INTEL_OUTPUT_ANALOG;
+	crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
+				1 << INTEL_ANALOG_CLONE_BIT |
+				1 << INTEL_SDVO_LVDS_CLONE_BIT);
+	crt->base.crtc_mask = (1 << 0) | (1 << 1);
	connector->interlace_allowed = 1;
	connector->doublescan_allowed = 0;
 
-	drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
+	drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
	drm_sysfs_connector_add(connector);
@@ -577,5 +589,22 @@ void intel_crt_init(struct drm_device *dev)
	else
		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
+	/*
+	 * Configure the automatic hotplug detection stuff
+	 */
+	crt->force_hotplug_required = 0;
+	if (HAS_PCH_SPLIT(dev)) {
+		u32 adpa;
+
+		adpa = I915_READ(PCH_ADPA);
+		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+		adpa |= ADPA_HOTPLUG_BITS;
+		I915_WRITE(PCH_ADPA, adpa);
+		POSTING_READ(PCH_ADPA);
+
+		DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+		crt->force_hotplug_required = 1;
+	}
+
	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
 }
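
The hotplug rework above writes ADPA_CRT_HOTPLUG_FORCE_TRIGGER and then polls until the hardware clears it, bounded by wait_for(..., 1000). A rough standalone sketch of that bounded poll; read_reg() is a stub here, and both the bit position and the register offset are assumptions for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER	(1u << 16)	/* assumed bit position */

/* Stub MMIO read: pretend the trigger bit has already self-cleared. */
static uint32_t read_reg(uint32_t reg)
{
	(void)reg;
	return 0;
}

static uint64_t now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

/* Shape of wait_for((I915_READ(PCH_ADPA) & FORCE_TRIGGER) == 0, 1000):
 * poll the self-clearing bit, give up after timeout_ms. */
static bool wait_for_trigger_clear(uint32_t reg, unsigned int timeout_ms)
{
	uint64_t deadline = now_ms() + timeout_ms;

	while (read_reg(reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) {
		if (now_ms() > deadline)
			return false;	/* caller just logs and carries on */
	}
	return true;
}

int main(void)
{
	return wait_for_trigger_clear(0xe1100 /* illustrative offset */, 1000) ? 0 : 1;
}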
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b2..fca523288aca 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1611,6 +1611,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
		wait_event(dev_priv->pending_flip_queue,
			   atomic_read(&obj_priv->pending_flip) == 0);
+
+		/* Big Hammer, we also need to ensure that any pending
+		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+		 * current scanout is retired before unpinning the old
+		 * framebuffer.
+		 */
+		ret = i915_gem_object_flush_gpu(obj_priv, false);
+		if (ret) {
+			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
	}
 
	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -1681,6 +1693,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
	udelay(500);
 }
 
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
+}
+
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
@@ -1767,27 +1810,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 
	DRM_DEBUG_KMS("FDI train done\n");
 
-	/* enable normal train */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-	I915_WRITE(reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_NONE;
-	}
-	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-	/* wait one idle pattern time */
-	POSTING_READ(reg);
-	udelay(1000);
 }
 
 static const int const snb_b_fdi_train_param [] = {
@@ -2090,15 +2112,19 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
 
+	intel_fdi_normal_train(crtc);
+
	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
-			  TRANS_DP_SYNC_MASK);
+			  TRANS_DP_SYNC_MASK |
+			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
+		temp |= TRANS_DP_8BPC;
 
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2200,9 +2226,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
	udelay(100);
 
	/* Ironlake workaround, disable clock pointer after downing FDI */
-	I915_WRITE(FDI_RX_CHICKEN(pipe),
-		   I915_READ(FDI_RX_CHICKEN(pipe) &
-			     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+	if (HAS_PCH_IBX(dev))
+		I915_WRITE(FDI_RX_CHICKEN(pipe),
+			   I915_READ(FDI_RX_CHICKEN(pipe) &
+				     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
 
	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
@@ -2687,27 +2714,19 @@ fdi_reduce_ratio(u32 *num, u32 *den)
	}
 }
 
-#define DATA_N 0x800000
-#define LINK_N 0x80000
-
 static void
 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
 {
-	u64 temp;
-
	m_n->tu = 64; /* default size */
 
-	temp = (u64) DATA_N * pixel_clock;
-	temp = div_u64(temp, link_clock);
-	m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
-	m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
-	m_n->gmch_n = DATA_N;
+	/* BUG_ON(pixel_clock > INT_MAX / 36); */
+	m_n->gmch_m = bits_per_pixel * pixel_clock;
+	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
 
-	temp = (u64) LINK_N * pixel_clock;
-	m_n->link_m = div_u64(temp, link_clock);
-	m_n->link_n = LINK_N;
+	m_n->link_m = pixel_clock;
+	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
 }
 
@@ -3691,6 +3710,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
	/* FDI link */
	if (HAS_PCH_SPLIT(dev)) {
+		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		int lane = 0, link_bw, bpp;
		/* CPU eDP doesn't require FDI link, so just set DP M/N
		   according to current link config */
@@ -3774,6 +3794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
		intel_crtc->fdi_lanes = lane;
 
+		if (pixel_multiplier > 1)
+			link_bw *= pixel_multiplier;
		ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
	}
 
@@ -5211,6 +5233,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
	.page_flip = intel_crtc_page_flip,
 };
 
+static void intel_sanitize_modesetting(struct drm_device *dev,
+				       int pipe, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg, val;
+
+	if (HAS_PCH_SPLIT(dev))
+		return;
+
+	/* Who knows what state these registers were left in by the BIOS or
+	 * grub?
+	 *
+	 * If we leave the registers in a conflicting state (e.g. with the
+	 * display plane reading from the other pipe than the one we intend
+	 * to use) then when we attempt to teardown the active mode, we will
+	 * not disable the pipes and planes in the correct order -- leaving
+	 * a plane reading from a disabled pipe and possibly leading to
+	 * undefined behaviour.
+	 */
+
+	reg = DSPCNTR(plane);
+	val = I915_READ(reg);
+
+	if ((val & DISPLAY_PLANE_ENABLE) == 0)
+		return;
+	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
+		return;
+
+	/* This display plane is active and attached to the other CPU pipe. */
+	pipe = !pipe;
+
+	/* Disable the plane and wait for it to stop reading from the pipe. */
+	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+	intel_flush_display_plane(dev, plane);
+
+	if (IS_GEN2(dev))
+		intel_wait_for_vblank(dev, pipe);
+
+	if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+		return;
+
+	/* Switch off the pipe. */
+	reg = PIPECONF(pipe);
+	val = I915_READ(reg);
+	if (val & PIPECONF_ENABLE) {
+		I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+		intel_wait_for_pipe_off(dev, pipe);
+	}
+}
 
 static void intel_crtc_init(struct drm_device *dev, int pipe)
 {
@@ -5262,6 +5333,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 
	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
		    (unsigned long)intel_crtc);
+
+	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
 }
 
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -5306,19 +5379,41 @@ static int intel_encoder_clones(struct drm_device *dev, int type_mask)
	return index_mask;
 }
 
+static bool has_edp_a(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!IS_MOBILE(dev))
+		return false;
+
+	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+		return false;
+
+	if (IS_GEN5(dev) &&
+	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
+		return false;
+
+	return true;
+}
+
 static void intel_setup_outputs(struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
+	bool has_lvds = false;
 
	if (IS_MOBILE(dev) && !IS_I830(dev))
-		intel_lvds_init(dev);
+		has_lvds = intel_lvds_init(dev);
+	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+		/* disable the panel fitter on everything but LVDS */
+		I915_WRITE(PFIT_CONTROL, 0);
+	}
 
	if (HAS_PCH_SPLIT(dev)) {
		dpd_is_edp = intel_dpd_is_edp(dev);
 
-		if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
+		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);
 
		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
@@ -5581,20 +5676,19 @@ void ironlake_enable_drps(struct drm_device *dev)
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
-	fstart = fmax;
 
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;
 
-	dev_priv->fmax = fstart; /* IPS callback will increase this */
+	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;
 
-	dev_priv->max_delay = fmax;
+	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;
 
-	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
-			 fstart);
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);
 
	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
@@ -5748,6 +5842,8 @@ void intel_init_clock_gating(struct drm_device *dev)
		I915_WRITE(PCH_3DCGDIS0,
			   MARIUNIT_CLOCK_GATE_DISABLE |
			   SVSMUNIT_CLOCK_GATE_DISABLE);
+		I915_WRITE(PCH_3DCGDIS1,
+			   VFMUNIT_CLOCK_GATE_DISABLE);
	}
 
	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
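
The ironlake_compute_m_n() rewrite above forms the data ratio directly as gmch_m = bits_per_pixel * pixel_clock over gmch_n = link_clock * nlanes * 8, then hands both to fdi_reduce_ratio(). The reducer itself is not shown in this hunk; in this kernel it simply halves num and den until both fit their 24-bit register fields. As a sanity check on the new ratio, a small sketch that reduces the same fraction exactly via GCD (units cancel, so kHz values work fine):

#include <stdint.h>
#include <stdio.h>

static uint32_t gcd32(uint32_t a, uint32_t b)
{
	while (b) {
		uint32_t t = a % b;
		a = b;
		b = t;
	}
	return a;
}

/* Exact lowest-terms reduction; the driver's fdi_reduce_ratio() instead
 * right-shifts both values until they fit their register fields. */
static void reduce_ratio(uint32_t *num, uint32_t *den)
{
	uint32_t g = gcd32(*num, *den);
	*num /= g;
	*den /= g;
}

int main(void)
{
	/* e.g. 24bpp, 148500 kHz pixel clock, 270000 kHz link, 4 lanes */
	uint32_t m = 24 * 148500;	/* bits_per_pixel * pixel_clock */
	uint32_t n = 270000 * 4 * 8;	/* link_clock * nlanes * 8 */

	reduce_ratio(&m, &n);
	printf("m/n = %u/%u\n", m, n);	/* prints 33/80 */
	return 0;
}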
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1d63b1..864417cffe9a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -479,6 +479,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
+	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;
@@ -513,14 +514,33 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		break;
	}
 
-	for (;;) {
+	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}
+
+		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+		case AUX_NATIVE_REPLY_ACK:
+			/* I2C-over-AUX Reply field is only valid
+			 * when paired with AUX ACK.
+			 */
+			break;
+		case AUX_NATIVE_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch native nack\n");
+			return -EREMOTEIO;
+		case AUX_NATIVE_REPLY_DEFER:
+			udelay(100);
+			continue;
+		default:
+			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+				  reply[0]);
+			return -EREMOTEIO;
+		}
+
		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
@@ -528,17 +548,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
-			DRM_DEBUG_KMS("aux_ch nack\n");
+			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
-			DRM_DEBUG_KMS("aux_ch defer\n");
+			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
-			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EREMOTEIO;
 }
 
 static int
@@ -584,17 +607,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
		mode->clock = dev_priv->panel_fixed_mode->clock;
	}
 
-	/* Just use VBT values for eDP */
-	if (is_edp(intel_dp)) {
-		intel_dp->lane_count = dev_priv->edp.lanes;
-		intel_dp->link_bw = dev_priv->edp.rate;
-		adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
-		DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
-			      intel_dp->link_bw, intel_dp->lane_count,
-			      adjusted_mode->clock);
-		return true;
-	}
-
	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
		for (clock = 0; clock <= max_clock; clock++) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -613,6 +625,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
		}
	}
 
+	if (is_edp(intel_dp)) {
+		/* okay we failed just pick the highest */
+		intel_dp->lane_count = max_lane_count;
+		intel_dp->link_bw = bws[max_clock];
+		adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+		DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+			      "count %d clock %d\n",
+			      intel_dp->link_bw, intel_dp->lane_count,
+			      adjusted_mode->clock);
+
+		return true;
+	}
+
	return false;
 }
 
@@ -1087,21 +1112,11 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
 }
 
 static uint32_t
-intel_dp_signal_levels(struct intel_dp *intel_dp)
+intel_dp_signal_levels(uint8_t train_set, int lane_count)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t signal_levels = 0;
-	u8 train_set = intel_dp->train_set[0];
-	u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
-	u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
-
-	if (is_edp(intel_dp)) {
-		vswing = dev_priv->edp.vswing;
-		preemphasis = dev_priv->edp.preemphasis;
-	}
+	uint32_t signal_levels = 0;
 
-	switch (vswing) {
+	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_400:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
@@ -1116,7 +1131,7 @@ intel_dp_signal_levels(struct intel_dp *intel_dp)
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
-	switch (preemphasis) {
+	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
@@ -1203,18 +1218,6 @@ intel_channel_eq_ok(struct intel_dp *intel_dp)
 }
 
 static bool
-intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
-{
-	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
-		return false;
-
-	return true;
-}
-
-static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
@@ -1226,9 +1229,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);
 
-	if (!intel_dp_aux_handshake_required(intel_dp))
-		return true;
-
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);
@@ -1261,11 +1261,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
	POSTING_READ(intel_dp->output_reg);
	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-	if (intel_dp_aux_handshake_required(intel_dp))
-		/* Write the link configuration data */
-		intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
-					  intel_dp->link_configuration,
-					  DP_LINK_CONFIGURATION_SIZE);
+	/* Write the link configuration data */
+	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+				  intel_dp->link_configuration,
+				  DP_LINK_CONFIGURATION_SIZE);
 
	DP |= DP_PORT_EN;
	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
@@ -1283,7 +1282,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
		signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
		DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
	} else {
-		signal_levels = intel_dp_signal_levels(intel_dp);
+		signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
		DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
	}
 
@@ -1297,37 +1296,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
			break;
	/* Set training pattern 1 */
 
-		udelay(500);
-		if (intel_dp_aux_handshake_required(intel_dp)) {
+		udelay(100);
+		if (!intel_dp_get_link_status(intel_dp))
			break;
-		} else {
-			if (!intel_dp_get_link_status(intel_dp))
-				break;
 
-			if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
-				clock_recovery = true;
-				break;
-			}
+		if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			clock_recovery = true;
+			break;
+		}
 
-			/* Check to see if we've tried the max voltage */
-			for (i = 0; i < intel_dp->lane_count; i++)
-				if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
-					break;
-			if (i == intel_dp->lane_count)
-				break;
+		/* Check to see if we've tried the max voltage */
+		for (i = 0; i < intel_dp->lane_count; i++)
+			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
+		if (i == intel_dp->lane_count)
+			break;
 
-			/* Check to see if we've tried the same voltage 5 times */
-			if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-				++tries;
-				if (tries == 5)
-					break;
-			} else
-				tries = 0;
-			voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+		/* Check to see if we've tried the same voltage 5 times */
+		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++tries;
+			if (tries == 5)
+				break;
+		} else
+			tries = 0;
+		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
-			/* Compute new intel_dp->train_set as requested by target */
-			intel_get_adjust_train(intel_dp);
-		}
+		/* Compute new intel_dp->train_set as requested by target */
+		intel_get_adjust_train(intel_dp);
	}
 
	intel_dp->DP = DP;
@@ -1354,7 +1349,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
		signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
		DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
	} else {
-		signal_levels = intel_dp_signal_levels(intel_dp);
+		signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
		DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
	}
 
@@ -1368,28 +1363,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
					     DP_TRAINING_PATTERN_2))
			break;
 
-		udelay(500);
-
-		if (!intel_dp_aux_handshake_required(intel_dp)) {
+		udelay(400);
+		if (!intel_dp_get_link_status(intel_dp))
			break;
-		} else {
-			if (!intel_dp_get_link_status(intel_dp))
-				break;
 
-			if (intel_channel_eq_ok(intel_dp)) {
-				channel_eq = true;
-				break;
-			}
+		if (intel_channel_eq_ok(intel_dp)) {
+			channel_eq = true;
+			break;
+		}
 
-			/* Try 5 times */
-			if (tries > 5)
-				break;
+		/* Try 5 times */
+		if (tries > 5)
+			break;
 
-			/* Compute new intel_dp->train_set as requested by target */
-			intel_get_adjust_train(intel_dp);
-			++tries;
-		}
	}
+
+		/* Compute new intel_dp->train_set as requested by target */
+		intel_get_adjust_train(intel_dp);
+		++tries;
+	}
+
	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
		reg = DP | DP_LINK_TRAIN_OFF_CPT;
	else
@@ -1408,6 +1399,9 @@ intel_dp_link_down(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;
 
+	if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+		return;
+
	DRM_DEBUG_KMS("\n");
 
	if (is_edp(intel_dp)) {
@@ -1430,6 +1424,28 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
	if (is_edp(intel_dp))
		DP |= DP_LINK_TRAIN_OFF;
+
+	if (!HAS_PCH_CPT(dev) &&
+	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+		/* Hardware workaround: leaving our transcoder select
+		 * set to transcoder B while it's off will prevent the
+		 * corresponding HDMI output on transcoder A.
+		 *
+		 * Combine this with another hardware workaround:
+		 * transcoder select bit can only be cleared while the
+		 * port is enabled.
+		 */
+		DP &= ~DP_PIPEB_SELECT;
+		I915_WRITE(intel_dp->output_reg, DP);
+
+		/* Changes to enable or select take place the vblank
+		 * after being written.
+		 */
+		intel_wait_for_vblank(intel_dp->base.base.dev,
+				      intel_crtc->pipe);
+	}
+
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
 }
@@ -1517,7 +1533,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
		status = connector_status_connected;
	}
 
-	return bit;
+	return status;
 }
 
 /**
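
The i2c-over-AUX loop above gains two things: a bound (five attempts instead of for (;;)) and a check of the native AUX reply before trusting the I2C reply field. A self-contained sketch of the bounded retry-with-defer shape, with a canned do_transfer() in place of the real AUX channel:

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

enum reply { REPLY_ACK, REPLY_NACK, REPLY_DEFER };

/* Hypothetical transfer: returns a reply code, or a negative errno.
 * Canned demo behaviour: defer twice, then ack. */
static int do_transfer(void)
{
	static int deferrals = 2;
	return deferrals-- > 0 ? REPLY_DEFER : REPLY_ACK;
}

/* Bounded retry with backoff on DEFER -- the shape the patch gives the
 * loop, instead of retrying forever. */
static int transfer_with_retry(void)
{
	unsigned int retry;

	for (retry = 0; retry < 5; retry++) {
		int ret = do_transfer();
		if (ret < 0)
			return ret;

		switch (ret) {
		case REPLY_ACK:
			return 0;
		case REPLY_NACK:
			return -EREMOTEIO;	/* sink refused; no point retrying */
		case REPLY_DEFER:
			usleep(100);		/* sink is busy; back off, retry */
			continue;
		}
	}

	fprintf(stderr, "too many retries, giving up\n");
	return -EREMOTEIO;
}

int main(void)
{
	return transfer_with_retry() ? 1 : 0;
}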
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86a8765..e52c6125bb1f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -237,7 +237,7 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
 extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
-extern void intel_lvds_init(struct drm_device *dev);
+extern bool intel_lvds_init(struct drm_device *dev);
 extern void intel_dp_init(struct drm_device *dev, int dp_reg);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
				      struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 2be4f728ed0c..3dba086e7eea 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -160,7 +160,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
	};
	struct intel_gpio *gpio;
 
-	if (pin < 1 || pin > 7)
+	if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
		return NULL;
 
	gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
@@ -172,7 +172,8 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
		gpio->reg += PCH_GPIOA - GPIOA;
	gpio->dev_priv = dev_priv;
 
-	snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
+	snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+		 "i915 GPIO%c", "?BACDE?F"[pin]);
	gpio->adapter.owner = THIS_MODULE;
	gpio->adapter.algo_data = &gpio->algo;
	gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
@@ -349,7 +350,7 @@ int intel_setup_gmbus(struct drm_device *dev)
		"panel",
		"dpc",
		"dpb",
-		"reserved"
+		"reserved",
		"dpd",
	};
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -366,8 +367,8 @@ int intel_setup_gmbus(struct drm_device *dev)
		bus->adapter.owner = THIS_MODULE;
		bus->adapter.class = I2C_CLASS_DDC;
		snprintf(bus->adapter.name,
-			 I2C_NAME_SIZE,
-			 "gmbus %s",
+			 sizeof(bus->adapter.name),
+			 "i915 gmbus %s",
			 names[i]);
 
		bus->adapter.dev.parent = &dev->pdev->dev;
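
The pin check in intel_gpio_create() now derives validity from the register table itself instead of a hard-coded 1..7 range, so the reserved slot rejects automatically and the check can never drift out of sync with the table. The same idiom in a standalone sketch; the offsets below are placeholders, not the real GPIO registers:

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative register map: index = pin, 0 = reserved/invalid slot. */
static const unsigned int map_pin_to_reg[] = {
	0,		/* pin 0: reserved */
	0x5014,		/* ssc (placeholder value) */
	0x5018,		/* vga (placeholder value) */
	0x5020,		/* panel (placeholder value) */
	0x501c,		/* dpc (placeholder value) */
	0x5010,		/* dpb (placeholder value) */
	0,		/* pin 6: reserved */
	0x5024,		/* dpd (placeholder value) */
};

static int pin_valid(unsigned int pin)
{
	/* Bounds check plus zero-sentinel check, as in the patched code;
	 * no magic range constants to keep in sync with the table. */
	return pin < ARRAY_SIZE(map_pin_to_reg) && map_pin_to_reg[pin];
}

int main(void)
{
	printf("pin 6 valid? %d\n", pin_valid(6));	/* 0: reserved slot */
	printf("pin 5 valid? %d\n", pin_valid(5));	/* 1 */
	printf("pin 9 valid? %d\n", pin_valid(9));	/* 0: out of range */
	return 0;
}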
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a649990ea9..25bcedf386fd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -68,7 +68,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
 /**
  * Sets the power state for the panel.
  */
-static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
+static void intel_lvds_enable(struct intel_lvds *intel_lvds)
 {
	struct drm_device *dev = intel_lvds->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -82,26 +82,61 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
		lvds_reg = LVDS;
	}
 
-	if (on) {
-		I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
-		I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
-		intel_panel_set_backlight(dev, dev_priv->backlight_level);
-	} else {
-		dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
-		intel_panel_set_backlight(dev, 0);
-		I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+	I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
 
-		if (intel_lvds->pfit_control) {
-			if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
-				DRM_ERROR("timed out waiting for panel to power off\n");
-			I915_WRITE(PFIT_CONTROL, 0);
-			intel_lvds->pfit_control = 0;
+	if (intel_lvds->pfit_dirty) {
+		/*
+		 * Enable automatic panel scaling so that non-native modes
+		 * fill the screen. The panel fitter should only be
+		 * adjusted whilst the pipe is disabled, according to
+		 * register description and PRM.
+		 */
+		DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+			      intel_lvds->pfit_control,
+			      intel_lvds->pfit_pgm_ratios);
+		if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) {
+			DRM_ERROR("timed out waiting for panel to power off\n");
+		} else {
+			I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+			I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
			intel_lvds->pfit_dirty = false;
		}
+	}
+
+	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+	POSTING_READ(lvds_reg);
+
+	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+}
+
+static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+{
+	struct drm_device *dev = intel_lvds->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 ctl_reg, lvds_reg;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		ctl_reg = PCH_PP_CONTROL;
+		lvds_reg = PCH_LVDS;
+	} else {
+		ctl_reg = PP_CONTROL;
+		lvds_reg = LVDS;
+	}
+
+	dev_priv->backlight_level = intel_panel_get_backlight(dev);
+	intel_panel_set_backlight(dev, 0);
 
-		I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+
+	if (intel_lvds->pfit_control) {
+		if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+			DRM_ERROR("timed out waiting for panel to power off\n");
+
+		I915_WRITE(PFIT_CONTROL, 0);
+		intel_lvds->pfit_dirty = true;
	}
+
+	I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
	POSTING_READ(lvds_reg);
 }
 
@@ -110,9 +145,9 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
	if (mode == DRM_MODE_DPMS_ON)
-		intel_lvds_set_power(intel_lvds, true);
+		intel_lvds_enable(intel_lvds);
	else
-		intel_lvds_set_power(intel_lvds, false);
+		intel_lvds_disable(intel_lvds);
 
	/* XXX: We never power down the LVDS pairs. */
 }
@@ -411,43 +446,18 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
	/* Always do a full power on as we do not know what state
	 * we were left in.
	 */
-	intel_lvds_set_power(intel_lvds, true);
+	intel_lvds_enable(intel_lvds);
 }
 
 static void intel_lvds_mode_set(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
 {
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
	/*
	 * The LVDS pin pair will already have been turned on in the
	 * intel_crtc_mode_set since it has a large impact on the DPLL
	 * settings.
	 */
-
-	if (HAS_PCH_SPLIT(dev))
-		return;
-
-	if (!intel_lvds->pfit_dirty)
-		return;
-
-	/*
-	 * Enable automatic panel scaling so that non-native modes fill the
-	 * screen. Should be enabled before the pipe is enabled, according to
-	 * register description and PRM.
-	 */
-	DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
-		      intel_lvds->pfit_control,
-		      intel_lvds->pfit_pgm_ratios);
-	if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
-		DRM_ERROR("timed out waiting for panel to power off\n");
-
-	I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
-	I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
-	intel_lvds->pfit_dirty = false;
 }
 
 /**
@@ -481,11 +491,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode;
 
-	if (intel_lvds->edid) {
-		drm_mode_connector_update_edid_property(connector,
-							intel_lvds->edid);
+	if (intel_lvds->edid)
		return drm_add_edid_modes(connector, intel_lvds->edid);
-	}
 
	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
	if (mode == 0)
@@ -840,7 +847,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
  * Create the connector, register the LVDS DDC bus, and try to figure out what
  * modes we can display on the LVDS panel (if present).
  */
-void intel_lvds_init(struct drm_device *dev)
+bool intel_lvds_init(struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_lvds *intel_lvds;
@@ -856,37 +863,37 @@ void intel_lvds_init(struct drm_device *dev)
 
	/* Skip init on machines we know falsely report LVDS */
	if (dmi_check_system(intel_no_lvds))
-		return;
+		return false;
 
	pin = GMBUS_PORT_PANEL;
	if (!lvds_is_present_in_vbt(dev, &pin)) {
		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
-		return;
+		return false;
	}
 
	if (HAS_PCH_SPLIT(dev)) {
		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
-			return;
+			return false;
		if (dev_priv->edp.support) {
			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
-			return;
+			return false;
		}
	}
 
	if (!intel_lvds_ddc_probe(dev, pin)) {
		DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
-		return;
+		return false;
	}
 
	intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
	if (!intel_lvds) {
-		return;
+		return false;
884 } 891 }
885 892
886 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 893 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
887 if (!intel_connector) { 894 if (!intel_connector) {
888 kfree(intel_lvds); 895 kfree(intel_lvds);
889 return; 896 return false;
890 } 897 }
891 898
892 if (!HAS_PCH_SPLIT(dev)) { 899 if (!HAS_PCH_SPLIT(dev)) {
@@ -939,7 +946,16 @@ void intel_lvds_init(struct drm_device *dev)
939 */ 946 */
940 intel_lvds->edid = drm_get_edid(connector, 947 intel_lvds->edid = drm_get_edid(connector,
941 &dev_priv->gmbus[pin].adapter); 948 &dev_priv->gmbus[pin].adapter);
942 949 if (intel_lvds->edid) {
950 if (drm_add_edid_modes(connector,
951 intel_lvds->edid)) {
952 drm_mode_connector_update_edid_property(connector,
953 intel_lvds->edid);
954 } else {
955 kfree(intel_lvds->edid);
956 intel_lvds->edid = NULL;
957 }
958 }
943 if (!intel_lvds->edid) { 959 if (!intel_lvds->edid) {
944 /* Didn't get an EDID, so 960 /* Didn't get an EDID, so
945 * Set wide sync ranges so we get all modes 961 * Set wide sync ranges so we get all modes
@@ -1020,7 +1036,7 @@ out:
1020 /* keep the LVDS connector */ 1036 /* keep the LVDS connector */
1021 dev_priv->int_lvds_connector = connector; 1037 dev_priv->int_lvds_connector = connector;
1022 drm_sysfs_connector_add(connector); 1038 drm_sysfs_connector_add(connector);
1023 return; 1039 return true;
1024 1040
1025failed: 1041failed:
1026 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1042 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
@@ -1028,4 +1044,5 @@ failed:
1028 drm_encoder_cleanup(encoder); 1044 drm_encoder_cleanup(encoder);
1029 kfree(intel_lvds); 1045 kfree(intel_lvds);
1030 kfree(intel_connector); 1046 kfree(intel_connector);
1047 return false;
1031} 1048}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc3cd6b..9b0d9a867aea 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
512 return 0; 512 return 0;
513 513
514err_out: 514err_out:
515 iounmap(opregion->header); 515 iounmap(base);
516 return err; 516 return err;
517} 517}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219a..02ff0a481f47 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
946{ 946{
947 int uv_hscale = uv_hsubsampling(rec->flags); 947 int uv_hscale = uv_hsubsampling(rec->flags);
948 int uv_vscale = uv_vsubsampling(rec->flags); 948 int uv_vscale = uv_vsubsampling(rec->flags);
949 u32 stride_mask, depth, tmp; 949 u32 stride_mask;
950 int depth;
951 u32 tmp;
950 952
951 /* check src dimensions */ 953 /* check src dimensions */
952 if (IS_845G(dev) || IS_I830(dev)) { 954 if (IS_845G(dev) || IS_I830(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae2..31cd7e33e820 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -156,28 +156,30 @@ static int init_ring_common(struct drm_device *dev,
156 156
157 /* G45 ring initialization fails to reset head to zero */ 157 /* G45 ring initialization fails to reset head to zero */
158 if (head != 0) { 158 if (head != 0) {
159 DRM_ERROR("%s head not reset to zero " 159 DRM_DEBUG_KMS("%s head not reset to zero "
160 "ctl %08x head %08x tail %08x start %08x\n", 160 "ctl %08x head %08x tail %08x start %08x\n",
161 ring->name, 161 ring->name,
162 I915_READ_CTL(ring), 162 I915_READ_CTL(ring),
163 I915_READ_HEAD(ring), 163 I915_READ_HEAD(ring),
164 I915_READ_TAIL(ring), 164 I915_READ_TAIL(ring),
165 I915_READ_START(ring)); 165 I915_READ_START(ring));
166 166
167 I915_WRITE_HEAD(ring, 0); 167 I915_WRITE_HEAD(ring, 0);
168 168
169 DRM_ERROR("%s head forced to zero " 169 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
170 "ctl %08x head %08x tail %08x start %08x\n", 170 DRM_ERROR("failed to set %s head to zero "
171 ring->name, 171 "ctl %08x head %08x tail %08x start %08x\n",
172 I915_READ_CTL(ring), 172 ring->name,
173 I915_READ_HEAD(ring), 173 I915_READ_CTL(ring),
174 I915_READ_TAIL(ring), 174 I915_READ_HEAD(ring),
175 I915_READ_START(ring)); 175 I915_READ_TAIL(ring),
176 I915_READ_START(ring));
177 }
176 } 178 }
177 179
178 I915_WRITE_CTL(ring, 180 I915_WRITE_CTL(ring,
179 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) 181 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
180 | RING_NO_REPORT | RING_VALID); 182 | RING_REPORT_64K | RING_VALID);
181 183
182 head = I915_READ_HEAD(ring) & HEAD_ADDR; 184 head = I915_READ_HEAD(ring) & HEAD_ADDR;
183 /* If the head is still not zero, the ring is dead */ 185 /* If the head is still not zero, the ring is dead */
@@ -654,6 +656,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
654 i915_gem_object_unpin(ring->gem_object); 656 i915_gem_object_unpin(ring->gem_object);
655 drm_gem_object_unreference(ring->gem_object); 657 drm_gem_object_unreference(ring->gem_object);
656 ring->gem_object = NULL; 658 ring->gem_object = NULL;
659
660 if (ring->cleanup)
661 ring->cleanup(ring);
662
657 cleanup_status_page(dev, ring); 663 cleanup_status_page(dev, ring);
658} 664}
659 665
@@ -688,11 +694,19 @@ int intel_wait_ring_buffer(struct drm_device *dev,
688{ 694{
689 unsigned long end; 695 unsigned long end;
690 drm_i915_private_t *dev_priv = dev->dev_private; 696 drm_i915_private_t *dev_priv = dev->dev_private;
697 u32 head;
691 698
692 trace_i915_ring_wait_begin (dev); 699 trace_i915_ring_wait_begin (dev);
693 end = jiffies + 3 * HZ; 700 end = jiffies + 3 * HZ;
694 do { 701 do {
695 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 702 /* If the reported head position has wrapped or hasn't advanced,
703 * fallback to the slow and accurate path.
704 */
705 head = intel_read_status_page(ring, 4);
706 if (head < ring->actual_head)
707 head = I915_READ_HEAD(ring);
708 ring->actual_head = head;
709 ring->head = head & HEAD_ADDR;
696 ring->space = ring->head - (ring->tail + 8); 710 ring->space = ring->head - (ring->tail + 8);
697 if (ring->space < 0) 711 if (ring->space < 0)
698 ring->space += ring->size; 712 ring->space += ring->size;
@@ -854,19 +868,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
854 /* do nothing */ 868 /* do nothing */
855} 869}
856 870
871
872/* Workaround for some stepping of SNB,
873 * each time when BLT engine ring tail moved,
874 * the first command in the ring to be parsed
875 * should be MI_BATCH_BUFFER_START
876 */
877#define NEED_BLT_WORKAROUND(dev) \
878 (IS_GEN6(dev) && (dev->pdev->revision < 8))
879
880static inline struct drm_i915_gem_object *
881to_blt_workaround(struct intel_ring_buffer *ring)
882{
883 return ring->private;
884}
885
886static int blt_ring_init(struct drm_device *dev,
887 struct intel_ring_buffer *ring)
888{
889 if (NEED_BLT_WORKAROUND(dev)) {
890 struct drm_i915_gem_object *obj;
891 u32 __iomem *ptr;
892 int ret;
893
894 obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
895 if (obj == NULL)
896 return -ENOMEM;
897
898 ret = i915_gem_object_pin(&obj->base, 4096);
899 if (ret) {
900 drm_gem_object_unreference(&obj->base);
901 return ret;
902 }
903
904 ptr = kmap(obj->pages[0]);
905 iowrite32(MI_BATCH_BUFFER_END, ptr);
906 iowrite32(MI_NOOP, ptr+1);
907 kunmap(obj->pages[0]);
908
909 ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
910 if (ret) {
911 i915_gem_object_unpin(&obj->base);
912 drm_gem_object_unreference(&obj->base);
913 return ret;
914 }
915
916 ring->private = obj;
917 }
918
919 return init_ring_common(dev, ring);
920}
921
922static void blt_ring_begin(struct drm_device *dev,
923 struct intel_ring_buffer *ring,
924 int num_dwords)
925{
926 if (ring->private) {
927 intel_ring_begin(dev, ring, num_dwords+2);
928 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
929 intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
930 } else
931 intel_ring_begin(dev, ring, 4);
932}
933
934static void blt_ring_flush(struct drm_device *dev,
935 struct intel_ring_buffer *ring,
936 u32 invalidate_domains,
937 u32 flush_domains)
938{
939 blt_ring_begin(dev, ring, 4);
940 intel_ring_emit(dev, ring, MI_FLUSH_DW);
941 intel_ring_emit(dev, ring, 0);
942 intel_ring_emit(dev, ring, 0);
943 intel_ring_emit(dev, ring, 0);
944 intel_ring_advance(dev, ring);
945}
946
947static u32
948blt_ring_add_request(struct drm_device *dev,
949 struct intel_ring_buffer *ring,
950 u32 flush_domains)
951{
952 u32 seqno = i915_gem_get_seqno(dev);
953
954 blt_ring_begin(dev, ring, 4);
955 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
956 intel_ring_emit(dev, ring,
957 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
958 intel_ring_emit(dev, ring, seqno);
959 intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
960 intel_ring_advance(dev, ring);
961
962 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
963 return seqno;
964}
965
966static void blt_ring_cleanup(struct intel_ring_buffer *ring)
967{
968 if (!ring->private)
969 return;
970
971 i915_gem_object_unpin(ring->private);
972 drm_gem_object_unreference(ring->private);
973 ring->private = NULL;
974}
975
857static const struct intel_ring_buffer gen6_blt_ring = { 976static const struct intel_ring_buffer gen6_blt_ring = {
858 .name = "blt ring", 977 .name = "blt ring",
859 .id = RING_BLT, 978 .id = RING_BLT,
860 .mmio_base = BLT_RING_BASE, 979 .mmio_base = BLT_RING_BASE,
861 .size = 32 * PAGE_SIZE, 980 .size = 32 * PAGE_SIZE,
862 .init = init_ring_common, 981 .init = blt_ring_init,
863 .write_tail = ring_write_tail, 982 .write_tail = ring_write_tail,
864 .flush = gen6_ring_flush, 983 .flush = blt_ring_flush,
865 .add_request = ring_add_request, 984 .add_request = blt_ring_add_request,
866 .get_seqno = ring_status_page_get_seqno, 985 .get_seqno = ring_status_page_get_seqno,
867 .user_irq_get = blt_ring_get_user_irq, 986 .user_irq_get = blt_ring_get_user_irq,
868 .user_irq_put = blt_ring_put_user_irq, 987 .user_irq_put = blt_ring_put_user_irq,
869 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, 988 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
989 .cleanup = blt_ring_cleanup,
870}; 990};
871 991
872int intel_init_render_ring_buffer(struct drm_device *dev) 992int intel_init_render_ring_buffer(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e5764..d2cd0f1efeed 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -30,8 +30,9 @@ struct intel_ring_buffer {
30 struct drm_device *dev; 30 struct drm_device *dev;
31 struct drm_gem_object *gem_object; 31 struct drm_gem_object *gem_object;
32 32
33 unsigned int head; 33 u32 actual_head;
34 unsigned int tail; 34 u32 head;
35 u32 tail;
35 int space; 36 int space;
36 struct intel_hw_status_page status_page; 37 struct intel_hw_status_page status_page;
37 38
@@ -63,6 +64,7 @@ struct intel_ring_buffer {
63 struct drm_i915_gem_execbuffer2 *exec, 64 struct drm_i915_gem_execbuffer2 *exec,
64 struct drm_clip_rect *cliprects, 65 struct drm_clip_rect *cliprects,
65 uint64_t exec_offset); 66 uint64_t exec_offset);
67 void (*cleanup)(struct intel_ring_buffer *ring);
66 68
67 /** 69 /**
68 * List of objects currently involved in rendering from the 70 * List of objects currently involved in rendering from the
@@ -98,6 +100,8 @@ struct intel_ring_buffer {
98 100
99 wait_queue_head_t irq_queue; 101 wait_queue_head_t irq_queue;
100 drm_local_map_t map; 102 drm_local_map_t map;
103
104 void *private;
101}; 105};
102 106
103static inline u32 107static inline u32
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index de158b76bcd5..6bc42fa2a6ec 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -107,7 +107,8 @@ struct intel_sdvo {
107 * This is set if we treat the device as HDMI, instead of DVI. 107 * This is set if we treat the device as HDMI, instead of DVI.
108 */ 108 */
109 bool is_hdmi; 109 bool is_hdmi;
110 bool has_audio; 110 bool has_hdmi_monitor;
111 bool has_hdmi_audio;
111 112
112 /** 113 /**
113 * This is set if we detect output of sdvo device as LVDS and 114 * This is set if we detect output of sdvo device as LVDS and
@@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1023 if (!intel_sdvo_set_target_input(intel_sdvo)) 1024 if (!intel_sdvo_set_target_input(intel_sdvo))
1024 return; 1025 return;
1025 1026
1026 if (intel_sdvo->is_hdmi && 1027 if (intel_sdvo->has_hdmi_monitor &&
1027 !intel_sdvo_set_avi_infoframe(intel_sdvo)) 1028 !intel_sdvo_set_avi_infoframe(intel_sdvo))
1028 return; 1029 return;
1029 1030
@@ -1063,7 +1064,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1063 } 1064 }
1064 if (intel_crtc->pipe == 1) 1065 if (intel_crtc->pipe == 1)
1065 sdvox |= SDVO_PIPE_B_SELECT; 1066 sdvox |= SDVO_PIPE_B_SELECT;
1066 if (intel_sdvo->has_audio) 1067 if (intel_sdvo->has_hdmi_audio)
1067 sdvox |= SDVO_AUDIO_ENABLE; 1068 sdvox |= SDVO_AUDIO_ENABLE;
1068 1069
1069 if (INTEL_INFO(dev)->gen >= 4) { 1070 if (INTEL_INFO(dev)->gen >= 4) {
@@ -1295,55 +1296,14 @@ intel_sdvo_get_edid(struct drm_connector *connector)
1295 return drm_get_edid(connector, &sdvo->ddc); 1296 return drm_get_edid(connector, &sdvo->ddc);
1296} 1297}
1297 1298
1298static struct drm_connector *
1299intel_find_analog_connector(struct drm_device *dev)
1300{
1301 struct drm_connector *connector;
1302 struct intel_sdvo *encoder;
1303
1304 list_for_each_entry(encoder,
1305 &dev->mode_config.encoder_list,
1306 base.base.head) {
1307 if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
1308 list_for_each_entry(connector,
1309 &dev->mode_config.connector_list,
1310 head) {
1311 if (&encoder->base ==
1312 intel_attached_encoder(connector))
1313 return connector;
1314 }
1315 }
1316 }
1317
1318 return NULL;
1319}
1320
1321static int
1322intel_analog_is_connected(struct drm_device *dev)
1323{
1324 struct drm_connector *analog_connector;
1325
1326 analog_connector = intel_find_analog_connector(dev);
1327 if (!analog_connector)
1328 return false;
1329
1330 if (analog_connector->funcs->detect(analog_connector, false) ==
1331 connector_status_disconnected)
1332 return false;
1333
1334 return true;
1335}
1336
1337/* Mac mini hack -- use the same DDC as the analog connector */ 1299/* Mac mini hack -- use the same DDC as the analog connector */
1338static struct edid * 1300static struct edid *
1339intel_sdvo_get_analog_edid(struct drm_connector *connector) 1301intel_sdvo_get_analog_edid(struct drm_connector *connector)
1340{ 1302{
1341 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1303 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1342 1304
1343 if (!intel_analog_is_connected(connector->dev)) 1305 return drm_get_edid(connector,
1344 return NULL; 1306 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1345
1346 return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1347} 1307}
1348 1308
1349enum drm_connector_status 1309enum drm_connector_status
@@ -1388,8 +1348,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1388 /* DDC bus is shared, match EDID to connector type */ 1348 /* DDC bus is shared, match EDID to connector type */
1389 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 1349 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
1390 status = connector_status_connected; 1350 status = connector_status_connected;
1391 intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); 1351 if (intel_sdvo->is_hdmi) {
1392 intel_sdvo->has_audio = drm_detect_monitor_audio(edid); 1352 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
1353 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
1354 }
1393 } 1355 }
1394 connector->display_info.raw_edid = NULL; 1356 connector->display_info.raw_edid = NULL;
1395 kfree(edid); 1357 kfree(edid);
@@ -1398,7 +1360,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1398 if (status == connector_status_connected) { 1360 if (status == connector_status_connected) {
1399 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1361 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1400 if (intel_sdvo_connector->force_audio) 1362 if (intel_sdvo_connector->force_audio)
1401 intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0; 1363 intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
1402 } 1364 }
1403 1365
1404 return status; 1366 return status;
@@ -1415,10 +1377,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1415 if (!intel_sdvo_write_cmd(intel_sdvo, 1377 if (!intel_sdvo_write_cmd(intel_sdvo,
1416 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) 1378 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
1417 return connector_status_unknown; 1379 return connector_status_unknown;
1418 if (intel_sdvo->is_tv) { 1380
1419 /* add 30ms delay when the output type is SDVO-TV */ 1381 /* add 30ms delay when the output type might be TV */
1382 if (intel_sdvo->caps.output_flags &
1383 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
1420 mdelay(30); 1384 mdelay(30);
1421 } 1385
1422 if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) 1386 if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
1423 return connector_status_unknown; 1387 return connector_status_unknown;
1424 1388
@@ -1472,8 +1436,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1472 edid = intel_sdvo_get_analog_edid(connector); 1436 edid = intel_sdvo_get_analog_edid(connector);
1473 1437
1474 if (edid != NULL) { 1438 if (edid != NULL) {
1475 drm_mode_connector_update_edid_property(connector, edid); 1439 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
1476 drm_add_edid_modes(connector, edid); 1440 drm_mode_connector_update_edid_property(connector, edid);
1441 drm_add_edid_modes(connector, edid);
1442 }
1477 connector->display_info.raw_edid = NULL; 1443 connector->display_info.raw_edid = NULL;
1478 kfree(edid); 1444 kfree(edid);
1479 } 1445 }
@@ -1713,12 +1679,12 @@ intel_sdvo_set_property(struct drm_connector *connector,
1713 1679
1714 intel_sdvo_connector->force_audio = val; 1680 intel_sdvo_connector->force_audio = val;
1715 1681
1716 if (val > 0 && intel_sdvo->has_audio) 1682 if (val > 0 && intel_sdvo->has_hdmi_audio)
1717 return 0; 1683 return 0;
1718 if (val < 0 && !intel_sdvo->has_audio) 1684 if (val < 0 && !intel_sdvo->has_hdmi_audio)
1719 return 0; 1685 return 0;
1720 1686
1721 intel_sdvo->has_audio = val > 0; 1687 intel_sdvo->has_hdmi_audio = val > 0;
1722 goto done; 1688 goto done;
1723 } 1689 }
1724 1690
@@ -1942,9 +1908,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
1942 speed = mapping->i2c_speed; 1908 speed = mapping->i2c_speed;
1943 } 1909 }
1944 1910
1945 sdvo->i2c = &dev_priv->gmbus[pin].adapter; 1911 if (pin < GMBUS_NUM_PORTS) {
1946 intel_gmbus_set_speed(sdvo->i2c, speed); 1912 sdvo->i2c = &dev_priv->gmbus[pin].adapter;
1947 intel_gmbus_force_bit(sdvo->i2c, true); 1913 intel_gmbus_set_speed(sdvo->i2c, speed);
1914 intel_gmbus_force_bit(sdvo->i2c, true);
1915 } else
1916 sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
1948} 1917}
1949 1918
1950static bool 1919static bool
@@ -2070,14 +2039,15 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2070 intel_sdvo_set_colorimetry(intel_sdvo, 2039 intel_sdvo_set_colorimetry(intel_sdvo,
2071 SDVO_COLORIMETRY_RGB256); 2040 SDVO_COLORIMETRY_RGB256);
2072 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2041 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2042
2073 intel_sdvo->is_hdmi = true; 2043 intel_sdvo->is_hdmi = true;
2074 } 2044 }
2075 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2045 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2076 (1 << INTEL_ANALOG_CLONE_BIT)); 2046 (1 << INTEL_ANALOG_CLONE_BIT));
2077 2047
2078 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2048 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2079 2049 if (intel_sdvo->is_hdmi)
2080 intel_sdvo_add_hdmi_properties(intel_sdvo_connector); 2050 intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
2081 2051
2082 return true; 2052 return true;
2083} 2053}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 406228f4a2a0..b14c81110575 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -31,6 +31,7 @@
31 */ 31 */
32 32
33#include <linux/backlight.h> 33#include <linux/backlight.h>
34#include <linux/acpi.h>
34 35
35#include "drmP.h" 36#include "drmP.h"
36#include "nouveau_drv.h" 37#include "nouveau_drv.h"
@@ -136,6 +137,14 @@ int nouveau_backlight_init(struct drm_device *dev)
136{ 137{
137 struct drm_nouveau_private *dev_priv = dev->dev_private; 138 struct drm_nouveau_private *dev_priv = dev->dev_private;
138 139
140#ifdef CONFIG_ACPI
141 if (acpi_video_backlight_support()) {
142 NV_INFO(dev, "ACPI backlight interface available, "
143 "not registering our own\n");
144 return 0;
145 }
146#endif
147
139 switch (dev_priv->card_type) { 148 switch (dev_priv->card_type) {
140 case NV_40: 149 case NV_40:
141 return nouveau_nv40_backlight_init(dev); 150 return nouveau_nv40_backlight_init(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 5f21030a293b..b2293576f278 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6829,7 +6829,7 @@ nouveau_bios_posted(struct drm_device *dev)
6829 struct drm_nouveau_private *dev_priv = dev->dev_private; 6829 struct drm_nouveau_private *dev_priv = dev->dev_private;
6830 unsigned htotal; 6830 unsigned htotal;
6831 6831
6832 if (dev_priv->chipset >= NV_50) { 6832 if (dev_priv->card_type >= NV_50) {
6833 if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && 6833 if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
6834 NVReadVgaCrtc(dev, 0, 0x1a) == 0) 6834 NVReadVgaCrtc(dev, 0, 0x1a) == 0)
6835 return false; 6835 return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 80353e2b8409..c41e1c200ef5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -143,8 +143,10 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
143 nvbo->no_vm = no_vm; 143 nvbo->no_vm = no_vm;
144 nvbo->tile_mode = tile_mode; 144 nvbo->tile_mode = tile_mode;
145 nvbo->tile_flags = tile_flags; 145 nvbo->tile_flags = tile_flags;
146 nvbo->bo.bdev = &dev_priv->ttm.bdev;
146 147
147 nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size); 148 nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
149 &align, &size);
148 align >>= PAGE_SHIFT; 150 align >>= PAGE_SHIFT;
149 151
150 nouveau_bo_placement_set(nvbo, flags, 0); 152 nouveau_bo_placement_set(nvbo, flags, 0);
@@ -176,6 +178,31 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
176 pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; 178 pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
177} 179}
178 180
181static void
182set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
183{
184 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
185
186 if (dev_priv->card_type == NV_10 &&
187 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
188 /*
189 * Make sure that the color and depth buffers are handled
190 * by independent memory controller units. Up to a 9x
191 * speed up when alpha-blending and depth-test are enabled
192 * at the same time.
193 */
194 int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
195
196 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
197 nvbo->placement.fpfn = vram_pages / 2;
198 nvbo->placement.lpfn = ~0;
199 } else {
200 nvbo->placement.fpfn = 0;
201 nvbo->placement.lpfn = vram_pages / 2;
202 }
203 }
204}
205
179void 206void
180nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) 207nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
181{ 208{
@@ -190,6 +217,8 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
190 pl->busy_placement = nvbo->busy_placements; 217 pl->busy_placement = nvbo->busy_placements;
191 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, 218 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
192 type | busy, flags); 219 type | busy, flags);
220
221 set_placement_range(nvbo, type);
193} 222}
194 223
195int 224int
@@ -525,7 +554,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
525 stride = 16 * 4; 554 stride = 16 * 4;
526 height = amount / stride; 555 height = amount / stride;
527 556
528 if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { 557 if (new_mem->mem_type == TTM_PL_VRAM &&
558 nouveau_bo_tile_layout(nvbo)) {
529 ret = RING_SPACE(chan, 8); 559 ret = RING_SPACE(chan, 8);
530 if (ret) 560 if (ret)
531 return ret; 561 return ret;
@@ -546,7 +576,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
546 BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); 576 BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
547 OUT_RING (chan, 1); 577 OUT_RING (chan, 1);
548 } 578 }
549 if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { 579 if (old_mem->mem_type == TTM_PL_VRAM &&
580 nouveau_bo_tile_layout(nvbo)) {
550 ret = RING_SPACE(chan, 8); 581 ret = RING_SPACE(chan, 8);
551 if (ret) 582 if (ret)
552 return ret; 583 return ret;
@@ -753,7 +784,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
753 if (dev_priv->card_type == NV_50) { 784 if (dev_priv->card_type == NV_50) {
754 ret = nv50_mem_vm_bind_linear(dev, 785 ret = nv50_mem_vm_bind_linear(dev,
755 offset + dev_priv->vm_vram_base, 786 offset + dev_priv->vm_vram_base,
756 new_mem->size, nvbo->tile_flags, 787 new_mem->size,
788 nouveau_bo_tile_layout(nvbo),
757 offset); 789 offset);
758 if (ret) 790 if (ret)
759 return ret; 791 return ret;
@@ -894,7 +926,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
894 * nothing to do here. 926 * nothing to do here.
895 */ 927 */
896 if (bo->mem.mem_type != TTM_PL_VRAM) { 928 if (bo->mem.mem_type != TTM_PL_VRAM) {
897 if (dev_priv->card_type < NV_50 || !nvbo->tile_flags) 929 if (dev_priv->card_type < NV_50 ||
930 !nouveau_bo_tile_layout(nvbo))
898 return 0; 931 return 0;
899 } 932 }
900 933
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 0871495096fa..52c356e9a3d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -281,7 +281,7 @@ detect_analog:
281 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); 281 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
282 if (!nv_encoder && !nouveau_tv_disable) 282 if (!nv_encoder && !nouveau_tv_disable)
283 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); 283 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
284 if (nv_encoder) { 284 if (nv_encoder && force) {
285 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 285 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
286 struct drm_encoder_helper_funcs *helper = 286 struct drm_encoder_helper_funcs *helper =
287 encoder->helper_private; 287 encoder->helper_private;
@@ -641,11 +641,28 @@ nouveau_connector_get_modes(struct drm_connector *connector)
641 return ret; 641 return ret;
642} 642}
643 643
644static unsigned
645get_tmds_link_bandwidth(struct drm_connector *connector)
646{
647 struct nouveau_connector *nv_connector = nouveau_connector(connector);
648 struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
649 struct dcb_entry *dcb = nv_connector->detected_encoder->dcb;
650
651 if (dcb->location != DCB_LOC_ON_CHIP ||
652 dev_priv->chipset >= 0x46)
653 return 165000;
654 else if (dev_priv->chipset >= 0x40)
655 return 155000;
656 else if (dev_priv->chipset >= 0x18)
657 return 135000;
658 else
659 return 112000;
660}
661
644static int 662static int
645nouveau_connector_mode_valid(struct drm_connector *connector, 663nouveau_connector_mode_valid(struct drm_connector *connector,
646 struct drm_display_mode *mode) 664 struct drm_display_mode *mode)
647{ 665{
648 struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
649 struct nouveau_connector *nv_connector = nouveau_connector(connector); 666 struct nouveau_connector *nv_connector = nouveau_connector(connector);
650 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; 667 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
651 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 668 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -663,11 +680,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
663 max_clock = 400000; 680 max_clock = 400000;
664 break; 681 break;
665 case OUTPUT_TMDS: 682 case OUTPUT_TMDS:
666 if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || 683 max_clock = get_tmds_link_bandwidth(connector);
667 !nv_encoder->dcb->duallink_possible) 684 if (nouveau_duallink && nv_encoder->dcb->duallink_possible)
668 max_clock = 165000; 685 max_clock *= 2;
669 else
670 max_clock = 330000;
671 break; 686 break;
672 case OUTPUT_ANALOG: 687 case OUTPUT_ANALOG:
673 max_clock = nv_encoder->dcb->crtconf.maxfreq; 688 max_clock = nv_encoder->dcb->crtconf.maxfreq;
@@ -709,44 +724,6 @@ nouveau_connector_best_encoder(struct drm_connector *connector)
709 return NULL; 724 return NULL;
710} 725}
711 726
712void
713nouveau_connector_set_polling(struct drm_connector *connector)
714{
715 struct drm_device *dev = connector->dev;
716 struct drm_nouveau_private *dev_priv = dev->dev_private;
717 struct drm_crtc *crtc;
718 bool spare_crtc = false;
719
720 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
721 spare_crtc |= !crtc->enabled;
722
723 connector->polled = 0;
724
725 switch (connector->connector_type) {
726 case DRM_MODE_CONNECTOR_VGA:
727 case DRM_MODE_CONNECTOR_TV:
728 if (dev_priv->card_type >= NV_50 ||
729 (nv_gf4_disp_arch(dev) && spare_crtc))
730 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
731 break;
732
733 case DRM_MODE_CONNECTOR_DVII:
734 case DRM_MODE_CONNECTOR_DVID:
735 case DRM_MODE_CONNECTOR_HDMIA:
736 case DRM_MODE_CONNECTOR_DisplayPort:
737 case DRM_MODE_CONNECTOR_eDP:
738 if (dev_priv->card_type >= NV_50)
739 connector->polled = DRM_CONNECTOR_POLL_HPD;
740 else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
741 spare_crtc)
742 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
743 break;
744
745 default:
746 break;
747 }
748}
749
750static const struct drm_connector_helper_funcs 727static const struct drm_connector_helper_funcs
751nouveau_connector_helper_funcs = { 728nouveau_connector_helper_funcs = {
752 .get_modes = nouveau_connector_get_modes, 729 .get_modes = nouveau_connector_get_modes,
@@ -872,6 +849,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
872 dev->mode_config.scaling_mode_property, 849 dev->mode_config.scaling_mode_property,
873 nv_connector->scaling_mode); 850 nv_connector->scaling_mode);
874 } 851 }
852 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
875 /* fall-through */ 853 /* fall-through */
876 case DCB_CONNECTOR_TV_0: 854 case DCB_CONNECTOR_TV_0:
877 case DCB_CONNECTOR_TV_1: 855 case DCB_CONNECTOR_TV_1:
@@ -888,11 +866,16 @@ nouveau_connector_create(struct drm_device *dev, int index)
888 dev->mode_config.dithering_mode_property, 866 dev->mode_config.dithering_mode_property,
889 nv_connector->use_dithering ? 867 nv_connector->use_dithering ?
890 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); 868 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
869
870 if (dcb->type != DCB_CONNECTOR_LVDS) {
871 if (dev_priv->card_type >= NV_50)
872 connector->polled = DRM_CONNECTOR_POLL_HPD;
873 else
874 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
875 }
891 break; 876 break;
892 } 877 }
893 878
894 nouveau_connector_set_polling(connector);
895
896 drm_sysfs_connector_add(connector); 879 drm_sysfs_connector_add(connector);
897 dcb->drm = connector; 880 dcb->drm = connector;
898 return dcb->drm; 881 return dcb->drm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index c21ed6b16f88..711b1e9203af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -52,9 +52,6 @@ static inline struct nouveau_connector *nouveau_connector(
52struct drm_connector * 52struct drm_connector *
53nouveau_connector_create(struct drm_device *, int index); 53nouveau_connector_create(struct drm_device *, int index);
54 54
55void
56nouveau_connector_set_polling(struct drm_connector *);
57
58int 55int
59nouveau_connector_bpp(struct drm_connector *); 56nouveau_connector_bpp(struct drm_connector *);
60 57
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3a07e580d27a..1c7db64c03bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -100,6 +100,9 @@ struct nouveau_bo {
100 int pin_refcnt; 100 int pin_refcnt;
101}; 101};
102 102
103#define nouveau_bo_tile_layout(nvbo) \
104 ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
105
103static inline struct nouveau_bo * 106static inline struct nouveau_bo *
104nouveau_bo(struct ttm_buffer_object *bo) 107nouveau_bo(struct ttm_buffer_object *bo)
105{ 108{
@@ -304,6 +307,7 @@ struct nouveau_fifo_engine {
304 void (*destroy_context)(struct nouveau_channel *); 307 void (*destroy_context)(struct nouveau_channel *);
305 int (*load_context)(struct nouveau_channel *); 308 int (*load_context)(struct nouveau_channel *);
306 int (*unload_context)(struct drm_device *); 309 int (*unload_context)(struct drm_device *);
310 void (*tlb_flush)(struct drm_device *dev);
307}; 311};
308 312
309struct nouveau_pgraph_object_method { 313struct nouveau_pgraph_object_method {
@@ -336,6 +340,7 @@ struct nouveau_pgraph_engine {
336 void (*destroy_context)(struct nouveau_channel *); 340 void (*destroy_context)(struct nouveau_channel *);
337 int (*load_context)(struct nouveau_channel *); 341 int (*load_context)(struct nouveau_channel *);
338 int (*unload_context)(struct drm_device *); 342 int (*unload_context)(struct drm_device *);
343 void (*tlb_flush)(struct drm_device *dev);
339 344
340 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, 345 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
341 uint32_t size, uint32_t pitch); 346 uint32_t size, uint32_t pitch);
@@ -485,13 +490,13 @@ enum nv04_fp_display_regs {
485}; 490};
486 491
487struct nv04_crtc_reg { 492struct nv04_crtc_reg {
488 unsigned char MiscOutReg; /* */ 493 unsigned char MiscOutReg;
489 uint8_t CRTC[0xa0]; 494 uint8_t CRTC[0xa0];
490 uint8_t CR58[0x10]; 495 uint8_t CR58[0x10];
491 uint8_t Sequencer[5]; 496 uint8_t Sequencer[5];
492 uint8_t Graphics[9]; 497 uint8_t Graphics[9];
493 uint8_t Attribute[21]; 498 uint8_t Attribute[21];
494 unsigned char DAC[768]; /* Internal Colorlookuptable */ 499 unsigned char DAC[768];
495 500
496 /* PCRTC regs */ 501 /* PCRTC regs */
497 uint32_t fb_start; 502 uint32_t fb_start;
@@ -539,43 +544,9 @@ struct nv04_output_reg {
539}; 544};
540 545
541struct nv04_mode_state { 546struct nv04_mode_state {
542 uint32_t bpp; 547 struct nv04_crtc_reg crtc_reg[2];
543 uint32_t width;
544 uint32_t height;
545 uint32_t interlace;
546 uint32_t repaint0;
547 uint32_t repaint1;
548 uint32_t screen;
549 uint32_t scale;
550 uint32_t dither;
551 uint32_t extra;
552 uint32_t fifo;
553 uint32_t pixel;
554 uint32_t horiz;
555 int arbitration0;
556 int arbitration1;
557 uint32_t pll;
558 uint32_t pllB;
559 uint32_t vpll;
560 uint32_t vpll2;
561 uint32_t vpllB;
562 uint32_t vpll2B;
563 uint32_t pllsel; 548 uint32_t pllsel;
564 uint32_t sel_clk; 549 uint32_t sel_clk;
565 uint32_t general;
566 uint32_t crtcOwner;
567 uint32_t head;
568 uint32_t head2;
569 uint32_t cursorConfig;
570 uint32_t cursor0;
571 uint32_t cursor1;
572 uint32_t cursor2;
573 uint32_t timingH;
574 uint32_t timingV;
575 uint32_t displayV;
576 uint32_t crtcSync;
577
578 struct nv04_crtc_reg crtc_reg[2];
579}; 550};
580 551
581enum nouveau_card_type { 552enum nouveau_card_type {
@@ -613,6 +584,12 @@ struct drm_nouveau_private {
613 struct work_struct irq_work; 584 struct work_struct irq_work;
614 struct work_struct hpd_work; 585 struct work_struct hpd_work;
615 586
587 struct {
588 spinlock_t lock;
589 uint32_t hpd0_bits;
590 uint32_t hpd1_bits;
591 } hpd_state;
592
616 struct list_head vbl_waiting; 593 struct list_head vbl_waiting;
617 594
618 struct { 595 struct {
@@ -1045,6 +1022,7 @@ extern int nv50_fifo_create_context(struct nouveau_channel *);
1045extern void nv50_fifo_destroy_context(struct nouveau_channel *); 1022extern void nv50_fifo_destroy_context(struct nouveau_channel *);
1046extern int nv50_fifo_load_context(struct nouveau_channel *); 1023extern int nv50_fifo_load_context(struct nouveau_channel *);
1047extern int nv50_fifo_unload_context(struct drm_device *); 1024extern int nv50_fifo_unload_context(struct drm_device *);
1025extern void nv50_fifo_tlb_flush(struct drm_device *dev);
1048 1026
1049/* nvc0_fifo.c */ 1027/* nvc0_fifo.c */
1050extern int nvc0_fifo_init(struct drm_device *); 1028extern int nvc0_fifo_init(struct drm_device *);
@@ -1122,6 +1100,8 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
1122extern int nv50_graph_unload_context(struct drm_device *); 1100extern int nv50_graph_unload_context(struct drm_device *);
1123extern void nv50_graph_context_switch(struct drm_device *); 1101extern void nv50_graph_context_switch(struct drm_device *);
1124extern int nv50_grctx_init(struct nouveau_grctx *); 1102extern int nv50_grctx_init(struct nouveau_grctx *);
1103extern void nv50_graph_tlb_flush(struct drm_device *dev);
1104extern void nv86_graph_tlb_flush(struct drm_device *dev);
1125 1105
1126/* nvc0_graph.c */ 1106/* nvc0_graph.c */
1127extern int nvc0_graph_init(struct drm_device *); 1107extern int nvc0_graph_init(struct drm_device *);
@@ -1239,7 +1219,6 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
1239extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); 1219extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
1240extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); 1220extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
1241extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); 1221extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
1242extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
1243 1222
1244/* nouveau_fence.c */ 1223/* nouveau_fence.c */
1245struct nouveau_fence; 1224struct nouveau_fence;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 441b12420bb1..ab1bbfbf266e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -249,6 +249,7 @@ alloc_semaphore(struct drm_device *dev)
249{ 249{
250 struct drm_nouveau_private *dev_priv = dev->dev_private; 250 struct drm_nouveau_private *dev_priv = dev->dev_private;
251 struct nouveau_semaphore *sema; 251 struct nouveau_semaphore *sema;
252 int ret;
252 253
253 if (!USE_SEMA(dev)) 254 if (!USE_SEMA(dev))
254 return NULL; 255 return NULL;
@@ -257,10 +258,14 @@ alloc_semaphore(struct drm_device *dev)
257 if (!sema) 258 if (!sema)
258 goto fail; 259 goto fail;
259 260
261 ret = drm_mm_pre_get(&dev_priv->fence.heap);
262 if (ret)
263 goto fail;
264
260 spin_lock(&dev_priv->fence.lock); 265 spin_lock(&dev_priv->fence.lock);
261 sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0); 266 sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
262 if (sema->mem) 267 if (sema->mem)
263 sema->mem = drm_mm_get_block(sema->mem, 4, 0); 268 sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
264 spin_unlock(&dev_priv->fence.lock); 269 spin_unlock(&dev_priv->fence.lock);
265 270
266 if (!sema->mem) 271 if (!sema->mem)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5c4c929d7f74..9a1fdcf400c2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -107,23 +107,29 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
107} 107}
108 108
109static bool 109static bool
110nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) { 110nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
111 switch (tile_flags) { 111{
112 case 0x0000: 112 struct drm_nouveau_private *dev_priv = dev->dev_private;
113 case 0x1800: 113
114 case 0x2800: 114 if (dev_priv->card_type >= NV_50) {
115 case 0x4800: 115 switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
116 case 0x7000: 116 case 0x0000:
117 case 0x7400: 117 case 0x1800:
118 case 0x7a00: 118 case 0x2800:
119 case 0xe000: 119 case 0x4800:
120 break; 120 case 0x7000:
121 default: 121 case 0x7400:
122 NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); 122 case 0x7a00:
123 return false; 123 case 0xe000:
124 return true;
125 }
126 } else {
127 if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
128 return true;
124 } 129 }
125 130
126 return true; 131 NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
132 return false;
127} 133}
128 134
129int 135int
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index bed669a54a2d..b9672a05c411 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -519,11 +519,11 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
519 519
520 struct pll_lims pll_lim; 520 struct pll_lims pll_lim;
521 struct nouveau_pll_vals pv; 521 struct nouveau_pll_vals pv;
522 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; 522 enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0;
523 523
524 if (get_pll_limits(dev, pllreg, &pll_lim)) 524 if (get_pll_limits(dev, pll, &pll_lim))
525 return; 525 return;
526 nouveau_hw_get_pllvals(dev, pllreg, &pv); 526 nouveau_hw_get_pllvals(dev, pll, &pv);
527 527
528 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && 528 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
529 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && 529 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
@@ -536,7 +536,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
536 pv.M1 = pll_lim.vco1.max_m; 536 pv.M1 = pll_lim.vco1.max_m;
537 pv.N1 = pll_lim.vco1.min_n; 537 pv.N1 = pll_lim.vco1.min_n;
538 pv.log2P = pll_lim.max_usable_log2p; 538 pv.log2P = pll_lim.max_usable_log2p;
539 nouveau_hw_setpll(dev, pllreg, &pv); 539 nouveau_hw_setpll(dev, pll_lim.reg, &pv);
540} 540}
541 541
542/* 542/*
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
index 869130f83602..2989090b9434 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.h
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -416,6 +416,25 @@ nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
416} 416}
417 417
418static inline void 418static inline void
419nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
420{
421 struct drm_nouveau_private *dev_priv = dev->dev_private;
422
423 NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
424
425 if (dev_priv->card_type == NV_04) {
426 /*
427 * Hilarious, the 24th bit doesn't want to stick to
428 * PCRTC_START...
429 */
430 int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX);
431
432 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX,
433 (cre_heb & ~0x40) | ((offset >> 18) & 0x40));
434 }
435}
436
437static inline void
419nv_show_cursor(struct drm_device *dev, int head, bool show) 438nv_show_cursor(struct drm_device *dev, int head, bool show)
420{ 439{
421 struct drm_nouveau_private *dev_priv = dev->dev_private; 440 struct drm_nouveau_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index fdd7e3de79c8..cb389d014326 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -256,7 +256,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
256 if (index >= DCB_MAX_NUM_I2C_ENTRIES) 256 if (index >= DCB_MAX_NUM_I2C_ENTRIES)
257 return NULL; 257 return NULL;
258 258
259 if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) { 259 if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
260 uint32_t reg = 0xe500, val; 260 uint32_t reg = 0xe500, val;
261 261
262 if (i2c->port_type == 6) { 262 if (i2c->port_type == 6) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 6fd51a51c608..7bfd9e6c9d67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -42,6 +42,13 @@
42#include "nouveau_connector.h" 42#include "nouveau_connector.h"
43#include "nv50_display.h" 43#include "nv50_display.h"
44 44
45static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
46
47static int nouveau_ratelimit(void)
48{
49 return __ratelimit(&nouveau_ratelimit_state);
50}
51
45void 52void
46nouveau_irq_preinstall(struct drm_device *dev) 53nouveau_irq_preinstall(struct drm_device *dev)
47{ 54{
@@ -53,6 +60,7 @@ nouveau_irq_preinstall(struct drm_device *dev)
53 if (dev_priv->card_type >= NV_50) { 60 if (dev_priv->card_type >= NV_50) {
54 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); 61 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
55 INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); 62 INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
63 spin_lock_init(&dev_priv->hpd_state.lock);
56 INIT_LIST_HEAD(&dev_priv->vbl_waiting); 64 INIT_LIST_HEAD(&dev_priv->vbl_waiting);
57 } 65 }
58} 66}
@@ -202,8 +210,8 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
202 } 210 }
203 211
204 if (status & NV_PFIFO_INTR_DMA_PUSHER) { 212 if (status & NV_PFIFO_INTR_DMA_PUSHER) {
205 u32 get = nv_rd32(dev, 0x003244); 213 u32 dma_get = nv_rd32(dev, 0x003244);
206 u32 put = nv_rd32(dev, 0x003240); 214 u32 dma_put = nv_rd32(dev, 0x003240);
207 u32 push = nv_rd32(dev, 0x003220); 215 u32 push = nv_rd32(dev, 0x003220);
208 u32 state = nv_rd32(dev, 0x003228); 216 u32 state = nv_rd32(dev, 0x003228);
209 217
@@ -213,16 +221,18 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
213 u32 ib_get = nv_rd32(dev, 0x003334); 221 u32 ib_get = nv_rd32(dev, 0x003334);
214 u32 ib_put = nv_rd32(dev, 0x003330); 222 u32 ib_put = nv_rd32(dev, 0x003330);
215 223
216 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " 224 if (nouveau_ratelimit())
225 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
217 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " 226 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
218 "State 0x%08x Push 0x%08x\n", 227 "State 0x%08x Push 0x%08x\n",
219 chid, ho_get, get, ho_put, put, ib_get, ib_put, 228 chid, ho_get, dma_get, ho_put,
220 state, push); 229 dma_put, ib_get, ib_put, state,
230 push);
221 231
222 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ 232 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
223 nv_wr32(dev, 0x003364, 0x00000000); 233 nv_wr32(dev, 0x003364, 0x00000000);
224 if (get != put || ho_get != ho_put) { 234 if (dma_get != dma_put || ho_get != ho_put) {
225 nv_wr32(dev, 0x003244, put); 235 nv_wr32(dev, 0x003244, dma_put);
226 nv_wr32(dev, 0x003328, ho_put); 236 nv_wr32(dev, 0x003328, ho_put);
227 } else 237 } else
228 if (ib_get != ib_put) { 238 if (ib_get != ib_put) {
@@ -231,10 +241,10 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
231 } else { 241 } else {
232 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " 242 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
233 "Put 0x%08x State 0x%08x Push 0x%08x\n", 243 "Put 0x%08x State 0x%08x Push 0x%08x\n",
234 chid, get, put, state, push); 244 chid, dma_get, dma_put, state, push);
235 245
236 if (get != put) 246 if (dma_get != dma_put)
237 nv_wr32(dev, 0x003244, put); 247 nv_wr32(dev, 0x003244, dma_put);
238 } 248 }
239 249
240 nv_wr32(dev, 0x003228, 0x00000000); 250 nv_wr32(dev, 0x003228, 0x00000000);
@@ -266,8 +276,9 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
266 } 276 }
267 277
268 if (status) { 278 if (status) {
269 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", 279 if (nouveau_ratelimit())
270 status, chid); 280 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
281 status, chid);
271 nv_wr32(dev, NV03_PFIFO_INTR_0, status); 282 nv_wr32(dev, NV03_PFIFO_INTR_0, status);
272 status = 0; 283 status = 0;
273 } 284 }
@@ -544,13 +555,6 @@ nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
544 nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); 555 nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
545} 556}
546 557
547static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
548
549static int nouveau_ratelimit(void)
550{
551 return __ratelimit(&nouveau_ratelimit_state);
552}
553
554 558
555static inline void 559static inline void
556nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) 560nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a163c7c612e7..fe4a30dc4b42 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -33,9 +33,9 @@
33#include "drmP.h" 33#include "drmP.h"
34#include "drm.h" 34#include "drm.h"
35#include "drm_sarea.h" 35#include "drm_sarea.h"
36#include "nouveau_drv.h"
37 36
38#define MIN(a,b) a < b ? a : b 37#include "nouveau_drv.h"
38#include "nouveau_pm.h"
39 39
40/* 40/*
41 * NV10-NV40 tiling helpers 41 * NV10-NV40 tiling helpers
@@ -175,11 +175,10 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
175 } 175 }
176 } 176 }
177 } 177 }
178 dev_priv->engine.instmem.flush(dev);
179 178
180 nv50_vm_flush(dev, 5); 179 dev_priv->engine.instmem.flush(dev);
181 nv50_vm_flush(dev, 0); 180 dev_priv->engine.fifo.tlb_flush(dev);
182 nv50_vm_flush(dev, 4); 181 dev_priv->engine.graph.tlb_flush(dev);
183 nv50_vm_flush(dev, 6); 182 nv50_vm_flush(dev, 6);
184 return 0; 183 return 0;
185} 184}
@@ -209,11 +208,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
209 pte++; 208 pte++;
210 } 209 }
211 } 210 }
212 dev_priv->engine.instmem.flush(dev);
213 211
214 nv50_vm_flush(dev, 5); 212 dev_priv->engine.instmem.flush(dev);
215 nv50_vm_flush(dev, 0); 213 dev_priv->engine.fifo.tlb_flush(dev);
216 nv50_vm_flush(dev, 4); 214 dev_priv->engine.graph.tlb_flush(dev);
217 nv50_vm_flush(dev, 6); 215 nv50_vm_flush(dev, 6);
218} 216}
219 217
@@ -653,6 +651,7 @@ nouveau_mem_gart_init(struct drm_device *dev)
653void 651void
654nouveau_mem_timing_init(struct drm_device *dev) 652nouveau_mem_timing_init(struct drm_device *dev)
655{ 653{
654 /* cards < NVC0 only */
656 struct drm_nouveau_private *dev_priv = dev->dev_private; 655 struct drm_nouveau_private *dev_priv = dev->dev_private;
657 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 656 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
658 struct nouveau_pm_memtimings *memtimings = &pm->memtimings; 657 struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
@@ -719,14 +718,14 @@ nouveau_mem_timing_init(struct drm_device *dev)
719 tUNK_19 = 1; 718 tUNK_19 = 1;
720 tUNK_20 = 0; 719 tUNK_20 = 0;
721 tUNK_21 = 0; 720 tUNK_21 = 0;
722 switch (MIN(recordlen,21)) { 721 switch (min(recordlen, 22)) {
723 case 21: 722 case 22:
724 tUNK_21 = entry[21]; 723 tUNK_21 = entry[21];
725 case 20: 724 case 21:
726 tUNK_20 = entry[20]; 725 tUNK_20 = entry[20];
727 case 19: 726 case 20:
728 tUNK_19 = entry[19]; 727 tUNK_19 = entry[19];
729 case 18: 728 case 19:
730 tUNK_18 = entry[18]; 729 tUNK_18 = entry[18];
731 default: 730 default:
732 tUNK_0 = entry[0]; 731 tUNK_0 = entry[0];
@@ -756,24 +755,30 @@ nouveau_mem_timing_init(struct drm_device *dev)
756 timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); 755 timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
757 if(recordlen > 19) { 756 if(recordlen > 19) {
758 timing->reg_100228 += (tUNK_19 - 1) << 24; 757 timing->reg_100228 += (tUNK_19 - 1) << 24;
759 } else { 758 }/* I cannot back-up this else-statement right now
759 else {
760 timing->reg_100228 += tUNK_12 << 24; 760 timing->reg_100228 += tUNK_12 << 24;
761 } 761 }*/
762 762
763 /* XXX: reg_10022c */ 763 /* XXX: reg_10022c */
764 timing->reg_10022c = tUNK_2 - 1;
764 765
765 timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | 766 timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
766 tUNK_13 << 8 | tUNK_13); 767 tUNK_13 << 8 | tUNK_13);
767 768
768 /* XXX: +6? */ 769 /* XXX: +6? */
769 timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); 770 timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
770 if(tUNK_10 > tUNK_11) { 771 timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
771 timing->reg_100234 += tUNK_10 << 16; 772
772 } else { 773 /* XXX: reg_100238, reg_10023c
773 timing->reg_100234 += tUNK_11 << 16; 774 * reg: 0x00??????
775 * reg_10023c:
776 * 0 for pre-NV50 cards
777 * 0x????0202 for NV50+ cards (empirical evidence) */
778 if(dev_priv->card_type >= NV_50) {
779 timing->reg_10023c = 0x202;
774 } 780 }
775 781
776 /* XXX; reg_100238, reg_10023c */
777 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, 782 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
778 timing->reg_100220, timing->reg_100224, 783 timing->reg_100220, timing->reg_100224,
779 timing->reg_100228, timing->reg_10022c); 784 timing->reg_100228, timing->reg_10022c);
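
The flush rework above trades hard-coded nv50_vm_flush() engine numbers for per-engine tlb_flush hooks that nouveau_state.c fills in at init time (see that hunk below). A minimal user-space sketch of the hook pattern follows; struct engine, vm_flush() and the wrapper names are illustrative stand-ins for the driver's real structures, not its API.

#include <stdio.h>

/* Illustrative stand-ins for the driver's engine structs: each engine
 * publishes a tlb_flush hook instead of callers hard-coding its VM
 * engine number. */
static void vm_flush(int vm_engine)
{
	printf("flush TLBs for VM engine %d\n", vm_engine);
}

static void fifo_tlb_flush(void)  { vm_flush(5); }
static void graph_tlb_flush(void) { vm_flush(0); }

struct engine {
	void (*tlb_flush)(void);
};

int main(void)
{
	struct engine fifo  = { fifo_tlb_flush };
	struct engine graph = { graph_tlb_flush };

	/* Callers like nv50_mem_vm_bind_linear() no longer need to know
	 * which VM engine number belongs to PFIFO or PGRAPH. */
	fifo.tlb_flush();
	graph.tlb_flush();
	return 0;
}
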
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 896cf8634144..dd572adca02a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -129,7 +129,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
129 if (ramin == NULL) { 129 if (ramin == NULL) {
130 spin_unlock(&dev_priv->ramin_lock); 130 spin_unlock(&dev_priv->ramin_lock);
131 nouveau_gpuobj_ref(NULL, &gpuobj); 131 nouveau_gpuobj_ref(NULL, &gpuobj);
132 return ret; 132 return -ENOMEM;
133 } 133 }
134 134
135 ramin = drm_mm_get_block_atomic(ramin, size, align); 135 ramin = drm_mm_get_block_atomic(ramin, size, align);
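
The nouveau_object.c one-liner fixes a stale-error-code bug: ret still held 0 from an earlier successful call, so the allocation failure path reported success. A compilable toy of the pattern, with setup_ok()/alloc_node() as made-up stand-ins:

#include <stdio.h>
#include <errno.h>

static int setup_ok(void) { return 0; }          /* earlier step succeeds */
static void *alloc_node(void) { return NULL; }   /* later step fails */

static int do_init(void)
{
	int ret = setup_ok();
	if (ret)
		return ret;

	if (alloc_node() == NULL)
		return -ENOMEM;	/* returning 'ret' here would leak a 0 */
	return 0;
}

int main(void)
{
	printf("do_init() = %d\n", do_init());	/* -12, not 0 */
	return 0;
}
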
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 1c99c55d6d46..9f7b158f5825 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -284,6 +284,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
284 } 284 }
285} 285}
286 286
287#ifdef CONFIG_HWMON
287static ssize_t 288static ssize_t
288nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) 289nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
289{ 290{
@@ -395,10 +396,12 @@ static struct attribute *hwmon_attributes[] = {
395static const struct attribute_group hwmon_attrgroup = { 396static const struct attribute_group hwmon_attrgroup = {
396 .attrs = hwmon_attributes, 397 .attrs = hwmon_attributes,
397}; 398};
399#endif
398 400
399static int 401static int
400nouveau_hwmon_init(struct drm_device *dev) 402nouveau_hwmon_init(struct drm_device *dev)
401{ 403{
404#ifdef CONFIG_HWMON
402 struct drm_nouveau_private *dev_priv = dev->dev_private; 405 struct drm_nouveau_private *dev_priv = dev->dev_private;
403 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 406 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
404 struct device *hwmon_dev; 407 struct device *hwmon_dev;
@@ -425,13 +428,14 @@ nouveau_hwmon_init(struct drm_device *dev)
425 } 428 }
426 429
427 pm->hwmon = hwmon_dev; 430 pm->hwmon = hwmon_dev;
428 431#endif
429 return 0; 432 return 0;
430} 433}
431 434
432static void 435static void
433nouveau_hwmon_fini(struct drm_device *dev) 436nouveau_hwmon_fini(struct drm_device *dev)
434{ 437{
438#ifdef CONFIG_HWMON
435 struct drm_nouveau_private *dev_priv = dev->dev_private; 439 struct drm_nouveau_private *dev_priv = dev->dev_private;
436 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 440 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
437 441
@@ -439,6 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
439 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); 443 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
440 hwmon_device_unregister(pm->hwmon); 444 hwmon_device_unregister(pm->hwmon);
441 } 445 }
446#endif
442} 447}
443 448
444int 449int
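
nouveau_pm.c now compiles the hwmon sysfs support only under CONFIG_HWMON, leaving nouveau_hwmon_init()/_fini() as harmless no-ops otherwise. A reduced sketch of the same guard pattern; hwmon_init()/hwmon_fini() are illustrative, and CONFIG_HWMON would normally come from Kconfig rather than a local define:

#include <stdio.h>

/* #define CONFIG_HWMON 1 */	/* normally provided by Kconfig */

static int hwmon_init(void)
{
#ifdef CONFIG_HWMON
	printf("registering hwmon device\n");
#endif
	return 0;	/* success either way: hwmon is optional */
}

static void hwmon_fini(void)
{
#ifdef CONFIG_HWMON
	printf("unregistering hwmon device\n");
#endif
}

int main(void)
{
	hwmon_init();
	hwmon_fini();
	return 0;
}
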
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 7f16697cc96c..2d8580927ca4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -153,26 +153,42 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
153 return -ENOMEM; 153 return -ENOMEM;
154} 154}
155 155
156static struct nouveau_ramht_entry *
157nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
158{
159 struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
160 struct nouveau_ramht_entry *entry;
161 unsigned long flags;
162
163 if (!ramht)
164 return NULL;
165
166 spin_lock_irqsave(&ramht->lock, flags);
167 list_for_each_entry(entry, &ramht->entries, head) {
168 if (entry->channel == chan &&
169 (!handle || entry->handle == handle)) {
170 list_del(&entry->head);
171 spin_unlock_irqrestore(&ramht->lock, flags);
172
173 return entry;
174 }
175 }
176 spin_unlock_irqrestore(&ramht->lock, flags);
177
178 return NULL;
179}
180
156static void 181static void
157nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) 182nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
158{ 183{
159 struct drm_device *dev = chan->dev; 184 struct drm_device *dev = chan->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private; 185 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; 186 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
162 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; 187 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
163 struct nouveau_ramht_entry *entry, *tmp; 188 unsigned long flags;
164 u32 co, ho; 189 u32 co, ho;
165 190
166 list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) { 191 spin_lock_irqsave(&chan->ramht->lock, flags);
167 if (entry->channel != chan || entry->handle != handle)
168 continue;
169
170 nouveau_gpuobj_ref(NULL, &entry->gpuobj);
171 list_del(&entry->head);
172 kfree(entry);
173 break;
174 }
175
176 co = ho = nouveau_ramht_hash_handle(chan, handle); 192 co = ho = nouveau_ramht_hash_handle(chan, handle);
177 do { 193 do {
178 if (nouveau_ramht_entry_valid(dev, ramht, co) && 194 if (nouveau_ramht_entry_valid(dev, ramht, co) &&
@@ -184,7 +200,7 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
184 nv_wo32(ramht, co + 0, 0x00000000); 200 nv_wo32(ramht, co + 0, 0x00000000);
185 nv_wo32(ramht, co + 4, 0x00000000); 201 nv_wo32(ramht, co + 4, 0x00000000);
186 instmem->flush(dev); 202 instmem->flush(dev);
187 return; 203 goto out;
188 } 204 }
189 205
190 co += 8; 206 co += 8;
@@ -194,17 +210,22 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
194 210
195 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", 211 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
196 chan->id, handle); 212 chan->id, handle);
213out:
214 spin_unlock_irqrestore(&chan->ramht->lock, flags);
197} 215}
198 216
199void 217void
200nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) 218nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
201{ 219{
202 struct nouveau_ramht *ramht = chan->ramht; 220 struct nouveau_ramht_entry *entry;
203 unsigned long flags;
204 221
205 spin_lock_irqsave(&ramht->lock, flags); 222 entry = nouveau_ramht_remove_entry(chan, handle);
206 nouveau_ramht_remove_locked(chan, handle); 223 if (!entry)
207 spin_unlock_irqrestore(&ramht->lock, flags); 224 return;
225
226 nouveau_ramht_remove_hash(chan, entry->handle);
227 nouveau_gpuobj_ref(NULL, &entry->gpuobj);
228 kfree(entry);
208} 229}
209 230
210struct nouveau_gpuobj * 231struct nouveau_gpuobj *
@@ -265,23 +286,19 @@ void
265nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, 286nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
266 struct nouveau_channel *chan) 287 struct nouveau_channel *chan)
267{ 288{
268 struct nouveau_ramht_entry *entry, *tmp; 289 struct nouveau_ramht_entry *entry;
269 struct nouveau_ramht *ramht; 290 struct nouveau_ramht *ramht;
270 unsigned long flags;
271 291
272 if (ref) 292 if (ref)
273 kref_get(&ref->refcount); 293 kref_get(&ref->refcount);
274 294
275 ramht = *ptr; 295 ramht = *ptr;
276 if (ramht) { 296 if (ramht) {
277 spin_lock_irqsave(&ramht->lock, flags); 297 while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
278 list_for_each_entry_safe(entry, tmp, &ramht->entries, head) { 298 nouveau_ramht_remove_hash(chan, entry->handle);
279 if (entry->channel != chan) 299 nouveau_gpuobj_ref(NULL, &entry->gpuobj);
280 continue; 300 kfree(entry);
281
282 nouveau_ramht_remove_locked(chan, entry->handle);
283 } 301 }
284 spin_unlock_irqrestore(&ramht->lock, flags);
285 302
286 kref_put(&ramht->refcount, nouveau_ramht_del); 303 kref_put(&ramht->refcount, nouveau_ramht_del);
287 } 304 }
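
The nouveau_ramht.c rework funnels removal through nouveau_ramht_remove_entry(), which unlinks the entry while holding the spinlock and returns it, so the gpuobj unref and kfree() happen outside the critical section. A user-space sketch of that unlink-under-lock, free-outside-lock shape, with a pthread mutex standing in for the kernel spinlock and a hand-rolled list for the driver's list_head:

#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	unsigned handle;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *entries;

/* Unlink a matching entry under the lock and hand it back; handle == 0
 * matches anything, mirroring the "remove all for this channel" case. */
static struct entry *remove_entry(unsigned handle)
{
	struct entry **pp, *e;

	pthread_mutex_lock(&lock);
	for (pp = &entries; (e = *pp) != NULL; pp = &e->next) {
		if (!handle || e->handle == handle) {
			*pp = e->next;
			pthread_mutex_unlock(&lock);
			return e;
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return 1;
	e->handle = 0xbeef0001;
	e->next = NULL;
	entries = e;

	/* Teardown: unlink under the lock, free outside it. */
	while ((e = remove_entry(0)) != NULL)
		free(e);
	return 0;
}
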
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 288bacac7e5a..d4ac97007038 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -120,8 +120,8 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
120 dev_priv->engine.instmem.flush(nvbe->dev); 120 dev_priv->engine.instmem.flush(nvbe->dev);
121 121
122 if (dev_priv->card_type == NV_50) { 122 if (dev_priv->card_type == NV_50) {
123 nv50_vm_flush(dev, 5); /* PGRAPH */ 123 dev_priv->engine.fifo.tlb_flush(dev);
124 nv50_vm_flush(dev, 0); /* PFIFO */ 124 dev_priv->engine.graph.tlb_flush(dev);
125 } 125 }
126 126
127 nvbe->bound = true; 127 nvbe->bound = true;
@@ -162,8 +162,8 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
162 dev_priv->engine.instmem.flush(nvbe->dev); 162 dev_priv->engine.instmem.flush(nvbe->dev);
163 163
164 if (dev_priv->card_type == NV_50) { 164 if (dev_priv->card_type == NV_50) {
165 nv50_vm_flush(dev, 5); 165 dev_priv->engine.fifo.tlb_flush(dev);
166 nv50_vm_flush(dev, 0); 166 dev_priv->engine.graph.tlb_flush(dev);
167 } 167 }
168 168
169 nvbe->bound = false; 169 nvbe->bound = false;
@@ -224,7 +224,11 @@ nouveau_sgdma_init(struct drm_device *dev)
224 int i, ret; 224 int i, ret;
225 225
226 if (dev_priv->card_type < NV_50) { 226 if (dev_priv->card_type < NV_50) {
227 aper_size = (64 * 1024 * 1024); 227 if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
228 aper_size = 64 * 1024 * 1024;
229 else
230 aper_size = 512 * 1024 * 1024;
231
228 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; 232 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
229 obj_size += 8; /* ctxdma header */ 233 obj_size += 8; /* ctxdma header */
230 } else { 234 } else {
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index ed7757f14083..049f755567e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -354,6 +354,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
354 engine->graph.destroy_context = nv50_graph_destroy_context; 354 engine->graph.destroy_context = nv50_graph_destroy_context;
355 engine->graph.load_context = nv50_graph_load_context; 355 engine->graph.load_context = nv50_graph_load_context;
356 engine->graph.unload_context = nv50_graph_unload_context; 356 engine->graph.unload_context = nv50_graph_unload_context;
357 if (dev_priv->chipset != 0x86)
358 engine->graph.tlb_flush = nv50_graph_tlb_flush;
359 else {
360 /* From what I can see, NVIDIA does this on every
361 * pre-NVA3 board except NVAC, but we've only
362 * ever seen problems on NV86
363 */
364 engine->graph.tlb_flush = nv86_graph_tlb_flush;
365 }
357 engine->fifo.channels = 128; 366 engine->fifo.channels = 128;
358 engine->fifo.init = nv50_fifo_init; 367 engine->fifo.init = nv50_fifo_init;
359 engine->fifo.takedown = nv50_fifo_takedown; 368 engine->fifo.takedown = nv50_fifo_takedown;
@@ -365,6 +374,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
365 engine->fifo.destroy_context = nv50_fifo_destroy_context; 374 engine->fifo.destroy_context = nv50_fifo_destroy_context;
366 engine->fifo.load_context = nv50_fifo_load_context; 375 engine->fifo.load_context = nv50_fifo_load_context;
367 engine->fifo.unload_context = nv50_fifo_unload_context; 376 engine->fifo.unload_context = nv50_fifo_unload_context;
377 engine->fifo.tlb_flush = nv50_fifo_tlb_flush;
368 engine->display.early_init = nv50_display_early_init; 378 engine->display.early_init = nv50_display_early_init;
369 engine->display.late_takedown = nv50_display_late_takedown; 379 engine->display.late_takedown = nv50_display_late_takedown;
370 engine->display.create = nv50_display_create; 380 engine->display.create = nv50_display_create;
@@ -1041,6 +1051,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1041 case NOUVEAU_GETPARAM_PTIMER_TIME: 1051 case NOUVEAU_GETPARAM_PTIMER_TIME:
1042 getparam->value = dev_priv->engine.timer.read(dev); 1052 getparam->value = dev_priv->engine.timer.read(dev);
1043 break; 1053 break;
1054 case NOUVEAU_GETPARAM_HAS_BO_USAGE:
1055 getparam->value = 1;
1056 break;
1044 case NOUVEAU_GETPARAM_GRAPH_UNITS: 1057 case NOUVEAU_GETPARAM_GRAPH_UNITS:
1045 /* NV40 and NV50 versions are quite different, but register 1058 /* NV40 and NV50 versions are quite different, but register
1046 * address is the same. User is supposed to know the card 1059 * address is the same. User is supposed to know the card
@@ -1051,7 +1064,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1051 } 1064 }
1052 /* FALLTHRU */ 1065 /* FALLTHRU */
1053 default: 1066 default:
1054 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); 1067 NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
1055 return -EINVAL; 1068 return -EINVAL;
1056 } 1069 }
1057 1070
@@ -1066,7 +1079,7 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
1066 1079
1067 switch (setparam->param) { 1080 switch (setparam->param) {
1068 default: 1081 default:
1069 NV_ERROR(dev, "unknown parameter %lld\n", setparam->param); 1082 NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param);
1070 return -EINVAL; 1083 return -EINVAL;
1071 } 1084 }
1072 1085
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 16bbbf1eff63..7ecc4adc1e45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -191,7 +191,7 @@ nv40_temp_get(struct drm_device *dev)
191 int offset = sensor->offset_mult / sensor->offset_div; 191 int offset = sensor->offset_mult / sensor->offset_div;
192 int core_temp; 192 int core_temp;
193 193
194 if (dev_priv->chipset >= 0x50) { 194 if (dev_priv->card_type >= NV_50) {
195 core_temp = nv_rd32(dev, 0x20008); 195 core_temp = nv_rd32(dev, 0x20008);
196 } else { 196 } else {
197 core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; 197 core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index c71abc2a34d5..40e180741629 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -158,7 +158,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
158{ 158{
159 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 159 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
160 struct drm_device *dev = crtc->dev; 160 struct drm_device *dev = crtc->dev;
161 struct drm_connector *connector;
162 unsigned char seq1 = 0, crtc17 = 0; 161 unsigned char seq1 = 0, crtc17 = 0;
163 unsigned char crtc1A; 162 unsigned char crtc1A;
164 163
@@ -213,10 +212,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
213 NVVgaSeqReset(dev, nv_crtc->index, false); 212 NVVgaSeqReset(dev, nv_crtc->index, false);
214 213
215 NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); 214 NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
216
217 /* Update connector polling modes */
218 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
219 nouveau_connector_set_polling(connector);
220} 215}
221 216
222static bool 217static bool
@@ -831,7 +826,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
831 /* Update the framebuffer location. */ 826 /* Update the framebuffer location. */
832 regp->fb_start = nv_crtc->fb.offset & ~3; 827 regp->fb_start = nv_crtc->fb.offset & ~3;
833 regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8); 828 regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
834 NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start); 829 nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
835 830
836 /* Update the arbitration parameters. */ 831 /* Update the arbitration parameters. */
837 nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel, 832 nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index c936403b26e2..ef23550407b5 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -185,14 +185,15 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
185 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 185 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
186 struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); 186 struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
187 187
188 /* For internal panels and gpu scaling on DVI we need the native mode */ 188 if (!nv_connector->native_mode ||
189 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { 189 nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
190 if (!nv_connector->native_mode) 190 mode->hdisplay > nv_connector->native_mode->hdisplay ||
191 return false; 191 mode->vdisplay > nv_connector->native_mode->vdisplay) {
192 nv_encoder->mode = *adjusted_mode;
193
194 } else {
192 nv_encoder->mode = *nv_connector->native_mode; 195 nv_encoder->mode = *nv_connector->native_mode;
193 adjusted_mode->clock = nv_connector->native_mode->clock; 196 adjusted_mode->clock = nv_connector->native_mode->clock;
194 } else {
195 nv_encoder->mode = *adjusted_mode;
196 } 197 }
197 198
198 return true; 199 return true;
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 6a6eb697d38e..eb1c70dd82ed 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -76,6 +76,15 @@ nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
76 reg += 4; 76 reg += 4;
77 77
78 nouveau_hw_setpll(dev, reg, &state->calc); 78 nouveau_hw_setpll(dev, reg, &state->calc);
79
80 if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) {
81 if (dev_priv->card_type == NV_20)
82 nv_mask(dev, 0x1002c4, 0, 1 << 20);
83
84 /* Reset the DLLs */
85 nv_mask(dev, 0x1002c0, 0, 1 << 8);
86 }
87
79 kfree(state); 88 kfree(state);
80} 89}
81 90
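
The DLL reset above leans on nv_mask() to flip single bits (1 << 20, 1 << 8). Assuming the usual read-modify-write semantics for such a helper -- read, clear the mask bits, OR in the value, write back -- a stand-alone model looks like this; reg_read()/reg_write()/reg_mask() are hypothetical, not nouveau's accessors:

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[1];	/* toy register file */

static uint32_t reg_read(int r)              { return regs[r]; }
static void     reg_write(int r, uint32_t v) { regs[r] = v; }

/* Read-modify-write: clear 'mask' bits, then set 'val' bits. */
static uint32_t reg_mask(int r, uint32_t mask, uint32_t val)
{
	uint32_t tmp = reg_read(r);
	reg_write(r, (tmp & ~mask) | val);
	return tmp;	/* old value, handy for save/restore */
}

int main(void)
{
	regs[0] = 0x00000010;
	reg_mask(0, 0, 1 << 8);	/* set bit 8 without touching the rest */
	printf("reg = 0x%08x\n", regs[0]);	/* 0x00000110 */
	return 0;
}
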
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
index 2cdc2bfe7179..de81151648f8 100644
--- a/drivers/gpu/drm/nouveau/nv50_calc.c
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -51,24 +51,28 @@ nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
51 int *N, int *fN, int *M, int *P) 51 int *N, int *fN, int *M, int *P)
52{ 52{
53 fixed20_12 fb_div, a, b; 53 fixed20_12 fb_div, a, b;
54 u32 refclk = pll->refclk / 10;
55 u32 max_vco_freq = pll->vco1.maxfreq / 10;
56 u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10;
57 clk /= 10;
54 58
55 *P = pll->vco1.maxfreq / clk; 59 *P = max_vco_freq / clk;
56 if (*P > pll->max_p) 60 if (*P > pll->max_p)
57 *P = pll->max_p; 61 *P = pll->max_p;
58 if (*P < pll->min_p) 62 if (*P < pll->min_p)
59 *P = pll->min_p; 63 *P = pll->min_p;
60 64
61 /* *M = ceil(refclk / pll->vco.max_inputfreq); */ 65 /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */
62 a.full = dfixed_const(pll->refclk); 66 a.full = dfixed_const(refclk + max_vco_inputfreq);
63 b.full = dfixed_const(pll->vco1.max_inputfreq); 67 b.full = dfixed_const(max_vco_inputfreq);
64 a.full = dfixed_div(a, b); 68 a.full = dfixed_div(a, b);
65 a.full = dfixed_ceil(a); 69 a.full = dfixed_floor(a);
66 *M = dfixed_trunc(a); 70 *M = dfixed_trunc(a);
67 71
68 /* fb_div = (vco * *M) / refclk; */ 72 /* fb_div = (vco * *M) / refclk; */
69 fb_div.full = dfixed_const(clk * *P); 73 fb_div.full = dfixed_const(clk * *P);
70 fb_div.full = dfixed_mul(fb_div, a); 74 fb_div.full = dfixed_mul(fb_div, a);
71 a.full = dfixed_const(pll->refclk); 75 a.full = dfixed_const(refclk);
72 fb_div.full = dfixed_div(fb_div, a); 76 fb_div.full = dfixed_div(fb_div, a);
73 77
74 /* *N = floor(fb_div); */ 78 /* *N = floor(fb_div); */
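
Two things change in nv50_calc_pll2(): every frequency is pre-divided by 10 so intermediate fixed20_12 products fit the format's 20 integer bits, and *M becomes floor((refclk + max_input) / max_input) rather than ceil(refclk / max_input). Because floor((a + b) / b) = floor(a / b) + 1 >= ceil(a / b), the divider can only grow, which keeps refclk / M strictly below the VCO input limit. In plain integer math (values illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative values in units of 10 kHz, matching the /10
	 * scaling the patch applies before the fixed-point math. */
	uint32_t refclk = 2700;            /* 27 MHz reference */
	uint32_t max_vco_inputfreq = 400;  /* 4 MHz max VCO input */

	/* floor((refclk + max) / max): one more than floor(refclk / max),
	 * so refclk / M always lands strictly under the limit. */
	uint32_t M = (refclk + max_vco_inputfreq) / max_vco_inputfreq;

	printf("M = %u, refclk/M = %u (limit %u)\n",
	       M, refclk / M, max_vco_inputfreq);
	return 0;
}
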
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 16380d52cd88..56476d0c6de8 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -546,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
546 } 546 }
547 547
548 nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; 548 nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
549 nv_crtc->fb.tile_flags = fb->nvbo->tile_flags; 549 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
550 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; 550 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
551 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { 551 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
552 ret = RING_SPACE(evo, 2); 552 ret = RING_SPACE(evo, 2);
@@ -578,7 +578,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
578 fb->nvbo->tile_mode); 578 fb->nvbo->tile_mode);
579 } 579 }
580 if (dev_priv->chipset == 0x50) 580 if (dev_priv->chipset == 0x50)
581 OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format); 581 OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
582 else 582 else
583 OUT_RING(evo, format); 583 OUT_RING(evo, format);
584 584
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 55c9663ef2bf..f624c611ddea 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1032,11 +1032,18 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
1032 struct drm_connector *connector; 1032 struct drm_connector *connector;
1033 const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 1033 const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
1034 uint32_t unplug_mask, plug_mask, change_mask; 1034 uint32_t unplug_mask, plug_mask, change_mask;
1035 uint32_t hpd0, hpd1 = 0; 1035 uint32_t hpd0, hpd1;
1036 1036
1037 hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); 1037 spin_lock_irq(&dev_priv->hpd_state.lock);
1038 hpd0 = dev_priv->hpd_state.hpd0_bits;
1039 dev_priv->hpd_state.hpd0_bits = 0;
1040 hpd1 = dev_priv->hpd_state.hpd1_bits;
1041 dev_priv->hpd_state.hpd1_bits = 0;
1042 spin_unlock_irq(&dev_priv->hpd_state.lock);
1043
1044 hpd0 &= nv_rd32(dev, 0xe050);
1038 if (dev_priv->chipset >= 0x90) 1045 if (dev_priv->chipset >= 0x90)
1039 hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); 1046 hpd1 &= nv_rd32(dev, 0xe070);
1040 1047
1041 plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); 1048 plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
1042 unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); 1049 unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
@@ -1078,10 +1085,6 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
1078 helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); 1085 helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
1079 } 1086 }
1080 1087
1081 nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
1082 if (dev_priv->chipset >= 0x90)
1083 nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
1084
1085 drm_helper_hpd_irq_event(dev); 1088 drm_helper_hpd_irq_event(dev);
1086} 1089}
1087 1090
@@ -1092,8 +1095,22 @@ nv50_display_irq_handler(struct drm_device *dev)
1092 uint32_t delayed = 0; 1095 uint32_t delayed = 0;
1093 1096
1094 if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { 1097 if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
1095 if (!work_pending(&dev_priv->hpd_work)) 1098 uint32_t hpd0_bits, hpd1_bits = 0;
1096 queue_work(dev_priv->wq, &dev_priv->hpd_work); 1099
1100 hpd0_bits = nv_rd32(dev, 0xe054);
1101 nv_wr32(dev, 0xe054, hpd0_bits);
1102
1103 if (dev_priv->chipset >= 0x90) {
1104 hpd1_bits = nv_rd32(dev, 0xe074);
1105 nv_wr32(dev, 0xe074, hpd1_bits);
1106 }
1107
1108 spin_lock(&dev_priv->hpd_state.lock);
1109 dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
1110 dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
1111 spin_unlock(&dev_priv->hpd_state.lock);
1112
1113 queue_work(dev_priv->wq, &dev_priv->hpd_work);
1097 } 1114 }
1098 1115
1099 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { 1116 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
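
The hotplug rework closes a race: the bottom half used to re-read and ack 0xe054/0xe074 itself, so pulses landing between the read and the ack could be lost, and queue_work() was skipped while work was still pending. Now the top half acks immediately and ORs the status into hpd_state under a spinlock, and the work function swaps the bits out atomically. A user-space model of that latch/consume handoff, with a pthread mutex in place of the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t pending_bits;	/* latched interrupt status */

/* Top half: ack hardware right away, accumulate bits for later. */
static void irq_top_half(uint32_t hw_status)
{
	pthread_mutex_lock(&lock);
	pending_bits |= hw_status;	/* OR, so back-to-back IRQs merge */
	pthread_mutex_unlock(&lock);
}

/* Bottom half: take ownership of everything latched so far. */
static uint32_t irq_bottom_half(void)
{
	uint32_t bits;

	pthread_mutex_lock(&lock);
	bits = pending_bits;
	pending_bits = 0;
	pthread_mutex_unlock(&lock);
	return bits;
}

int main(void)
{
	irq_top_half(0x0001);
	irq_top_half(0x0100);	/* second pulse before the work runs */
	printf("bottom half sees 0x%04x\n", irq_bottom_half());	/* 0x0101 */
	return 0;
}
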
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index a46a961102f3..1da65bd60c10 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -464,3 +464,8 @@ nv50_fifo_unload_context(struct drm_device *dev)
464 return 0; 464 return 0;
465} 465}
466 466
467void
468nv50_fifo_tlb_flush(struct drm_device *dev)
469{
470 nv50_vm_flush(dev, 5);
471}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index cbf5ae2f67d4..8b669d0af610 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -402,3 +402,55 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
402 { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ 402 { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
403 {} 403 {}
404}; 404};
405
406void
407nv50_graph_tlb_flush(struct drm_device *dev)
408{
409 nv50_vm_flush(dev, 0);
410}
411
412void
413nv86_graph_tlb_flush(struct drm_device *dev)
414{
415 struct drm_nouveau_private *dev_priv = dev->dev_private;
416 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
417 bool idle, timeout = false;
418 unsigned long flags;
419 u64 start;
420 u32 tmp;
421
422 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
423 nv_mask(dev, 0x400500, 0x00000001, 0x00000000);
424
425 start = ptimer->read(dev);
426 do {
427 idle = true;
428
429 for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
430 if ((tmp & 7) == 1)
431 idle = false;
432 }
433
434 for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
435 if ((tmp & 7) == 1)
436 idle = false;
437 }
438
439 for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
440 if ((tmp & 7) == 1)
441 idle = false;
442 }
443 } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));
444
445 if (timeout) {
446 NV_ERROR(dev, "PGRAPH TLB flush idle timed out: "
447 "0x%08x 0x%08x 0x%08x 0x%08x\n",
448 nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
449 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
450 }
451
452 nv50_vm_flush(dev, 0);
453
454 nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
455 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
456}
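
nv86_graph_tlb_flush() waits for PGRAPH to idle before flushing, scanning 0x400380/0x400384/0x400388 in 3-bit groups (a group value of 1 evidently meaning busy) against a 2-second PTIMER deadline, and flushes anyway with an error if the wait times out. The bare poll-until-idle-with-timeout skeleton, independent of the register layout; unit_busy() is a stand-in for the status scan:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static int unit_busy(void) { return 0; }	/* stand-in for the status scan */

int main(void)
{
	uint64_t start = now_ns();
	int idle, timeout = 0;

	do {
		idle = !unit_busy();
	} while (!idle && !(timeout = now_ns() - start > 2000000000ull));

	if (timeout)
		fprintf(stderr, "idle wait timed out, flushing anyway\n");
	printf("idle=%d timeout=%d\n", idle, timeout);
	return 0;
}
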
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a53fc974332b..b773229b7647 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -402,7 +402,6 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
402 } 402 }
403 dev_priv->engine.instmem.flush(dev); 403 dev_priv->engine.instmem.flush(dev);
404 404
405 nv50_vm_flush(dev, 4);
406 nv50_vm_flush(dev, 6); 405 nv50_vm_flush(dev, 6);
407 406
408 gpuobj->im_bound = 1; 407 gpuobj->im_bound = 1;
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 8e421f644a54..05efb5b9f13e 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -112,6 +112,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
112 base += 3; 112 base += 3;
113 break; 113 break;
114 case ATOM_IIO_WRITE: 114 case ATOM_IIO_WRITE:
115 (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
115 ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); 116 ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
116 base += 3; 117 base += 3;
117 break; 118 break;
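
The atom.c one-liner issues a throwaway ioreg_read() before every ATOM_IIO_WRITE; presumably some boards need the read cycle for the subsequent write to take effect, and the (void) cast documents that the value is deliberately dropped. The idiom in isolation, with ioreg_read()/ioreg_write() mocked:

#include <stdint.h>
#include <stdio.h>

static uint32_t ioreg_read(uint16_t reg)
{
	printf("read  0x%04x (side effect only)\n", reg);
	return 0;
}

static void ioreg_write(uint16_t reg, uint32_t val)
{
	printf("write 0x%04x = 0x%08x\n", reg, val);
}

int main(void)
{
	uint16_t reg = 0x1234;

	/* Read purely for its hardware side effect; the (void) cast
	 * documents that the value is intentionally discarded. */
	(void)ioreg_read(reg);
	ioreg_write(reg, 0xdeadbeef);
	return 0;
}
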
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index df2b6f2b35f8..9fbabaa6ee44 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -253,7 +253,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
253 case DRM_MODE_DPMS_SUSPEND: 253 case DRM_MODE_DPMS_SUSPEND:
254 case DRM_MODE_DPMS_OFF: 254 case DRM_MODE_DPMS_OFF:
255 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); 255 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
256 atombios_blank_crtc(crtc, ATOM_ENABLE); 256 if (radeon_crtc->enabled)
257 atombios_blank_crtc(crtc, ATOM_ENABLE);
257 if (ASIC_IS_DCE3(rdev)) 258 if (ASIC_IS_DCE3(rdev))
258 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); 259 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
259 atombios_enable_crtc(crtc, ATOM_DISABLE); 260 atombios_enable_crtc(crtc, ATOM_DISABLE);
@@ -530,7 +531,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
530 dp_clock = dig_connector->dp_clock; 531 dp_clock = dig_connector->dp_clock;
531 } 532 }
532 } 533 }
533 534#if 0 /* doesn't work properly on some laptops */
534 /* use recommended ref_div for ss */ 535 /* use recommended ref_div for ss */
535 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 536 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
536 if (ss_enabled) { 537 if (ss_enabled) {
@@ -540,7 +541,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
540 } 541 }
541 } 542 }
542 } 543 }
543 544#endif
544 if (ASIC_IS_AVIVO(rdev)) { 545 if (ASIC_IS_AVIVO(rdev)) {
545 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 546 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
546 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 547 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f12a5b3ec050..7b337c361a12 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -748,6 +748,8 @@ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
748 unsigned i; 748 unsigned i;
749 u32 tmp; 749 u32 tmp;
750 750
751 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
752
751 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); 753 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
752 for (i = 0; i < rdev->usec_timeout; i++) { 754 for (i = 0; i < rdev->usec_timeout; i++) {
753 /* read MC_STATUS */ 755 /* read MC_STATUS */
@@ -1650,7 +1652,36 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1650 } 1652 }
1651 } 1653 }
1652 1654
1653 rdev->config.evergreen.tile_config = gb_addr_config; 1655 /* set up the tiling info dword. gb_addr_config is not adequate since it does
1656 * not have bank info, so create a custom tiling dword.
1657 * bits 3:0 num_pipes
1658 * bits 7:4 num_banks
1659 * bits 11:8 group_size
1660 * bits 15:12 row_size
1661 */
1662 rdev->config.evergreen.tile_config = 0;
1663 switch (rdev->config.evergreen.max_tile_pipes) {
1664 case 1:
1665 default:
1666 rdev->config.evergreen.tile_config |= (0 << 0);
1667 break;
1668 case 2:
1669 rdev->config.evergreen.tile_config |= (1 << 0);
1670 break;
1671 case 4:
1672 rdev->config.evergreen.tile_config |= (2 << 0);
1673 break;
1674 case 8:
1675 rdev->config.evergreen.tile_config |= (3 << 0);
1676 break;
1677 }
1678 rdev->config.evergreen.tile_config |=
1679 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
1680 rdev->config.evergreen.tile_config |=
1681 ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
1682 rdev->config.evergreen.tile_config |=
1683 ((gb_addr_config & 0x30000000) >> 28) << 12;
1684
1654 WREG32(GB_BACKEND_MAP, gb_backend_map); 1685 WREG32(GB_BACKEND_MAP, gb_backend_map);
1655 WREG32(GB_ADDR_CONFIG, gb_addr_config); 1686 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1656 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 1687 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
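
The custom tile_config dword packs encoded values, not raw counts: bits 3:0 carry a log2-style code for num_pipes, and the bank, group-size and row-size codes are copied straight from MC_ARB_RAMCFG/GB_ADDR_CONFIG fields. A decode sketch from the consumer's side, matching the field layout in the comment above; the example value and the 1 << code expansion for pipes are illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tile_config = 0x00001021;	/* example value */

	/* Field layout from the comment in the hunk above:
	 * bits 3:0 num_pipes, 7:4 num_banks, 11:8 group_size,
	 * 15:12 row_size -- each stored as an encoded code, not a raw
	 * count, so consumers typically expand with 1 << code. */
	unsigned num_pipes  = 1 << ((tile_config >> 0) & 0xf);
	unsigned bank_code  = (tile_config >> 4) & 0xf;
	unsigned group_code = (tile_config >> 8) & 0xf;
	unsigned row_code   = (tile_config >> 12) & 0xf;

	printf("pipes=%u bank_code=%u group_code=%u row_code=%u\n",
	       num_pipes, bank_code, group_code, row_code);
	return 0;
}
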
@@ -1893,7 +1924,6 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
1893static int evergreen_gpu_soft_reset(struct radeon_device *rdev) 1924static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
1894{ 1925{
1895 struct evergreen_mc_save save; 1926 struct evergreen_mc_save save;
1896 u32 srbm_reset = 0;
1897 u32 grbm_reset = 0; 1927 u32 grbm_reset = 0;
1898 1928
1899 dev_info(rdev->dev, "GPU softreset \n"); 1929 dev_info(rdev->dev, "GPU softreset \n");
@@ -1932,16 +1962,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
1932 udelay(50); 1962 udelay(50);
1933 WREG32(GRBM_SOFT_RESET, 0); 1963 WREG32(GRBM_SOFT_RESET, 0);
1934 (void)RREG32(GRBM_SOFT_RESET); 1964 (void)RREG32(GRBM_SOFT_RESET);
1935
1936 /* reset all the system blocks */
1937 srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
1938
1939 dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
1940 WREG32(SRBM_SOFT_RESET, srbm_reset);
1941 (void)RREG32(SRBM_SOFT_RESET);
1942 udelay(50);
1943 WREG32(SRBM_SOFT_RESET, 0);
1944 (void)RREG32(SRBM_SOFT_RESET);
1945 /* Wait a little for things to settle down */ 1965 /* Wait a little for things to settle down */
1946 udelay(50); 1966 udelay(50);
1947 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 1967 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
@@ -1952,10 +1972,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
1952 RREG32(GRBM_STATUS_SE1)); 1972 RREG32(GRBM_STATUS_SE1));
1953 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 1973 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
1954 RREG32(SRBM_STATUS)); 1974 RREG32(SRBM_STATUS));
1955 /* After reset we need to reinit the asic as GPU often endup in an
1956 * incoherent state.
1957 */
1958 atom_asic_init(rdev->mode_info.atom_context);
1959 evergreen_mc_resume(rdev, &save); 1975 evergreen_mc_resume(rdev, &save);
1960 return 0; 1976 return 0;
1961} 1977}
@@ -2033,7 +2049,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
2033 u32 grbm_int_cntl = 0; 2049 u32 grbm_int_cntl = 0;
2034 2050
2035 if (!rdev->irq.installed) { 2051 if (!rdev->irq.installed) {
2036 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 2052 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2037 return -EINVAL; 2053 return -EINVAL;
2038 } 2054 }
2039 /* don't enable anything if the ih is disabled */ 2055 /* don't enable anything if the ih is disabled */
@@ -2295,6 +2311,7 @@ restart_ih:
2295 case 0: /* D1 vblank */ 2311 case 0: /* D1 vblank */
2296 if (disp_int & LB_D1_VBLANK_INTERRUPT) { 2312 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2297 drm_handle_vblank(rdev->ddev, 0); 2313 drm_handle_vblank(rdev->ddev, 0);
2314 rdev->pm.vblank_sync = true;
2298 wake_up(&rdev->irq.vblank_queue); 2315 wake_up(&rdev->irq.vblank_queue);
2299 disp_int &= ~LB_D1_VBLANK_INTERRUPT; 2316 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2300 DRM_DEBUG("IH: D1 vblank\n"); 2317 DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2333,7 @@ restart_ih:
2316 case 0: /* D2 vblank */ 2333 case 0: /* D2 vblank */
2317 if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 2334 if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
2318 drm_handle_vblank(rdev->ddev, 1); 2335 drm_handle_vblank(rdev->ddev, 1);
2336 rdev->pm.vblank_sync = true;
2319 wake_up(&rdev->irq.vblank_queue); 2337 wake_up(&rdev->irq.vblank_queue);
2320 disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; 2338 disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
2321 DRM_DEBUG("IH: D2 vblank\n"); 2339 DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2355,7 @@ restart_ih:
2337 case 0: /* D3 vblank */ 2355 case 0: /* D3 vblank */
2338 if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 2356 if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
2339 drm_handle_vblank(rdev->ddev, 2); 2357 drm_handle_vblank(rdev->ddev, 2);
2358 rdev->pm.vblank_sync = true;
2340 wake_up(&rdev->irq.vblank_queue); 2359 wake_up(&rdev->irq.vblank_queue);
2341 disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; 2360 disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
2342 DRM_DEBUG("IH: D3 vblank\n"); 2361 DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2377,7 @@ restart_ih:
2358 case 0: /* D4 vblank */ 2377 case 0: /* D4 vblank */
2359 if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 2378 if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
2360 drm_handle_vblank(rdev->ddev, 3); 2379 drm_handle_vblank(rdev->ddev, 3);
2380 rdev->pm.vblank_sync = true;
2361 wake_up(&rdev->irq.vblank_queue); 2381 wake_up(&rdev->irq.vblank_queue);
2362 disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; 2382 disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
2363 DRM_DEBUG("IH: D4 vblank\n"); 2383 DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2399,7 @@ restart_ih:
2379 case 0: /* D5 vblank */ 2399 case 0: /* D5 vblank */
2380 if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 2400 if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
2381 drm_handle_vblank(rdev->ddev, 4); 2401 drm_handle_vblank(rdev->ddev, 4);
2402 rdev->pm.vblank_sync = true;
2382 wake_up(&rdev->irq.vblank_queue); 2403 wake_up(&rdev->irq.vblank_queue);
2383 disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 2404 disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
2384 DRM_DEBUG("IH: D5 vblank\n"); 2405 DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2421,7 @@ restart_ih:
2400 case 0: /* D6 vblank */ 2421 case 0: /* D6 vblank */
2401 if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { 2422 if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
2402 drm_handle_vblank(rdev->ddev, 5); 2423 drm_handle_vblank(rdev->ddev, 5);
2424 rdev->pm.vblank_sync = true;
2403 wake_up(&rdev->irq.vblank_queue); 2425 wake_up(&rdev->irq.vblank_queue);
2404 disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 2426 disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
2405 DRM_DEBUG("IH: D6 vblank\n"); 2427 DRM_DEBUG("IH: D6 vblank\n");
@@ -2561,6 +2583,11 @@ int evergreen_resume(struct radeon_device *rdev)
2561{ 2583{
2562 int r; 2584 int r;
2563 2585
2586 /* reset the asic, the gfx blocks are often in a bad state
2587 * after the driver is unloaded or after a resume
2588 */
2589 if (radeon_asic_reset(rdev))
2590 dev_warn(rdev->dev, "GPU reset failed!\n");
2564 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, 2591 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
2565 * posting will perform necessary task to bring back GPU into good 2592 * posting will perform necessary task to bring back GPU into good
2566 * shape. 2593 * shape.
@@ -2677,6 +2704,11 @@ int evergreen_init(struct radeon_device *rdev)
2677 r = radeon_atombios_init(rdev); 2704 r = radeon_atombios_init(rdev);
2678 if (r) 2705 if (r)
2679 return r; 2706 return r;
2707 /* reset the asic, the gfx blocks are often in a bad state
2708 * after the driver is unloaded or after a resume
2709 */
2710 if (radeon_asic_reset(rdev))
2711 dev_warn(rdev->dev, "GPU reset failed!\n");
2680 /* Post card if necessary */ 2712 /* Post card if necessary */
2681 if (!evergreen_card_posted(rdev)) { 2713 if (!evergreen_card_posted(rdev)) {
2682 if (!rdev->bios) { 2714 if (!rdev->bios) {
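
Alongside the reset-on-init/resume calls, every vblank branch of the IRQ handler now sets rdev->pm.vblank_sync before waking rdev->irq.vblank_queue; the likely consumer is power-management code that wants to reclock on a vblank edge, though that pairing is an assumption here. The usual flag-plus-wake shape, modeled with a pthread condvar instead of a kernel waitqueue:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static int vblank_sync;	/* the flag the IRQ handler sets */

/* "IRQ" side: note the vblank, wake any waiter. */
static void vblank_irq(void)
{
	pthread_mutex_lock(&lock);
	vblank_sync = 1;
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);
}

static void *irq_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100; i++) {
		usleep(1000);	/* pretend a vblank ticks every 1 ms */
		vblank_irq();
	}
	return NULL;
}

/* Waiter side: clear the flag, sleep until the next edge arrives. */
static void wait_for_vblank(void)
{
	pthread_mutex_lock(&lock);
	vblank_sync = 0;
	while (!vblank_sync)	/* a real driver bounds this with a timeout */
		pthread_cond_wait(&wq, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, irq_thread, NULL);
	wait_for_vblank();
	printf("saw a vblank edge\n");
	pthread_join(t, NULL);
	return 0;
}
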
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index ac3b6dde23db..e0e590110dd4 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -459,7 +459,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
459 obj_size += evergreen_ps_size * 4; 459 obj_size += evergreen_ps_size * 4;
460 obj_size = ALIGN(obj_size, 256); 460 obj_size = ALIGN(obj_size, 256);
461 461
462 r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, 462 r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
463 &rdev->r600_blit.shader_obj); 463 &rdev->r600_blit.shader_obj);
464 if (r) { 464 if (r) {
465 DRM_ERROR("evergreen failed to allocate shader\n"); 465 DRM_ERROR("evergreen failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 113c70cc8b39..a73b53c44359 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -174,6 +174,7 @@
174#define HDP_NONSURFACE_BASE 0x2C04 174#define HDP_NONSURFACE_BASE 0x2C04
175#define HDP_NONSURFACE_INFO 0x2C08 175#define HDP_NONSURFACE_INFO 0x2C08
176#define HDP_NONSURFACE_SIZE 0x2C0C 176#define HDP_NONSURFACE_SIZE 0x2C0C
177#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
177#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 178#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
178#define HDP_TILING_CONFIG 0x2F3C 179#define HDP_TILING_CONFIG 0x2F3C
179 180
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0e8f28a68927..8e10aa9f74b0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
442 int r; 442 int r;
443 443
444 if (rdev->gart.table.ram.ptr) { 444 if (rdev->gart.table.ram.ptr) {
445 WARN(1, "R100 PCI GART already initialized.\n"); 445 WARN(1, "R100 PCI GART already initialized\n");
446 return 0; 446 return 0;
447 } 447 }
448 /* Initialize common gart structure */ 448 /* Initialize common gart structure */
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
516 uint32_t tmp = 0; 516 uint32_t tmp = 0;
517 517
518 if (!rdev->irq.installed) { 518 if (!rdev->irq.installed) {
519 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 519 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
520 WREG32(R_000040_GEN_INT_CNTL, 0); 520 WREG32(R_000040_GEN_INT_CNTL, 0);
521 return -EINVAL; 521 return -EINVAL;
522 } 522 }
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 34527e600fe9..cde1d3480d93 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
91 int r; 91 int r;
92 92
93 if (rdev->gart.table.vram.robj) { 93 if (rdev->gart.table.vram.robj) {
94 WARN(1, "RV370 PCIE GART already initialized.\n"); 94 WARN(1, "RV370 PCIE GART already initialized\n");
95 return 0; 95 return 0;
96 } 96 }
97 /* Initialize common gart structure */ 97 /* Initialize common gart structure */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 33952a12f0a3..9c92db7c896b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
97{ 97{
98 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> 98 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
99 ASIC_T_SHIFT; 99 ASIC_T_SHIFT;
100 u32 actual_temp = 0;
101 100
102 if ((temp >> 7) & 1) 101 return temp * 1000;
103 actual_temp = 0;
104 else
105 actual_temp = (temp >> 1) & 0xff;
106
107 return actual_temp * 1000;
108} 102}
109 103
110void r600_pm_get_dynpm_state(struct radeon_device *rdev) 104void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -884,12 +878,15 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
884 u32 tmp; 878 u32 tmp;
885 879
886 /* flush hdp cache so updates hit vram */ 880 /* flush hdp cache so updates hit vram */
887 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { 881 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
882 !(rdev->flags & RADEON_IS_AGP)) {
888 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; 883 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
889 u32 tmp; 884 u32 tmp;
890 885
891 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 886 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
892 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 887 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
888 * This seems to cause problems on some AGP cards. Just use the old
889 * method for them.
893 */ 890 */
894 WREG32(HDP_DEBUG1, 0); 891 WREG32(HDP_DEBUG1, 0);
895 tmp = readl((void __iomem *)ptr); 892 tmp = readl((void __iomem *)ptr);
@@ -919,7 +916,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
919 int r; 916 int r;
920 917
921 if (rdev->gart.table.vram.robj) { 918 if (rdev->gart.table.vram.robj) {
922 WARN(1, "R600 PCIE GART already initialized.\n"); 919 WARN(1, "R600 PCIE GART already initialized\n");
923 return 0; 920 return 0;
924 } 921 }
925 /* Initialize common gart structure */ 922 /* Initialize common gart structure */
@@ -1201,8 +1198,10 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1201 mc->vram_end, mc->real_vram_size >> 20); 1198 mc->vram_end, mc->real_vram_size >> 20);
1202 } else { 1199 } else {
1203 u64 base = 0; 1200 u64 base = 0;
1204 if (rdev->flags & RADEON_IS_IGP) 1201 if (rdev->flags & RADEON_IS_IGP) {
1205 base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; 1202 base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1203 base <<= 24;
1204 }
1206 radeon_vram_location(rdev, &rdev->mc, base); 1205 radeon_vram_location(rdev, &rdev->mc, base);
1207 rdev->mc.gtt_base_align = 0; 1206 rdev->mc.gtt_base_align = 0;
1208 radeon_gtt_location(rdev, mc); 1207 radeon_gtt_location(rdev, mc);
@@ -1343,13 +1342,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
1343 u32 srbm_status; 1342 u32 srbm_status;
1344 u32 grbm_status; 1343 u32 grbm_status;
1345 u32 grbm_status2; 1344 u32 grbm_status2;
1345 struct r100_gpu_lockup *lockup;
1346 int r; 1346 int r;
1347 1347
1348 if (rdev->family >= CHIP_RV770)
1349 lockup = &rdev->config.rv770.lockup;
1350 else
1351 lockup = &rdev->config.r600.lockup;
1352
1348 srbm_status = RREG32(R_000E50_SRBM_STATUS); 1353 srbm_status = RREG32(R_000E50_SRBM_STATUS);
1349 grbm_status = RREG32(R_008010_GRBM_STATUS); 1354 grbm_status = RREG32(R_008010_GRBM_STATUS);
1350 grbm_status2 = RREG32(R_008014_GRBM_STATUS2); 1355 grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1351 if (!G_008010_GUI_ACTIVE(grbm_status)) { 1356 if (!G_008010_GUI_ACTIVE(grbm_status)) {
1352 r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp); 1357 r100_gpu_lockup_update(lockup, &rdev->cp);
1353 return false; 1358 return false;
1354 } 1359 }
1355 /* force CP activities */ 1360 /* force CP activities */
@@ -1361,7 +1366,7 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
1361 radeon_ring_unlock_commit(rdev); 1366 radeon_ring_unlock_commit(rdev);
1362 } 1367 }
1363 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); 1368 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
1364 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp); 1369 return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
1365} 1370}
1366 1371
1367int r600_asic_reset(struct radeon_device *rdev) 1372int r600_asic_reset(struct radeon_device *rdev)
@@ -2724,7 +2729,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
2724 /* Allocate ring buffer */ 2729 /* Allocate ring buffer */
2725 if (rdev->ih.ring_obj == NULL) { 2730 if (rdev->ih.ring_obj == NULL) {
2726 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, 2731 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2727 true, 2732 PAGE_SIZE, true,
2728 RADEON_GEM_DOMAIN_GTT, 2733 RADEON_GEM_DOMAIN_GTT,
2729 &rdev->ih.ring_obj); 2734 &rdev->ih.ring_obj);
2730 if (r) { 2735 if (r) {
@@ -2995,7 +3000,7 @@ int r600_irq_set(struct radeon_device *rdev)
2995 u32 hdmi1, hdmi2; 3000 u32 hdmi1, hdmi2;
2996 3001
2997 if (!rdev->irq.installed) { 3002 if (!rdev->irq.installed) {
2998 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 3003 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2999 return -EINVAL; 3004 return -EINVAL;
3000 } 3005 }
3001 /* don't enable anything if the ih is disabled */ 3006 /* don't enable anything if the ih is disabled */
@@ -3489,10 +3494,12 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3489void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) 3494void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3490{ 3495{
3491 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 3496 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
3492 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 3497 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
3498 * This seems to cause problems on some AGP cards. Just use the old
3499 * method for them.
3493 */ 3500 */
3494 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 3501 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
3495 rdev->vram_scratch.ptr) { 3502 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
3496 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 3503 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3497 u32 tmp; 3504 u32 tmp;
3498 3505
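
rv6xx_get_temp() used to post-process the ASIC_T field, dropping readings with bit 7 set and shifting by one; the fix trusts the raw field and scales it to the millidegrees Celsius that hwmon temp inputs report. The conversion in isolation; the field layout below is illustrative, not the real register definition:

#include <stdio.h>
#include <stdint.h>

#define ASIC_T_SHIFT 16
#define ASIC_T_MASK  (0xff << ASIC_T_SHIFT)	/* illustrative field layout */

int main(void)
{
	uint32_t cg_thermal_status = 62 << ASIC_T_SHIFT;	/* fake reading */

	/* Extract the temperature field and scale degrees C to the
	 * millidegrees that hwmon's temp*_input files report. */
	uint32_t temp = (cg_thermal_status & ASIC_T_MASK) >> ASIC_T_SHIFT;

	printf("temperature: %u millidegrees C\n", temp * 1000);
	return 0;
}
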
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 8362974ef41a..86e5aa07f0db 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -501,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev)
501 obj_size += r6xx_ps_size * 4; 501 obj_size += r6xx_ps_size * 4;
502 obj_size = ALIGN(obj_size, 256); 502 obj_size = ALIGN(obj_size, 256);
503 503
504 r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, 504 r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
505 &rdev->r600_blit.shader_obj); 505 &rdev->r600_blit.shader_obj);
506 if (r) { 506 if (r) {
507 DRM_ERROR("r600 failed to allocate shader\n"); 507 DRM_ERROR("r600 failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 37cc2aa9f923..7831e0890210 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -50,6 +50,7 @@ struct r600_cs_track {
50 u32 nsamples; 50 u32 nsamples;
51 u32 cb_color_base_last[8]; 51 u32 cb_color_base_last[8];
52 struct radeon_bo *cb_color_bo[8]; 52 struct radeon_bo *cb_color_bo[8];
53 u64 cb_color_bo_mc[8];
53 u32 cb_color_bo_offset[8]; 54 u32 cb_color_bo_offset[8];
54 struct radeon_bo *cb_color_frag_bo[8]; 55 struct radeon_bo *cb_color_frag_bo[8];
55 struct radeon_bo *cb_color_tile_bo[8]; 56 struct radeon_bo *cb_color_tile_bo[8];
@@ -67,6 +68,7 @@ struct r600_cs_track {
67 u32 db_depth_size; 68 u32 db_depth_size;
68 u32 db_offset; 69 u32 db_offset;
69 struct radeon_bo *db_bo; 70 struct radeon_bo *db_bo;
71 u64 db_bo_mc;
70}; 72};
71 73
72static inline int r600_bpe_from_format(u32 *bpe, u32 format) 74static inline int r600_bpe_from_format(u32 *bpe, u32 format)
@@ -140,6 +142,68 @@ static inline int r600_bpe_from_format(u32 *bpe, u32 format)
140 return 0; 142 return 0;
141} 143}
142 144
145struct array_mode_checker {
146 int array_mode;
147 u32 group_size;
148 u32 nbanks;
149 u32 npipes;
150 u32 nsamples;
151 u32 bpe;
152};
153
154/* returns alignment in pixels for pitch/height/depth and bytes for base */
155static inline int r600_get_array_mode_alignment(struct array_mode_checker *values,
156 u32 *pitch_align,
157 u32 *height_align,
158 u32 *depth_align,
159 u64 *base_align)
160{
161 u32 tile_width = 8;
162 u32 tile_height = 8;
163 u32 macro_tile_width = values->nbanks;
164 u32 macro_tile_height = values->npipes;
165 u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
166 u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
167
168 switch (values->array_mode) {
169 case ARRAY_LINEAR_GENERAL:
170 /* technically tile_width/_height for pitch/height */
171 *pitch_align = 1; /* tile_width */
172 *height_align = 1; /* tile_height */
173 *depth_align = 1;
174 *base_align = 1;
175 break;
176 case ARRAY_LINEAR_ALIGNED:
177 *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
178 *height_align = tile_height;
179 *depth_align = 1;
180 *base_align = values->group_size;
181 break;
182 case ARRAY_1D_TILED_THIN1:
183 *pitch_align = max((u32)tile_width,
184 (u32)(values->group_size /
185 (tile_height * values->bpe * values->nsamples)));
186 *height_align = tile_height;
187 *depth_align = 1;
188 *base_align = values->group_size;
189 break;
190 case ARRAY_2D_TILED_THIN1:
191 *pitch_align = max((u32)macro_tile_width,
192 (u32)(((values->group_size / tile_height) /
193 (values->bpe * values->nsamples)) *
194 values->nbanks)) * tile_width;
195 *height_align = macro_tile_height * tile_height;
196 *depth_align = 1;
197 *base_align = max(macro_tile_bytes,
198 (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
199 break;
200 default:
201 return -EINVAL;
202 }
203
204 return 0;
205}
206
143static void r600_cs_track_init(struct r600_cs_track *track) 207static void r600_cs_track_init(struct r600_cs_track *track)
144{ 208{
145 int i; 209 int i;
@@ -153,10 +217,12 @@ static void r600_cs_track_init(struct r600_cs_track *track)
153 track->cb_color_info[i] = 0; 217 track->cb_color_info[i] = 0;
154 track->cb_color_bo[i] = NULL; 218 track->cb_color_bo[i] = NULL;
155 track->cb_color_bo_offset[i] = 0xFFFFFFFF; 219 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
220 track->cb_color_bo_mc[i] = 0xFFFFFFFF;
156 } 221 }
157 track->cb_target_mask = 0xFFFFFFFF; 222 track->cb_target_mask = 0xFFFFFFFF;
158 track->cb_shader_mask = 0xFFFFFFFF; 223 track->cb_shader_mask = 0xFFFFFFFF;
159 track->db_bo = NULL; 224 track->db_bo = NULL;
225 track->db_bo_mc = 0xFFFFFFFF;
160 /* assume the biggest format and that htile is enabled */ 226 /* assume the biggest format and that htile is enabled */
161 track->db_depth_info = 7 | (1 << 25); 227 track->db_depth_info = 7 | (1 << 25);
162 track->db_depth_view = 0xFFFFC000; 228 track->db_depth_view = 0xFFFFC000;
@@ -168,7 +234,10 @@ static void r600_cs_track_init(struct r600_cs_track *track)
168static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) 234static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
169{ 235{
170 struct r600_cs_track *track = p->track; 236 struct r600_cs_track *track = p->track;
171 u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align; 237 u32 bpe = 0, slice_tile_max, size, tmp;
238 u32 height, height_align, pitch, pitch_align, depth_align;
239 u64 base_offset, base_align;
240 struct array_mode_checker array_check;
172 volatile u32 *ib = p->ib->ptr; 241 volatile u32 *ib = p->ib->ptr;
173 unsigned array_mode; 242 unsigned array_mode;
174 243
@@ -183,60 +252,40 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
183 i, track->cb_color_info[i]); 252 i, track->cb_color_info[i]);
184 return -EINVAL; 253 return -EINVAL;
185 } 254 }
186 /* pitch is the number of 8x8 tiles per row */ 255 /* pitch in pixels */
187 pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1; 256 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
188 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; 257 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
189 slice_tile_max *= 64; 258 slice_tile_max *= 64;
190 height = slice_tile_max / (pitch * 8); 259 height = slice_tile_max / pitch;
191 if (height > 8192) 260 if (height > 8192)
192 height = 8192; 261 height = 8192;
193 array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); 262 array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
263
264 base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
265 array_check.array_mode = array_mode;
266 array_check.group_size = track->group_size;
267 array_check.nbanks = track->nbanks;
268 array_check.npipes = track->npipes;
269 array_check.nsamples = track->nsamples;
270 array_check.bpe = bpe;
271 if (r600_get_array_mode_alignment(&array_check,
272 &pitch_align, &height_align, &depth_align, &base_align)) {
273 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
274 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
275 track->cb_color_info[i]);
276 return -EINVAL;
277 }
194 switch (array_mode) { 278 switch (array_mode) {
195 case V_0280A0_ARRAY_LINEAR_GENERAL: 279 case V_0280A0_ARRAY_LINEAR_GENERAL:
196 /* technically height & 0x7 */
197 break; 280 break;
198 case V_0280A0_ARRAY_LINEAR_ALIGNED: 281 case V_0280A0_ARRAY_LINEAR_ALIGNED:
199 pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
200 if (!IS_ALIGNED(pitch, pitch_align)) {
201 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
202 __func__, __LINE__, pitch);
203 return -EINVAL;
204 }
205 if (!IS_ALIGNED(height, 8)) {
206 dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
207 __func__, __LINE__, height);
208 return -EINVAL;
209 }
210 break; 282 break;
211 case V_0280A0_ARRAY_1D_TILED_THIN1: 283 case V_0280A0_ARRAY_1D_TILED_THIN1:
212 pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8;
213 if (!IS_ALIGNED(pitch, pitch_align)) {
214 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
215 __func__, __LINE__, pitch);
216 return -EINVAL;
217 }
218 /* avoid breaking userspace */ 284 /* avoid breaking userspace */
219 if (height > 7) 285 if (height > 7)
220 height &= ~0x7; 286 height &= ~0x7;
221 if (!IS_ALIGNED(height, 8)) {
222 dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
223 __func__, __LINE__, height);
224 return -EINVAL;
225 }
226 break; 287 break;
227 case V_0280A0_ARRAY_2D_TILED_THIN1: 288 case V_0280A0_ARRAY_2D_TILED_THIN1:
228 pitch_align = max((u32)track->nbanks,
229 (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8;
230 if (!IS_ALIGNED(pitch, pitch_align)) {
231 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
232 __func__, __LINE__, pitch);
233 return -EINVAL;
234 }
235 if (!IS_ALIGNED((height / 8), track->npipes)) {
236 dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
237 __func__, __LINE__, height);
238 return -EINVAL;
239 }
240 break; 289 break;
241 default: 290 default:
242 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 291 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -244,31 +293,42 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
244 track->cb_color_info[i]); 293 track->cb_color_info[i]);
245 return -EINVAL; 294 return -EINVAL;
246 } 295 }
296
297 if (!IS_ALIGNED(pitch, pitch_align)) {
298 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
299 __func__, __LINE__, pitch);
300 return -EINVAL;
301 }
302 if (!IS_ALIGNED(height, height_align)) {
303 dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
304 __func__, __LINE__, height);
305 return -EINVAL;
306 }
307 if (!IS_ALIGNED(base_offset, base_align)) {
308 dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
309 return -EINVAL;
310 }
311
247 /* check offset */ 312 /* check offset */
248 tmp = height * pitch * 8 * bpe; 313 tmp = height * pitch * bpe;
249 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 314 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
250 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { 315 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
251 /* the initial DDX does bad things with the CB size occasionally */ 316 /* the initial DDX does bad things with the CB size occasionally */
252 /* it rounds up height too far for slice tile max but the BO is smaller */ 317 /* it rounds up height too far for slice tile max but the BO is smaller */
253 tmp = (height - 7) * 8 * bpe; 318 /* r600c,g also seem to flush at bad times in some apps resulting in
254 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 319 * bogus values here. So for linear just allow anything to avoid breaking
255 dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); 320 * broken userspace.
256 return -EINVAL; 321 */
257 }
258 } else { 322 } else {
259 dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); 323 dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
260 return -EINVAL; 324 return -EINVAL;
261 } 325 }
262 } 326 }
263 if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
264 dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
265 return -EINVAL;
266 }
267 /* limit max tile */ 327 /* limit max tile */
268 tmp = (height * pitch * 8) >> 6; 328 tmp = (height * pitch) >> 6;
269 if (tmp < slice_tile_max) 329 if (tmp < slice_tile_max)
270 slice_tile_max = tmp; 330 slice_tile_max = tmp;
271 tmp = S_028060_PITCH_TILE_MAX(pitch - 1) | 331 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
272 S_028060_SLICE_TILE_MAX(slice_tile_max - 1); 332 S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
273 ib[track->cb_color_size_idx[i]] = tmp; 333 ib[track->cb_color_size_idx[i]] = tmp;
274 return 0; 334 return 0;
@@ -310,7 +370,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
310 /* Check depth buffer */ 370 /* Check depth buffer */
311 if (G_028800_STENCIL_ENABLE(track->db_depth_control) || 371 if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
312 G_028800_Z_ENABLE(track->db_depth_control)) { 372 G_028800_Z_ENABLE(track->db_depth_control)) {
313 u32 nviews, bpe, ntiles, pitch, pitch_align, height, size, slice_tile_max; 373 u32 nviews, bpe, ntiles, size, slice_tile_max;
374 u32 height, height_align, pitch, pitch_align, depth_align;
375 u64 base_offset, base_align;
376 struct array_mode_checker array_check;
377 int array_mode;
378
314 if (track->db_bo == NULL) { 379 if (track->db_bo == NULL) {
315 dev_warn(p->dev, "z/stencil with no depth buffer\n"); 380 dev_warn(p->dev, "z/stencil with no depth buffer\n");
316 return -EINVAL; 381 return -EINVAL;
@@ -353,41 +418,34 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
353 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); 418 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
354 } else { 419 } else {
355 size = radeon_bo_size(track->db_bo); 420 size = radeon_bo_size(track->db_bo);
356 pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1; 421 /* pitch in pixels */
422 pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
357 slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 423 slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
358 slice_tile_max *= 64; 424 slice_tile_max *= 64;
359 height = slice_tile_max / (pitch * 8); 425 height = slice_tile_max / pitch;
360 if (height > 8192) 426 if (height > 8192)
361 height = 8192; 427 height = 8192;
362 switch (G_028010_ARRAY_MODE(track->db_depth_info)) { 428 base_offset = track->db_bo_mc + track->db_offset;
429 array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
430 array_check.array_mode = array_mode;
431 array_check.group_size = track->group_size;
432 array_check.nbanks = track->nbanks;
433 array_check.npipes = track->npipes;
434 array_check.nsamples = track->nsamples;
435 array_check.bpe = bpe;
436 if (r600_get_array_mode_alignment(&array_check,
437 &pitch_align, &height_align, &depth_align, &base_align)) {
438 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
439 G_028010_ARRAY_MODE(track->db_depth_info),
440 track->db_depth_info);
441 return -EINVAL;
442 }
443 switch (array_mode) {
363 case V_028010_ARRAY_1D_TILED_THIN1: 444 case V_028010_ARRAY_1D_TILED_THIN1:
364 pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
365 if (!IS_ALIGNED(pitch, pitch_align)) {
366 dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
367 __func__, __LINE__, pitch);
368 return -EINVAL;
369 }
370 /* don't break userspace */ 445 /* don't break userspace */
371 height &= ~0x7; 446 height &= ~0x7;
372 if (!IS_ALIGNED(height, 8)) {
373 dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
374 __func__, __LINE__, height);
375 return -EINVAL;
376 }
377 break; 447 break;
378 case V_028010_ARRAY_2D_TILED_THIN1: 448 case V_028010_ARRAY_2D_TILED_THIN1:
379 pitch_align = max((u32)track->nbanks,
380 (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8;
381 if (!IS_ALIGNED(pitch, pitch_align)) {
382 dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
383 __func__, __LINE__, pitch);
384 return -EINVAL;
385 }
386 if (!IS_ALIGNED((height / 8), track->npipes)) {
387 dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
388 __func__, __LINE__, height);
389 return -EINVAL;
390 }
391 break; 449 break;
392 default: 450 default:
393 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 451 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
@@ -395,15 +453,27 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
395 track->db_depth_info); 453 track->db_depth_info);
396 return -EINVAL; 454 return -EINVAL;
397 } 455 }
398 if (!IS_ALIGNED(track->db_offset, track->group_size)) { 456
399 dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset); 457 if (!IS_ALIGNED(pitch, pitch_align)) {
458 dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
459 __func__, __LINE__, pitch);
460 return -EINVAL;
461 }
462 if (!IS_ALIGNED(height, height_align)) {
463 dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
464 __func__, __LINE__, height);
400 return -EINVAL; 465 return -EINVAL;
401 } 466 }
467 if (!IS_ALIGNED(base_offset, base_align)) {
468 dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
469 return -EINVAL;
470 }
471
402 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 472 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
403 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 473 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
404 tmp = ntiles * bpe * 64 * nviews; 474 tmp = ntiles * bpe * 64 * nviews;
405 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 475 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
406 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n", 476 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
407 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 477 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
408 radeon_bo_size(track->db_bo)); 478 radeon_bo_size(track->db_bo));
409 return -EINVAL; 479 return -EINVAL;
@@ -954,6 +1024,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
954 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1024 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
955 track->cb_color_base_last[tmp] = ib[idx]; 1025 track->cb_color_base_last[tmp] = ib[idx];
956 track->cb_color_bo[tmp] = reloc->robj; 1026 track->cb_color_bo[tmp] = reloc->robj;
1027 track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
957 break; 1028 break;
958 case DB_DEPTH_BASE: 1029 case DB_DEPTH_BASE:
959 r = r600_cs_packet_next_reloc(p, &reloc); 1030 r = r600_cs_packet_next_reloc(p, &reloc);
@@ -965,6 +1036,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
965 track->db_offset = radeon_get_ib_value(p, idx) << 8; 1036 track->db_offset = radeon_get_ib_value(p, idx) << 8;
966 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1037 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
967 track->db_bo = reloc->robj; 1038 track->db_bo = reloc->robj;
1039 track->db_bo_mc = reloc->lobj.gpu_offset;
968 break; 1040 break;
969 case DB_HTILE_DATA_BASE: 1041 case DB_HTILE_DATA_BASE:
970 case SQ_PGM_START_FS: 1042 case SQ_PGM_START_FS:
@@ -1086,16 +1158,25 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels
1086static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, 1158static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1087 struct radeon_bo *texture, 1159 struct radeon_bo *texture,
1088 struct radeon_bo *mipmap, 1160 struct radeon_bo *mipmap,
1161 u64 base_offset,
1162 u64 mip_offset,
1089 u32 tiling_flags) 1163 u32 tiling_flags)
1090{ 1164{
1091 struct r600_cs_track *track = p->track; 1165 struct r600_cs_track *track = p->track;
1092 u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; 1166 u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
1093 u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align; 1167 u32 word0, word1, l0_size, mipmap_size;
1168 u32 height_align, pitch, pitch_align, depth_align;
1169 u64 base_align;
1170 struct array_mode_checker array_check;
1094 1171
1095 /* on legacy kernel we don't perform advanced check */ 1172 /* on legacy kernel we don't perform advanced check */
1096 if (p->rdev == NULL) 1173 if (p->rdev == NULL)
1097 return 0; 1174 return 0;
1098 1175
1176 /* convert to bytes */
1177 base_offset <<= 8;
1178 mip_offset <<= 8;
1179
1099 word0 = radeon_get_ib_value(p, idx + 0); 1180 word0 = radeon_get_ib_value(p, idx + 0);
1100 if (tiling_flags & RADEON_TILING_MACRO) 1181 if (tiling_flags & RADEON_TILING_MACRO)
1101 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1182 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
@@ -1128,46 +1209,38 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1128 return -EINVAL; 1209 return -EINVAL;
1129 } 1210 }
1130 1211
1131 pitch = G_038000_PITCH(word0) + 1; 1212 /* pitch in texels */
1132 switch (G_038000_TILE_MODE(word0)) { 1213 pitch = (G_038000_PITCH(word0) + 1) * 8;
1133 case V_038000_ARRAY_LINEAR_GENERAL: 1214 array_check.array_mode = G_038000_TILE_MODE(word0);
1134 pitch_align = 1; 1215 array_check.group_size = track->group_size;
1135 /* XXX check height align */ 1216 array_check.nbanks = track->nbanks;
1136 break; 1217 array_check.npipes = track->npipes;
1137 case V_038000_ARRAY_LINEAR_ALIGNED: 1218 array_check.nsamples = 1;
1138 pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; 1219 array_check.bpe = bpe;
1139 if (!IS_ALIGNED(pitch, pitch_align)) { 1220 if (r600_get_array_mode_alignment(&array_check,
1140 dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", 1221 &pitch_align, &height_align, &depth_align, &base_align)) {
1141 __func__, __LINE__, pitch); 1222 dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
1142 return -EINVAL; 1223 __func__, __LINE__, G_038000_TILE_MODE(word0));
1143 } 1224 return -EINVAL;
1144 /* XXX check height align */ 1225 }
1145 break; 1226
1146 case V_038000_ARRAY_1D_TILED_THIN1: 1227 /* XXX check height as well... */
1147 pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8; 1228
1148 if (!IS_ALIGNED(pitch, pitch_align)) { 1229 if (!IS_ALIGNED(pitch, pitch_align)) {
1149 dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", 1230 dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
1150 __func__, __LINE__, pitch); 1231 __func__, __LINE__, pitch);
1151 return -EINVAL; 1232 return -EINVAL;
1152 } 1233 }
1153 /* XXX check height align */ 1234 if (!IS_ALIGNED(base_offset, base_align)) {
1154 break; 1235 dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
1155 case V_038000_ARRAY_2D_TILED_THIN1: 1236 __func__, __LINE__, base_offset);
1156 pitch_align = max((u32)track->nbanks, 1237 return -EINVAL;
1157 (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; 1238 }
1158 if (!IS_ALIGNED(pitch, pitch_align)) { 1239 if (!IS_ALIGNED(mip_offset, base_align)) {
1159 dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", 1240 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
1160 __func__, __LINE__, pitch); 1241 __func__, __LINE__, mip_offset);
1161 return -EINVAL;
1162 }
1163 /* XXX check height align */
1164 break;
1165 default:
1166 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
1167 G_038000_TILE_MODE(word0), word0);
1168 return -EINVAL; 1242 return -EINVAL;
1169 } 1243 }
1170 /* XXX check offset align */
1171 1244
1172 word0 = radeon_get_ib_value(p, idx + 4); 1245 word0 = radeon_get_ib_value(p, idx + 4);
1173 word1 = radeon_get_ib_value(p, idx + 5); 1246 word1 = radeon_get_ib_value(p, idx + 5);
@@ -1402,7 +1475,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1402 mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1475 mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1403 mipmap = reloc->robj; 1476 mipmap = reloc->robj;
1404 r = r600_check_texture_resource(p, idx+(i*7)+1, 1477 r = r600_check_texture_resource(p, idx+(i*7)+1,
1405 texture, mipmap, reloc->lobj.tiling_flags); 1478 texture, mipmap,
1479 base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
1480 mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
1481 reloc->lobj.tiling_flags);
1406 if (r) 1482 if (r)
1407 return r; 1483 return r;
1408 ib[idx+1+(i*7)+2] += base_offset; 1484 ib[idx+1+(i*7)+2] += base_offset;
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index d84612ae47e0..33cda016b083 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -86,6 +86,7 @@
86#define R600_HDP_NONSURFACE_BASE 0x2c04 86#define R600_HDP_NONSURFACE_BASE 0x2c04
87 87
88#define R600_BUS_CNTL 0x5420 88#define R600_BUS_CNTL 0x5420
89# define R600_BIOS_ROM_DIS (1 << 1)
89#define R600_CONFIG_CNTL 0x5424 90#define R600_CONFIG_CNTL 0x5424
90#define R600_CONFIG_MEMSIZE 0x5428 91#define R600_CONFIG_MEMSIZE 0x5428
91#define R600_CONFIG_F0_BASE 0x542C 92#define R600_CONFIG_F0_BASE 0x542C
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 966a793e225b..bff4dc4f410f 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -51,6 +51,12 @@
51#define PTE_READABLE (1 << 5) 51#define PTE_READABLE (1 << 5)
52#define PTE_WRITEABLE (1 << 6) 52#define PTE_WRITEABLE (1 << 6)
53 53
54/* tiling bits */
55#define ARRAY_LINEAR_GENERAL 0x00000000
56#define ARRAY_LINEAR_ALIGNED 0x00000001
57#define ARRAY_1D_TILED_THIN1 0x00000002
58#define ARRAY_2D_TILED_THIN1 0x00000004
59
54/* Registers */ 60/* Registers */
55#define ARB_POP 0x2418 61#define ARB_POP 0x2418
56#define ENABLE_TC128 (1 << 30) 62#define ENABLE_TC128 (1 << 30)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 73f600d39ad4..3a7095743d44 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1262,6 +1262,10 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
1262 (rdev->family == CHIP_RS400) || \ 1262 (rdev->family == CHIP_RS400) || \
1263 (rdev->family == CHIP_RS480)) 1263 (rdev->family == CHIP_RS480))
1264#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) 1264#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
1265#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
1266 (rdev->family == CHIP_RS690) || \
1267 (rdev->family == CHIP_RS740) || \
1268 (rdev->family >= CHIP_R600))
1265#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) 1269#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
1266#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) 1270#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
1267#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) 1271#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 04cac7ec9039..bc5a2c3382d9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -98,6 +98,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
98 } 98 }
99 } 99 }
100 100
101 /* some DCE3 boards have bad data for this entry */
102 if (ASIC_IS_DCE3(rdev)) {
103 if ((i == 4) &&
104 (gpio->usClkMaskRegisterIndex == 0x1fda) &&
105 (gpio->sucI2cId.ucAccess == 0x94))
106 gpio->sucI2cId.ucAccess = 0x14;
107 }
108
101 if (gpio->sucI2cId.ucAccess == id) { 109 if (gpio->sucI2cId.ucAccess == id) {
102 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 110 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
103 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 111 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -174,6 +182,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
174 } 182 }
175 } 183 }
176 184
185 /* some DCE3 boards have bad data for this entry */
186 if (ASIC_IS_DCE3(rdev)) {
187 if ((i == 4) &&
188 (gpio->usClkMaskRegisterIndex == 0x1fda) &&
189 (gpio->sucI2cId.ucAccess == 0x94))
190 gpio->sucI2cId.ucAccess = 0x14;
191 }
192
177 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 193 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
178 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 194 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
179 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; 195 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
@@ -526,8 +542,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
526 if (crev < 2) 542 if (crev < 2)
527 return false; 543 return false;
528 544
529 router.valid = false;
530
531 obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); 545 obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
532 path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) 546 path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
533 (ctx->bios + data_offset + 547 (ctx->bios + data_offset +
@@ -624,6 +638,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
624 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 638 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
625 continue; 639 continue;
626 640
641 router.ddc_valid = false;
642 router.cd_valid = false;
627 for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { 643 for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
628 uint8_t grph_obj_id, grph_obj_num, grph_obj_type; 644 uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
629 645
@@ -647,9 +663,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
647 usDeviceTag)); 663 usDeviceTag));
648 664
649 } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { 665 } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
650 router.valid = false;
651 for (k = 0; k < router_obj->ucNumberOfObjects; k++) { 666 for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
652 u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID); 667 u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
653 if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { 668 if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
654 ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) 669 ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
655 (ctx->bios + data_offset + 670 (ctx->bios + data_offset +
@@ -657,6 +672,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
657 ATOM_I2C_RECORD *i2c_record; 672 ATOM_I2C_RECORD *i2c_record;
658 ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; 673 ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
659 ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; 674 ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
675 ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
660 ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = 676 ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
661 (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) 677 (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
662 (ctx->bios + data_offset + 678 (ctx->bios + data_offset +
@@ -690,10 +706,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
690 case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: 706 case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
691 ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) 707 ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
692 record; 708 record;
693 router.valid = true; 709 router.ddc_valid = true;
694 router.mux_type = ddc_path->ucMuxType; 710 router.ddc_mux_type = ddc_path->ucMuxType;
695 router.mux_control_pin = ddc_path->ucMuxControlPin; 711 router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
696 router.mux_state = ddc_path->ucMuxState[enum_id]; 712 router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
713 break;
714 case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
715 cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
716 record;
717 router.cd_valid = true;
718 router.cd_mux_type = cd_path->ucMuxType;
719 router.cd_mux_control_pin = cd_path->ucMuxControlPin;
720 router.cd_mux_state = cd_path->ucMuxState[enum_id];
697 break; 721 break;
698 } 722 }
699 record = (ATOM_COMMON_RECORD_HEADER *) 723 record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +884,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
860 size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; 884 size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
861 struct radeon_router router; 885 struct radeon_router router;
862 886
863 router.valid = false; 887 router.ddc_valid = false;
888 router.cd_valid = false;
864 889
865 bios_connectors = kzalloc(bc_size, GFP_KERNEL); 890 bios_connectors = kzalloc(bc_size, GFP_KERNEL);
866 if (!bios_connectors) 891 if (!bios_connectors)
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 7932dc4d6b90..c558685cc637 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
41 41
42 size = bsize; 42 size = bsize;
43 n = 1024; 43 n = 1024;
44 r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj); 44 r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj);
45 if (r) { 45 if (r) {
46 goto out_cleanup; 46 goto out_cleanup;
47 } 47 }
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
53 if (r) { 53 if (r) {
54 goto out_cleanup; 54 goto out_cleanup;
55 } 55 }
56 r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj); 56 r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
57 if (r) { 57 if (r) {
58 goto out_cleanup; 58 goto out_cleanup;
59 } 59 }
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 654787ec43f4..8f2c7b50dcf5 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -130,6 +130,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
130 } 130 }
131 return true; 131 return true;
132} 132}
133
133static bool r700_read_disabled_bios(struct radeon_device *rdev) 134static bool r700_read_disabled_bios(struct radeon_device *rdev)
134{ 135{
135 uint32_t viph_control; 136 uint32_t viph_control;
@@ -143,7 +144,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
143 bool r; 144 bool r;
144 145
145 viph_control = RREG32(RADEON_VIPH_CONTROL); 146 viph_control = RREG32(RADEON_VIPH_CONTROL);
146 bus_cntl = RREG32(RADEON_BUS_CNTL); 147 bus_cntl = RREG32(R600_BUS_CNTL);
147 d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); 148 d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
148 d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); 149 d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
149 vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); 150 vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -152,7 +153,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
152 /* disable VIP */ 153 /* disable VIP */
153 WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); 154 WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
154 /* enable the rom */ 155 /* enable the rom */
155 WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); 156 WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
156 /* Disable VGA mode */ 157 /* Disable VGA mode */
157 WREG32(AVIVO_D1VGA_CONTROL, 158 WREG32(AVIVO_D1VGA_CONTROL,
158 (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | 159 (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -191,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
191 cg_spll_status = RREG32(R600_CG_SPLL_STATUS); 192 cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
192 } 193 }
193 WREG32(RADEON_VIPH_CONTROL, viph_control); 194 WREG32(RADEON_VIPH_CONTROL, viph_control);
194 WREG32(RADEON_BUS_CNTL, bus_cntl); 195 WREG32(R600_BUS_CNTL, bus_cntl);
195 WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); 196 WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
196 WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); 197 WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
197 WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); 198 WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -216,7 +217,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
216 bool r; 217 bool r;
217 218
218 viph_control = RREG32(RADEON_VIPH_CONTROL); 219 viph_control = RREG32(RADEON_VIPH_CONTROL);
219 bus_cntl = RREG32(RADEON_BUS_CNTL); 220 bus_cntl = RREG32(R600_BUS_CNTL);
220 d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); 221 d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
221 d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); 222 d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
222 vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); 223 vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -231,7 +232,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
231 /* disable VIP */ 232 /* disable VIP */
232 WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); 233 WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
233 /* enable the rom */ 234 /* enable the rom */
234 WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); 235 WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
235 /* Disable VGA mode */ 236 /* Disable VGA mode */
236 WREG32(AVIVO_D1VGA_CONTROL, 237 WREG32(AVIVO_D1VGA_CONTROL,
237 (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | 238 (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -262,7 +263,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
262 263
263 /* restore regs */ 264 /* restore regs */
264 WREG32(RADEON_VIPH_CONTROL, viph_control); 265 WREG32(RADEON_VIPH_CONTROL, viph_control);
265 WREG32(RADEON_BUS_CNTL, bus_cntl); 266 WREG32(R600_BUS_CNTL, bus_cntl);
266 WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); 267 WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
267 WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); 268 WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
268 WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); 269 WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 7b7ea269549c..137b8075f6e7 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -571,6 +571,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
571 } 571 }
572 572
573 if (clk_mask && data_mask) { 573 if (clk_mask && data_mask) {
574 /* system specific masks */
574 i2c.mask_clk_mask = clk_mask; 575 i2c.mask_clk_mask = clk_mask;
575 i2c.mask_data_mask = data_mask; 576 i2c.mask_data_mask = data_mask;
576 i2c.a_clk_mask = clk_mask; 577 i2c.a_clk_mask = clk_mask;
@@ -579,7 +580,19 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
579 i2c.en_data_mask = data_mask; 580 i2c.en_data_mask = data_mask;
580 i2c.y_clk_mask = clk_mask; 581 i2c.y_clk_mask = clk_mask;
581 i2c.y_data_mask = data_mask; 582 i2c.y_data_mask = data_mask;
583 } else if ((ddc_line == RADEON_GPIOPAD_MASK) ||
584 (ddc_line == RADEON_MDGPIO_MASK)) {
585 /* default gpiopad masks */
586 i2c.mask_clk_mask = (0x20 << 8);
587 i2c.mask_data_mask = 0x80;
588 i2c.a_clk_mask = (0x20 << 8);
589 i2c.a_data_mask = 0x80;
590 i2c.en_clk_mask = (0x20 << 8);
591 i2c.en_data_mask = 0x80;
592 i2c.y_clk_mask = (0x20 << 8);
593 i2c.y_data_mask = 0x80;
582 } else { 594 } else {
595 /* default masks for ddc pads */
583 i2c.mask_clk_mask = RADEON_GPIO_EN_1; 596 i2c.mask_clk_mask = RADEON_GPIO_EN_1;
584 i2c.mask_data_mask = RADEON_GPIO_EN_0; 597 i2c.mask_data_mask = RADEON_GPIO_EN_0;
585 i2c.a_clk_mask = RADEON_GPIO_A_1; 598 i2c.a_clk_mask = RADEON_GPIO_A_1;
@@ -716,7 +729,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
716 clk = RBIOS8(offset + 3 + (i * 5) + 3); 729 clk = RBIOS8(offset + 3 + (i * 5) + 3);
717 data = RBIOS8(offset + 3 + (i * 5) + 4); 730 data = RBIOS8(offset + 3 + (i * 5) + 4);
718 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 731 i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
719 clk, data); 732 (1 << clk), (1 << data));
720 rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); 733 rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
721 break; 734 break;
722 } 735 }
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4dac4b0a02ee..8afaf7a7459e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
183 continue; 183 continue;
184 184
185 if (priority == true) { 185 if (priority == true) {
186 DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); 186 DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
187 DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); 187 DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
188 conflict->status = connector_status_disconnected; 188 conflict->status = connector_status_disconnected;
189 radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); 189 radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
190 } else { 190 } else {
191 DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); 191 DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
192 DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict)); 192 DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
193 current_status = connector_status_disconnected; 193 current_status = connector_status_disconnected;
194 } 194 }
195 break; 195 break;
@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
432 mode->vdisplay == native_mode->vdisplay) { 432 mode->vdisplay == native_mode->vdisplay) {
433 *native_mode = *mode; 433 *native_mode = *mode;
434 drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); 434 drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
435 DRM_INFO("Determined LVDS native mode details from EDID\n"); 435 DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
436 break; 436 break;
437 } 437 }
438 } 438 }
439 } 439 }
440 if (!native_mode->clock) { 440 if (!native_mode->clock) {
441 DRM_INFO("No LVDS native mode details, disabling RMX\n"); 441 DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
442 radeon_encoder->rmx_type = RMX_OFF; 442 radeon_encoder->rmx_type = RMX_OFF;
443 } 443 }
444} 444}
@@ -1008,9 +1008,21 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
1008static int radeon_dp_get_modes(struct drm_connector *connector) 1008static int radeon_dp_get_modes(struct drm_connector *connector)
1009{ 1009{
1010 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1010 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1011 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
1011 int ret; 1012 int ret;
1012 1013
1014 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
1015 if (!radeon_dig_connector->edp_on)
1016 atombios_set_edp_panel_power(connector,
1017 ATOM_TRANSMITTER_ACTION_POWER_ON);
1018 }
1013 ret = radeon_ddc_get_modes(radeon_connector); 1019 ret = radeon_ddc_get_modes(radeon_connector);
1020 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
1021 if (!radeon_dig_connector->edp_on)
1022 atombios_set_edp_panel_power(connector,
1023 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1024 }
1025
1014 return ret; 1026 return ret;
1015} 1027}
1016 1028
@@ -1029,8 +1041,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1029 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1041 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
1030 /* eDP is always DP */ 1042 /* eDP is always DP */
1031 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; 1043 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
1044 if (!radeon_dig_connector->edp_on)
1045 atombios_set_edp_panel_power(connector,
1046 ATOM_TRANSMITTER_ACTION_POWER_ON);
1032 if (radeon_dp_getdpcd(radeon_connector)) 1047 if (radeon_dp_getdpcd(radeon_connector))
1033 ret = connector_status_connected; 1048 ret = connector_status_connected;
1049 if (!radeon_dig_connector->edp_on)
1050 atombios_set_edp_panel_power(connector,
1051 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1034 } else { 1052 } else {
1035 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); 1053 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
1036 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 1054 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
@@ -1116,7 +1134,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1116 radeon_connector->shared_ddc = true; 1134 radeon_connector->shared_ddc = true;
1117 shared_ddc = true; 1135 shared_ddc = true;
1118 } 1136 }
1119 if (radeon_connector->router_bus && router->valid && 1137 if (radeon_connector->router_bus && router->ddc_valid &&
1120 (radeon_connector->router.router_id == router->router_id)) { 1138 (radeon_connector->router.router_id == router->router_id)) {
1121 radeon_connector->shared_ddc = false; 1139 radeon_connector->shared_ddc = false;
1122 shared_ddc = false; 1140 shared_ddc = false;
@@ -1136,7 +1154,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1136 radeon_connector->connector_object_id = connector_object_id; 1154 radeon_connector->connector_object_id = connector_object_id;
1137 radeon_connector->hpd = *hpd; 1155 radeon_connector->hpd = *hpd;
1138 radeon_connector->router = *router; 1156 radeon_connector->router = *router;
1139 if (router->valid) { 1157 if (router->ddc_valid || router->cd_valid) {
1140 radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); 1158 radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
1141 if (!radeon_connector->router_bus) 1159 if (!radeon_connector->router_bus)
1142 goto failed; 1160 goto failed;
@@ -1157,6 +1175,8 @@ radeon_add_atom_connector(struct drm_device *dev,
1157 /* no HPD on analog connectors */ 1175 /* no HPD on analog connectors */
1158 radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1176 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1159 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1177 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1178 connector->interlace_allowed = true;
1179 connector->doublescan_allowed = true;
1160 break; 1180 break;
1161 case DRM_MODE_CONNECTOR_DVIA: 1181 case DRM_MODE_CONNECTOR_DVIA:
1162 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1182 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1172,6 +1192,8 @@ radeon_add_atom_connector(struct drm_device *dev,
1172 1); 1192 1);
1173 /* no HPD on analog connectors */ 1193 /* no HPD on analog connectors */
1174 radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1194 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1195 connector->interlace_allowed = true;
1196 connector->doublescan_allowed = true;
1175 break; 1197 break;
1176 case DRM_MODE_CONNECTOR_DVII: 1198 case DRM_MODE_CONNECTOR_DVII:
1177 case DRM_MODE_CONNECTOR_DVID: 1199 case DRM_MODE_CONNECTOR_DVID:
@@ -1208,6 +1230,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1208 rdev->mode_info.load_detect_property, 1230 rdev->mode_info.load_detect_property,
1209 1); 1231 1);
1210 } 1232 }
1233 connector->interlace_allowed = true;
1234 if (connector_type == DRM_MODE_CONNECTOR_DVII)
1235 connector->doublescan_allowed = true;
1236 else
1237 connector->doublescan_allowed = false;
1211 break; 1238 break;
1212 case DRM_MODE_CONNECTOR_HDMIA: 1239 case DRM_MODE_CONNECTOR_HDMIA:
1213 case DRM_MODE_CONNECTOR_HDMIB: 1240 case DRM_MODE_CONNECTOR_HDMIB:
@@ -1238,6 +1265,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1238 0); 1265 0);
1239 } 1266 }
1240 subpixel_order = SubPixelHorizontalRGB; 1267 subpixel_order = SubPixelHorizontalRGB;
1268 connector->interlace_allowed = true;
1269 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
1270 connector->doublescan_allowed = true;
1271 else
1272 connector->doublescan_allowed = false;
1241 break; 1273 break;
1242 case DRM_MODE_CONNECTOR_DisplayPort: 1274 case DRM_MODE_CONNECTOR_DisplayPort:
1243 case DRM_MODE_CONNECTOR_eDP: 1275 case DRM_MODE_CONNECTOR_eDP:
@@ -1275,6 +1307,9 @@ radeon_add_atom_connector(struct drm_device *dev,
1275 rdev->mode_info.underscan_vborder_property, 1307 rdev->mode_info.underscan_vborder_property,
1276 0); 1308 0);
1277 } 1309 }
1310 connector->interlace_allowed = true;
1311 /* in theory with a DP to VGA converter... */
1312 connector->doublescan_allowed = false;
1278 break; 1313 break;
1279 case DRM_MODE_CONNECTOR_SVIDEO: 1314 case DRM_MODE_CONNECTOR_SVIDEO:
1280 case DRM_MODE_CONNECTOR_Composite: 1315 case DRM_MODE_CONNECTOR_Composite:
@@ -1290,6 +1325,8 @@ radeon_add_atom_connector(struct drm_device *dev,
1290 radeon_atombios_get_tv_info(rdev)); 1325 radeon_atombios_get_tv_info(rdev));
1291 /* no HPD on analog connectors */ 1326 /* no HPD on analog connectors */
1292 radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1327 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1328 connector->interlace_allowed = false;
1329 connector->doublescan_allowed = false;
1293 break; 1330 break;
1294 case DRM_MODE_CONNECTOR_LVDS: 1331 case DRM_MODE_CONNECTOR_LVDS:
1295 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1332 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -1308,6 +1345,8 @@ radeon_add_atom_connector(struct drm_device *dev,
1308 dev->mode_config.scaling_mode_property, 1345 dev->mode_config.scaling_mode_property,
1309 DRM_MODE_SCALE_FULLSCREEN); 1346 DRM_MODE_SCALE_FULLSCREEN);
1310 subpixel_order = SubPixelHorizontalRGB; 1347 subpixel_order = SubPixelHorizontalRGB;
1348 connector->interlace_allowed = false;
1349 connector->doublescan_allowed = false;
1311 break; 1350 break;
1312 } 1351 }
1313 1352
@@ -1385,6 +1424,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
1385 /* no HPD on analog connectors */ 1424 /* no HPD on analog connectors */
1386 radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1425 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1387 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1426 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1427 connector->interlace_allowed = true;
1428 connector->doublescan_allowed = true;
1388 break; 1429 break;
1389 case DRM_MODE_CONNECTOR_DVIA: 1430 case DRM_MODE_CONNECTOR_DVIA:
1390 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1431 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1400,6 +1441,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
1400 1); 1441 1);
1401 /* no HPD on analog connectors */ 1442 /* no HPD on analog connectors */
1402 radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1443 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1444 connector->interlace_allowed = true;
1445 connector->doublescan_allowed = true;
1403 break; 1446 break;
1404 case DRM_MODE_CONNECTOR_DVII: 1447 case DRM_MODE_CONNECTOR_DVII:
1405 case DRM_MODE_CONNECTOR_DVID: 1448 case DRM_MODE_CONNECTOR_DVID:
@@ -1417,6 +1460,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
1417 1); 1460 1);
1418 } 1461 }
1419 subpixel_order = SubPixelHorizontalRGB; 1462 subpixel_order = SubPixelHorizontalRGB;
1463 connector->interlace_allowed = true;
1464 if (connector_type == DRM_MODE_CONNECTOR_DVII)
1465 connector->doublescan_allowed = true;
1466 else
1467 connector->doublescan_allowed = false;
1420 break; 1468 break;
1421 case DRM_MODE_CONNECTOR_SVIDEO: 1469 case DRM_MODE_CONNECTOR_SVIDEO:
1422 case DRM_MODE_CONNECTOR_Composite: 1470 case DRM_MODE_CONNECTOR_Composite:
@@ -1439,6 +1487,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
1439 radeon_combios_get_tv_info(rdev)); 1487 radeon_combios_get_tv_info(rdev));
1440 /* no HPD on analog connectors */ 1488 /* no HPD on analog connectors */
1441 radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1489 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1490 connector->interlace_allowed = false;
1491 connector->doublescan_allowed = false;
1442 break; 1492 break;
1443 case DRM_MODE_CONNECTOR_LVDS: 1493 case DRM_MODE_CONNECTOR_LVDS:
1444 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1494 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1452,6 +1502,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
1452 dev->mode_config.scaling_mode_property, 1502 dev->mode_config.scaling_mode_property,
1453 DRM_MODE_SCALE_FULLSCREEN); 1503 DRM_MODE_SCALE_FULLSCREEN);
1454 subpixel_order = SubPixelHorizontalRGB; 1504 subpixel_order = SubPixelHorizontalRGB;
1505 connector->interlace_allowed = false;
1506 connector->doublescan_allowed = false;
1455 break; 1507 break;
1456 } 1508 }
1457 1509
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 8adfedfe547f..501966a13f48 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -180,7 +180,7 @@ int radeon_wb_init(struct radeon_device *rdev)
180 int r; 180 int r;
181 181
182 if (rdev->wb.wb_obj == NULL) { 182 if (rdev->wb.wb_obj == NULL) {
183 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, 183 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
184 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); 184 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
185 if (r) { 185 if (r) {
186 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); 186 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
@@ -286,7 +286,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
286 mc->mc_vram_size = mc->aper_size; 286 mc->mc_vram_size = mc->aper_size;
287 } 287 }
288 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 288 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
289 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", 289 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
290 mc->mc_vram_size >> 20, mc->vram_start, 290 mc->mc_vram_size >> 20, mc->vram_start,
291 mc->vram_end, mc->real_vram_size >> 20); 291 mc->vram_end, mc->real_vram_size >> 20);
292} 292}
@@ -323,7 +323,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
323 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; 323 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
324 } 324 }
325 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; 325 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
326 dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", 326 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
327 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); 327 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
328} 328}
329 329
@@ -910,11 +910,6 @@ int radeon_resume_kms(struct drm_device *dev)
910 radeon_pm_resume(rdev); 910 radeon_pm_resume(rdev);
911 radeon_restore_bios_scratch_regs(rdev); 911 radeon_restore_bios_scratch_regs(rdev);
912 912
913 /* turn on display hw */
914 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
915 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
916 }
917
918 radeon_fbdev_set_suspend(rdev, 0); 913 radeon_fbdev_set_suspend(rdev, 0);
919 release_console_sem(); 914 release_console_sem();
920 915
@@ -922,6 +917,10 @@ int radeon_resume_kms(struct drm_device *dev)
922 radeon_hpd_init(rdev); 917 radeon_hpd_init(rdev);
923 /* blat the mode back in */ 918 /* blat the mode back in */
924 drm_helper_resume_force_mode(dev); 919 drm_helper_resume_force_mode(dev);
920 /* turn on display hw */
921 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
922 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
923 }
925 return 0; 924 return 0;
926} 925}
927 926
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0383631da69c..1df4dc6c063c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev)
315 radeon_connector->ddc_bus->rec.en_data_reg, 315 radeon_connector->ddc_bus->rec.en_data_reg,
316 radeon_connector->ddc_bus->rec.y_clk_reg, 316 radeon_connector->ddc_bus->rec.y_clk_reg,
317 radeon_connector->ddc_bus->rec.y_data_reg); 317 radeon_connector->ddc_bus->rec.y_data_reg);
318 if (radeon_connector->router_bus) 318 if (radeon_connector->router.ddc_valid)
319 DRM_INFO(" DDC Router 0x%x/0x%x\n", 319 DRM_INFO(" DDC Router 0x%x/0x%x\n",
320 radeon_connector->router.mux_control_pin, 320 radeon_connector->router.ddc_mux_control_pin,
321 radeon_connector->router.mux_state); 321 radeon_connector->router.ddc_mux_state);
322 if (radeon_connector->router.cd_valid)
323 DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
324 radeon_connector->router.cd_mux_control_pin,
325 radeon_connector->router.cd_mux_state);
322 } else { 326 } else {
323 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || 327 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
324 connector->connector_type == DRM_MODE_CONNECTOR_DVII || 328 connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
398 int ret = 0; 402 int ret = 0;
399 403
400 /* on hw with routers, select right port */ 404 /* on hw with routers, select right port */
401 if (radeon_connector->router.valid) 405 if (radeon_connector->router.ddc_valid)
402 radeon_router_select_port(radeon_connector); 406 radeon_router_select_ddc_port(radeon_connector);
403 407
404 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 408 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
405 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 409 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector)
432 int ret = 0; 436 int ret = 0;
433 437
434 /* on hw with routers, select right port */ 438 /* on hw with routers, select right port */
435 if (radeon_connector->router.valid) 439 if (radeon_connector->router.ddc_valid)
436 radeon_router_select_port(radeon_connector); 440 radeon_router_select_ddc_port(radeon_connector);
437 441
438 if (!radeon_connector->ddc_bus) 442 if (!radeon_connector->ddc_bus)
439 return -1; 443 return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 88e4ea925900..60e689f2d048 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -232,9 +232,28 @@ static struct drm_driver driver_old = {
232 232
233static struct drm_driver kms_driver; 233static struct drm_driver kms_driver;
234 234
235static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
236{
237 struct apertures_struct *ap;
238 bool primary = false;
239
240 ap = alloc_apertures(1);
241 ap->ranges[0].base = pci_resource_start(pdev, 0);
242 ap->ranges[0].size = pci_resource_len(pdev, 0);
243
244#ifdef CONFIG_X86
245 primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
246#endif
247 remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
248 kfree(ap);
249}
250
235static int __devinit 251static int __devinit
236radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 252radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
237{ 253{
254 /* Get rid of things like offb */
255 radeon_kick_out_firmware_fb(pdev);
256
238 return drm_get_pci_dev(pdev, ent, &kms_driver); 257 return drm_get_pci_dev(pdev, ent, &kms_driver);
239} 258}
240 259
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ae58b6849a2e..041943df966b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -176,6 +176,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
176 return false; 176 return false;
177 } 177 }
178} 178}
179
179void 180void
180radeon_link_encoder_connector(struct drm_device *dev) 181radeon_link_encoder_connector(struct drm_device *dev)
181{ 182{
@@ -228,6 +229,27 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
228 return NULL; 229 return NULL;
229} 230}
230 231
232struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
233{
234 struct drm_device *dev = encoder->dev;
235 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
236 struct drm_encoder *other_encoder;
237 struct radeon_encoder *other_radeon_encoder;
238
239 if (radeon_encoder->is_ext_encoder)
240 return NULL;
241
242 list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
243 if (other_encoder == encoder)
244 continue;
245 other_radeon_encoder = to_radeon_encoder(other_encoder);
246 if (other_radeon_encoder->is_ext_encoder &&
247 (radeon_encoder->devices & other_radeon_encoder->devices))
248 return other_encoder;
249 }
250 return NULL;
251}
252
231void radeon_panel_mode_fixup(struct drm_encoder *encoder, 253void radeon_panel_mode_fixup(struct drm_encoder *encoder,
232 struct drm_display_mode *adjusted_mode) 254 struct drm_display_mode *adjusted_mode)
233{ 255{
@@ -426,52 +448,49 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
426 448
427} 449}
428 450
429void 451union dvo_encoder_control {
430atombios_external_tmds_setup(struct drm_encoder *encoder, int action) 452 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
431{ 453 DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
432 struct drm_device *dev = encoder->dev; 454 DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
433 struct radeon_device *rdev = dev->dev_private; 455};
434 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
435 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args;
436 int index = 0;
437
438 memset(&args, 0, sizeof(args));
439
440 index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
441
442 args.sXTmdsEncoder.ucEnable = action;
443
444 if (radeon_encoder->pixel_clock > 165000)
445 args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL;
446
447 /*if (pScrn->rgbBits == 8)*/
448 args.sXTmdsEncoder.ucMisc |= (1 << 1);
449
450 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
451
452}
453 456
454static void 457void
455atombios_ddia_setup(struct drm_encoder *encoder, int action) 458atombios_dvo_setup(struct drm_encoder *encoder, int action)
456{ 459{
457 struct drm_device *dev = encoder->dev; 460 struct drm_device *dev = encoder->dev;
458 struct radeon_device *rdev = dev->dev_private; 461 struct radeon_device *rdev = dev->dev_private;
459 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 462 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
460 DVO_ENCODER_CONTROL_PS_ALLOCATION args; 463 union dvo_encoder_control args;
461 int index = 0; 464 int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
462 465
463 memset(&args, 0, sizeof(args)); 466 memset(&args, 0, sizeof(args));
464 467
465 index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); 468 if (ASIC_IS_DCE3(rdev)) {
469 /* DCE3+ */
470 args.dvo_v3.ucAction = action;
471 args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
472 args.dvo_v3.ucDVOConfig = 0; /* XXX */
473 } else if (ASIC_IS_DCE2(rdev)) {
474 /* DCE2 (pre-DCE3 R6xx, RS600/690/740 */
475 args.dvo.sDVOEncoder.ucAction = action;
476 args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
477 /* DFP1, CRT1, TV1 depending on the type of port */
478 args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
479
480 if (radeon_encoder->pixel_clock > 165000)
481 args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
482 } else {
483 /* R4xx, R5xx */
484 args.ext_tmds.sXTmdsEncoder.ucEnable = action;
466 485
467 args.sDVOEncoder.ucAction = action; 486 if (radeon_encoder->pixel_clock > 165000)
468 args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 487 args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
469 488
470 if (radeon_encoder->pixel_clock > 165000) 489 /*if (pScrn->rgbBits == 8)*/
471 args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL; 490 args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
491 }
472 492
473 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 493 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
474
475} 494}
476 495
477union lvds_encoder_control { 496union lvds_encoder_control {
@@ -532,14 +551,14 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
532 if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) 551 if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
533 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; 552 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
534 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) 553 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
535 args.v1.ucMisc |= (1 << 1); 554 args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
536 } else { 555 } else {
537 if (dig->linkb) 556 if (dig->linkb)
538 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; 557 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
539 if (radeon_encoder->pixel_clock > 165000) 558 if (radeon_encoder->pixel_clock > 165000)
540 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; 559 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
541 /*if (pScrn->rgbBits == 8) */ 560 /*if (pScrn->rgbBits == 8) */
542 args.v1.ucMisc |= (1 << 1); 561 args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
543 } 562 }
544 break; 563 break;
545 case 2: 564 case 2:
@@ -595,6 +614,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
595int 614int
596atombios_get_encoder_mode(struct drm_encoder *encoder) 615atombios_get_encoder_mode(struct drm_encoder *encoder)
597{ 616{
617 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
598 struct drm_device *dev = encoder->dev; 618 struct drm_device *dev = encoder->dev;
599 struct radeon_device *rdev = dev->dev_private; 619 struct radeon_device *rdev = dev->dev_private;
600 struct drm_connector *connector; 620 struct drm_connector *connector;
@@ -602,9 +622,20 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
602 struct radeon_connector_atom_dig *dig_connector; 622 struct radeon_connector_atom_dig *dig_connector;
603 623
604 connector = radeon_get_connector_for_encoder(encoder); 624 connector = radeon_get_connector_for_encoder(encoder);
605 if (!connector) 625 if (!connector) {
606 return 0; 626 switch (radeon_encoder->encoder_id) {
607 627 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
628 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
629 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
630 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
631 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
632 return ATOM_ENCODER_MODE_DVI;
633 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
634 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
635 default:
636 return ATOM_ENCODER_MODE_CRT;
637 }
638 }
608 radeon_connector = to_radeon_connector(connector); 639 radeon_connector = to_radeon_connector(connector);
609 640
610 switch (connector->connector_type) { 641 switch (connector->connector_type) {
@@ -834,6 +865,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
834 memset(&args, 0, sizeof(args)); 865 memset(&args, 0, sizeof(args));
835 866
836 switch (radeon_encoder->encoder_id) { 867 switch (radeon_encoder->encoder_id) {
868 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
869 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
870 break;
837 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 871 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
838 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 872 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
839 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 873 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
@@ -978,6 +1012,105 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
978 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1012 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
979} 1013}
980 1014
1015void
1016atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1017{
1018 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1019 struct drm_device *dev = radeon_connector->base.dev;
1020 struct radeon_device *rdev = dev->dev_private;
1021 union dig_transmitter_control args;
1022 int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
1023 uint8_t frev, crev;
1024
1025 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
1026 return;
1027
1028 if (!ASIC_IS_DCE4(rdev))
1029 return;
1030
1031 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
1032 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
1033 return;
1034
1035 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1036 return;
1037
1038 memset(&args, 0, sizeof(args));
1039
1040 args.v1.ucAction = action;
1041
1042 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1043}
1044
1045union external_encoder_control {
1046 EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
1047};
1048
1049static void
1050atombios_external_encoder_setup(struct drm_encoder *encoder,
1051 struct drm_encoder *ext_encoder,
1052 int action)
1053{
1054 struct drm_device *dev = encoder->dev;
1055 struct radeon_device *rdev = dev->dev_private;
1056 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1057 union external_encoder_control args;
1058 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1059 int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
1060 u8 frev, crev;
1061 int dp_clock = 0;
1062 int dp_lane_count = 0;
1063 int connector_object_id = 0;
1064
1065 if (connector) {
1066 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1067 struct radeon_connector_atom_dig *dig_connector =
1068 radeon_connector->con_priv;
1069
1070 dp_clock = dig_connector->dp_clock;
1071 dp_lane_count = dig_connector->dp_lane_count;
1072 connector_object_id =
1073 (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
1074 }
1075
1076 memset(&args, 0, sizeof(args));
1077
1078 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1079 return;
1080
1081 switch (frev) {
1082 case 1:
1083 /* no params on frev 1 */
1084 break;
1085 case 2:
1086 switch (crev) {
1087 case 1:
1088 case 2:
1089 args.v1.sDigEncoder.ucAction = action;
1090 args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
1091 args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
1092
1093 if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
1094 if (dp_clock == 270000)
1095 args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
1096 args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
1097 } else if (radeon_encoder->pixel_clock > 165000)
1098 args.v1.sDigEncoder.ucLaneNum = 8;
1099 else
1100 args.v1.sDigEncoder.ucLaneNum = 4;
1101 break;
1102 default:
1103 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
1104 return;
1105 }
1106 break;
1107 default:
1108 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
1109 return;
1110 }
1111 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1112}
1113
981static void 1114static void
982atombios_yuv_setup(struct drm_encoder *encoder, bool enable) 1115atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
983{ 1116{
@@ -1021,6 +1154,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1021 struct drm_device *dev = encoder->dev; 1154 struct drm_device *dev = encoder->dev;
1022 struct radeon_device *rdev = dev->dev_private; 1155 struct radeon_device *rdev = dev->dev_private;
1023 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1156 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1157 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
1024 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; 1158 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
1025 int index = 0; 1159 int index = 0;
1026 bool is_dig = false; 1160 bool is_dig = false;
@@ -1043,9 +1177,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1043 break; 1177 break;
1044 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1178 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1045 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1179 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1046 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1047 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); 1180 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
1048 break; 1181 break;
1182 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1183 if (ASIC_IS_DCE3(rdev))
1184 is_dig = true;
1185 else
1186 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
1187 break;
1049 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 1188 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1050 index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); 1189 index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
1051 break; 1190 break;
@@ -1082,34 +1221,85 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1082 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1221 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1083 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1222 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1084 1223
1224 if (connector &&
1225 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
1226 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1227 struct radeon_connector_atom_dig *radeon_dig_connector =
1228 radeon_connector->con_priv;
1229 atombios_set_edp_panel_power(connector,
1230 ATOM_TRANSMITTER_ACTION_POWER_ON);
1231 radeon_dig_connector->edp_on = true;
1232 }
1085 dp_link_train(encoder, connector); 1233 dp_link_train(encoder, connector);
1086 if (ASIC_IS_DCE4(rdev)) 1234 if (ASIC_IS_DCE4(rdev))
1087 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); 1235 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
1088 } 1236 }
1237 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1238 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
1089 break; 1239 break;
1090 case DRM_MODE_DPMS_STANDBY: 1240 case DRM_MODE_DPMS_STANDBY:
1091 case DRM_MODE_DPMS_SUSPEND: 1241 case DRM_MODE_DPMS_SUSPEND:
1092 case DRM_MODE_DPMS_OFF: 1242 case DRM_MODE_DPMS_OFF:
1093 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); 1243 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1094 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1244 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1245 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1246
1095 if (ASIC_IS_DCE4(rdev)) 1247 if (ASIC_IS_DCE4(rdev))
1096 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); 1248 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
1249 if (connector &&
1250 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
1251 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1252 struct radeon_connector_atom_dig *radeon_dig_connector =
1253 radeon_connector->con_priv;
1254 atombios_set_edp_panel_power(connector,
1255 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1256 radeon_dig_connector->edp_on = false;
1257 }
1097 } 1258 }
1259 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1260 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
1098 break; 1261 break;
1099 } 1262 }
1100 } else { 1263 } else {
1101 switch (mode) { 1264 switch (mode) {
1102 case DRM_MODE_DPMS_ON: 1265 case DRM_MODE_DPMS_ON:
1103 args.ucAction = ATOM_ENABLE; 1266 args.ucAction = ATOM_ENABLE;
1267 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1268 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1269 args.ucAction = ATOM_LCD_BLON;
1270 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1271 }
1104 break; 1272 break;
1105 case DRM_MODE_DPMS_STANDBY: 1273 case DRM_MODE_DPMS_STANDBY:
1106 case DRM_MODE_DPMS_SUSPEND: 1274 case DRM_MODE_DPMS_SUSPEND:
1107 case DRM_MODE_DPMS_OFF: 1275 case DRM_MODE_DPMS_OFF:
1108 args.ucAction = ATOM_DISABLE; 1276 args.ucAction = ATOM_DISABLE;
1277 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1278 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1279 args.ucAction = ATOM_LCD_BLOFF;
1280 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1281 }
1109 break; 1282 break;
1110 } 1283 }
1111 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1112 } 1284 }
1285
1286 if (ext_encoder) {
1287 int action;
1288
1289 switch (mode) {
1290 case DRM_MODE_DPMS_ON:
1291 default:
1292 action = ATOM_ENABLE;
1293 break;
1294 case DRM_MODE_DPMS_STANDBY:
1295 case DRM_MODE_DPMS_SUSPEND:
1296 case DRM_MODE_DPMS_OFF:
1297 action = ATOM_DISABLE;
1298 break;
1299 }
1300 atombios_external_encoder_setup(encoder, ext_encoder, action);
1301 }
1302
1113 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 1303 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
1114 1304
1115} 1305}
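The DPMS hunk above encodes a strict ordering for eDP: the panel must be powered up before link training can succeed, and powered down only after the video stream and transmitter are shut off, so the on and off paths are mirror images. A compilable toy sketch of that ordering; the stub functions are hypothetical stand-ins for the driver calls, not the radeon API:

/* Stubs so the sketch compiles; each mirrors one call in the hunk above. */
static void edp_panel_power(int on) { (void)on; }
static void dp_link_train(void) { }
static void dp_video_stream(int on) { (void)on; }

static void dp_output_dpms(int on)
{
	if (on) {
		edp_panel_power(1);	/* ATOM_TRANSMITTER_ACTION_POWER_ON */
		dp_link_train();	/* training needs a powered panel */
		dp_video_stream(1);	/* ATOM_ENCODER_CMD_DP_VIDEO_ON (DCE4) */
	} else {
		dp_video_stream(0);	/* stop the stream first */
		edp_panel_power(0);	/* then cut panel power */
	}
}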
@@ -1242,7 +1432,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1242 break; 1432 break;
1243 default: 1433 default:
1244 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); 1434 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
1245 break; 1435 return;
1246 } 1436 }
1247 1437
1248 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1438 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -1357,6 +1547,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1357 struct drm_device *dev = encoder->dev; 1547 struct drm_device *dev = encoder->dev;
1358 struct radeon_device *rdev = dev->dev_private; 1548 struct radeon_device *rdev = dev->dev_private;
1359 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1549 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1550 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
1360 1551
1361 radeon_encoder->pixel_clock = adjusted_mode->clock; 1552 radeon_encoder->pixel_clock = adjusted_mode->clock;
1362 1553
@@ -1400,11 +1591,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1400 } 1591 }
1401 break; 1592 break;
1402 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1593 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1403 atombios_ddia_setup(encoder, ATOM_ENABLE);
1404 break;
1405 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1594 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1406 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1595 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1407 atombios_external_tmds_setup(encoder, ATOM_ENABLE); 1596 atombios_dvo_setup(encoder, ATOM_ENABLE);
1408 break; 1597 break;
1409 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1598 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1410 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 1599 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1419,6 +1608,11 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1419 } 1608 }
1420 break; 1609 break;
1421 } 1610 }
1611
1612 if (ext_encoder) {
1613 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1614 }
1615
1422 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1616 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1423 1617
1424 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { 1618 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
@@ -1520,6 +1714,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
1520static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) 1714static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1521{ 1715{
1522 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1716 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1717 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1523 1718
1524 if (radeon_encoder->active_device & 1719 if (radeon_encoder->active_device &
1525 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { 1720 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
@@ -1531,6 +1726,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1531 radeon_atom_output_lock(encoder, true); 1726 radeon_atom_output_lock(encoder, true);
1532 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1727 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1533 1728
1729 /* select the clock/data port if it uses a router */
1730 if (connector) {
1731 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1732 if (radeon_connector->router.cd_valid)
1733 radeon_router_select_cd_port(radeon_connector);
1734 }
1735
1534 /* this is needed for the pll/ss setup to work correctly in some cases */ 1736 /* this is needed for the pll/ss setup to work correctly in some cases */
1535 atombios_set_encoder_crtc_source(encoder); 1737 atombios_set_encoder_crtc_source(encoder);
1536} 1738}
@@ -1547,6 +1749,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1547 struct radeon_device *rdev = dev->dev_private; 1749 struct radeon_device *rdev = dev->dev_private;
1548 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1750 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1549 struct radeon_encoder_atom_dig *dig; 1751 struct radeon_encoder_atom_dig *dig;
1752
1753 /* check for pre-DCE3 cards with shared encoders;
1754 * can't really use the links individually, so don't disable
1755 * the encoder if it's in use by another connector
1756 */
1757 if (!ASIC_IS_DCE3(rdev)) {
1758 struct drm_encoder *other_encoder;
1759 struct radeon_encoder *other_radeon_encoder;
1760
1761 list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
1762 other_radeon_encoder = to_radeon_encoder(other_encoder);
1763 if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
1764 drm_helper_encoder_in_use(other_encoder))
1765 goto disable_done;
1766 }
1767 }
1768
1550 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1769 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1551 1770
1552 switch (radeon_encoder->encoder_id) { 1771 switch (radeon_encoder->encoder_id) {
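The pre-DCE3 guard added above follows a general rule for shared hardware blocks: before tearing one down, check whether any other user maps onto the same physical resource and skip the teardown if so. A freestanding illustration of the rule; the struct is a hypothetical stand-in for the DRM encoder list, not the driver's types:

struct enc { int hw_id; int in_use; };

/* Return 1 if some other entry still drives the same physical encoder. */
static int shared_encoder_busy(const struct enc *e, int n, int self)
{
	for (int i = 0; i < n; i++)
		if (i != self && e[i].hw_id == e[self].hw_id && e[i].in_use)
			return 1;
	return 0;
}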
@@ -1570,11 +1789,9 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1570 } 1789 }
1571 break; 1790 break;
1572 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1791 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1573 atombios_ddia_setup(encoder, ATOM_DISABLE);
1574 break;
1575 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1792 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1576 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1793 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1577 atombios_external_tmds_setup(encoder, ATOM_DISABLE); 1794 atombios_dvo_setup(encoder, ATOM_DISABLE);
1578 break; 1795 break;
1579 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1796 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1580 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 1797 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1586,6 +1803,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1586 break; 1803 break;
1587 } 1804 }
1588 1805
1806disable_done:
1589 if (radeon_encoder_is_digital(encoder)) { 1807 if (radeon_encoder_is_digital(encoder)) {
1590 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) 1808 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
1591 r600_hdmi_disable(encoder); 1809 r600_hdmi_disable(encoder);
@@ -1595,6 +1813,53 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1595 radeon_encoder->active_device = 0; 1813 radeon_encoder->active_device = 0;
1596} 1814}
1597 1815
1816/* these are handled by the primary encoders */
1817static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
1818{
1819
1820}
1821
1822static void radeon_atom_ext_commit(struct drm_encoder *encoder)
1823{
1824
1825}
1826
1827static void
1828radeon_atom_ext_mode_set(struct drm_encoder *encoder,
1829 struct drm_display_mode *mode,
1830 struct drm_display_mode *adjusted_mode)
1831{
1832
1833}
1834
1835static void radeon_atom_ext_disable(struct drm_encoder *encoder)
1836{
1837
1838}
1839
1840static void
1841radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
1842{
1843
1844}
1845
1846static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
1847 struct drm_display_mode *mode,
1848 struct drm_display_mode *adjusted_mode)
1849{
1850 return true;
1851}
1852
1853static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
1854 .dpms = radeon_atom_ext_dpms,
1855 .mode_fixup = radeon_atom_ext_mode_fixup,
1856 .prepare = radeon_atom_ext_prepare,
1857 .mode_set = radeon_atom_ext_mode_set,
1858 .commit = radeon_atom_ext_commit,
1859 .disable = radeon_atom_ext_disable,
1860 /* no detect for TMDS/LVDS yet */
1861};
1862
1598static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { 1863static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
1599 .dpms = radeon_atom_encoder_dpms, 1864 .dpms = radeon_atom_encoder_dpms,
1600 .mode_fixup = radeon_atom_mode_fixup, 1865 .mode_fixup = radeon_atom_mode_fixup,
@@ -1704,6 +1969,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
1704 radeon_encoder->devices = supported_device; 1969 radeon_encoder->devices = supported_device;
1705 radeon_encoder->rmx_type = RMX_OFF; 1970 radeon_encoder->rmx_type = RMX_OFF;
1706 radeon_encoder->underscan_type = UNDERSCAN_OFF; 1971 radeon_encoder->underscan_type = UNDERSCAN_OFF;
1972 radeon_encoder->is_ext_encoder = false;
1707 1973
1708 switch (radeon_encoder->encoder_id) { 1974 switch (radeon_encoder->encoder_id) {
1709 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 1975 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -1745,6 +2011,9 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
1745 radeon_encoder->rmx_type = RMX_FULL; 2011 radeon_encoder->rmx_type = RMX_FULL;
1746 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); 2012 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
1747 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); 2013 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
2014 } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
2015 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
2016 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
1748 } else { 2017 } else {
1749 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2018 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
1750 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 2019 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
@@ -1753,5 +2022,22 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
1753 } 2022 }
1754 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 2023 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
1755 break; 2024 break;
2025 case ENCODER_OBJECT_ID_SI170B:
2026 case ENCODER_OBJECT_ID_CH7303:
2027 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
2028 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
2029 case ENCODER_OBJECT_ID_TITFP513:
2030 case ENCODER_OBJECT_ID_VT1623:
2031 case ENCODER_OBJECT_ID_HDMI_SI1930:
2032 /* these are handled by the primary encoders */
2033 radeon_encoder->is_ext_encoder = true;
2034 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
2035 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
2036 else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
2037 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
2038 else
2039 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
2040 drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
2041 break;
1756 } 2042 }
1757} 2043}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index efa211898fe6..6abea32be5e8 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -245,7 +245,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
245 goto out_unref; 245 goto out_unref;
246 } 246 }
247 info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; 247 info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
248 info->apertures->ranges[0].size = rdev->mc.real_vram_size; 248 info->apertures->ranges[0].size = rdev->mc.aper_size;
249 249
250 info->fix.mmio_start = 0; 250 info->fix.mmio_start = 0;
251 info->fix.mmio_len = 0; 251 info->fix.mmio_len = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 216392d0353b..daacb281dfaf 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -240,7 +240,8 @@ retry:
240 */ 240 */
241 if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { 241 if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
242 /* good news we believe it's a lockup */ 242 /* good news we believe it's a lockup */
243 WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); 243 WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
244 fence->seq, seq);
244 /* FIXME: what should we do ? marking everyone 245 /* FIXME: what should we do ? marking everyone
245 * as signaled for now 246 * as signaled for now
246 */ 247 */
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e65b90317fab..65016117d95f 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -79,8 +79,8 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
79 79
80 if (rdev->gart.table.vram.robj == NULL) { 80 if (rdev->gart.table.vram.robj == NULL) {
81 r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, 81 r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
82 true, RADEON_GEM_DOMAIN_VRAM, 82 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
83 &rdev->gart.table.vram.robj); 83 &rdev->gart.table.vram.robj);
84 if (r) { 84 if (r) {
85 return r; 85 return r;
86 } 86 }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d1e595d91723..df95eb83dac6 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -67,7 +67,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
67 if (alignment < PAGE_SIZE) { 67 if (alignment < PAGE_SIZE) {
68 alignment = PAGE_SIZE; 68 alignment = PAGE_SIZE;
69 } 69 }
70 r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); 70 r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
71 if (r) { 71 if (r) {
72 if (r != -ERESTARTSYS) 72 if (r != -ERESTARTSYS)
73 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", 73 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 6a13ee38a5b9..ded2a45bc95c 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
53 }; 53 };
54 54
55 /* on hw with routers, select right port */ 55 /* on hw with routers, select right port */
56 if (radeon_connector->router.valid) 56 if (radeon_connector->router.ddc_valid)
57 radeon_router_select_port(radeon_connector); 57 radeon_router_select_ddc_port(radeon_connector);
58 58
59 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); 59 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
60 if (ret == 2) 60 if (ret == 2)
@@ -896,7 +896,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
896 ((rdev->family <= CHIP_RS480) || 896 ((rdev->family <= CHIP_RS480) ||
897 ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { 897 ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
898 /* set the radeon hw i2c adapter */ 898 /* set the radeon hw i2c adapter */
899 sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name); 899 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
900 "Radeon i2c hw bus %s", name);
900 i2c->adapter.algo = &radeon_i2c_algo; 901 i2c->adapter.algo = &radeon_i2c_algo;
901 ret = i2c_add_adapter(&i2c->adapter); 902 ret = i2c_add_adapter(&i2c->adapter);
902 if (ret) { 903 if (ret) {
@@ -905,7 +906,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
905 } 906 }
906 } else { 907 } else {
907 /* set the radeon bit adapter */ 908 /* set the radeon bit adapter */
908 sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name); 909 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
910 "Radeon i2c bit bus %s", name);
909 i2c->adapter.algo_data = &i2c->algo.bit; 911 i2c->adapter.algo_data = &i2c->algo.bit;
910 i2c->algo.bit.pre_xfer = pre_xfer; 912 i2c->algo.bit.pre_xfer = pre_xfer;
911 i2c->algo.bit.post_xfer = post_xfer; 913 i2c->algo.bit.post_xfer = post_xfer;
@@ -946,6 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
946 i2c->rec = *rec; 948 i2c->rec = *rec;
947 i2c->adapter.owner = THIS_MODULE; 949 i2c->adapter.owner = THIS_MODULE;
948 i2c->dev = dev; 950 i2c->dev = dev;
951 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
952 "Radeon aux bus %s", name);
949 i2c_set_adapdata(&i2c->adapter, i2c); 953 i2c_set_adapdata(&i2c->adapter, i2c);
950 i2c->adapter.algo_data = &i2c->algo.dp; 954 i2c->adapter.algo_data = &i2c->algo.dp;
951 i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; 955 i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
@@ -1084,26 +1088,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
1084 addr, val); 1088 addr, val);
1085} 1089}
1086 1090
1087/* router switching */ 1091/* ddc router switching */
1088void radeon_router_select_port(struct radeon_connector *radeon_connector) 1092void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
1089{ 1093{
1090 u8 val; 1094 u8 val;
1091 1095
1092 if (!radeon_connector->router.valid) 1096 if (!radeon_connector->router.ddc_valid)
1093 return; 1097 return;
1094 1098
1095 radeon_i2c_get_byte(radeon_connector->router_bus, 1099 radeon_i2c_get_byte(radeon_connector->router_bus,
1096 radeon_connector->router.i2c_addr, 1100 radeon_connector->router.i2c_addr,
1097 0x3, &val); 1101 0x3, &val);
1098 val &= radeon_connector->router.mux_control_pin; 1102 val &= ~radeon_connector->router.ddc_mux_control_pin;
1099 radeon_i2c_put_byte(radeon_connector->router_bus, 1103 radeon_i2c_put_byte(radeon_connector->router_bus,
1100 radeon_connector->router.i2c_addr, 1104 radeon_connector->router.i2c_addr,
1101 0x3, val); 1105 0x3, val);
1102 radeon_i2c_get_byte(radeon_connector->router_bus, 1106 radeon_i2c_get_byte(radeon_connector->router_bus,
1103 radeon_connector->router.i2c_addr, 1107 radeon_connector->router.i2c_addr,
1104 0x1, &val); 1108 0x1, &val);
1105 val &= radeon_connector->router.mux_control_pin; 1109 val &= ~radeon_connector->router.ddc_mux_control_pin;
1106 val |= radeon_connector->router.mux_state; 1110 val |= radeon_connector->router.ddc_mux_state;
1111 radeon_i2c_put_byte(radeon_connector->router_bus,
1112 radeon_connector->router.i2c_addr,
1113 0x1, val);
1114}
1115
1116/* clock/data router switching */
1117void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
1118{
1119 u8 val;
1120
1121 if (!radeon_connector->router.cd_valid)
1122 return;
1123
1124 radeon_i2c_get_byte(radeon_connector->router_bus,
1125 radeon_connector->router.i2c_addr,
1126 0x3, &val);
1127 val &= ~radeon_connector->router.cd_mux_control_pin;
1128 radeon_i2c_put_byte(radeon_connector->router_bus,
1129 radeon_connector->router.i2c_addr,
1130 0x3, val);
1131 radeon_i2c_get_byte(radeon_connector->router_bus,
1132 radeon_connector->router.i2c_addr,
1133 0x1, &val);
1134 val &= ~radeon_connector->router.cd_mux_control_pin;
1135 val |= radeon_connector->router.cd_mux_state;
1107 radeon_i2c_put_byte(radeon_connector->router_bus, 1136 radeon_i2c_put_byte(radeon_connector->router_bus,
1108 radeon_connector->router.i2c_addr, 1137 radeon_connector->router.i2c_addr,
1109 0x1, val); 1138 0x1, val);
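Both router helpers above program the mux with the same read-modify-write shape: read the register, clear the control-pin bits this mux owns (note the new ~ on the mask, where the old code and-ed with the mask directly), then or in the desired state. The core of that shape, reduced to a pure function with illustrative values; the real masks come from the BIOS tables:

#include <stdint.h>

/* Clear only the bits this mux owns, then drive them to the new state. */
static uint8_t mux_update(uint8_t reg, uint8_t control_pins, uint8_t state)
{
	reg &= ~control_pins;	/* release our pins, leave the rest alone */
	reg |= state;		/* select the requested port */
	return reg;
}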
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 2f349a300195..465746bd51b7 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -76,7 +76,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
76 default: 76 default:
77 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 77 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
78 crtc); 78 crtc);
79 return EINVAL; 79 return -EINVAL;
80 } 80 }
81 } else { 81 } else {
82 switch (crtc) { 82 switch (crtc) {
@@ -89,7 +89,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
89 default: 89 default:
90 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 90 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
91 crtc); 91 crtc);
92 return EINVAL; 92 return -EINVAL;
93 } 93 }
94 } 94 }
95 95
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0b8397000f4c..59f834ba283d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -670,7 +670,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
670 670
671 if (rdev->is_atom_bios) { 671 if (rdev->is_atom_bios) {
672 radeon_encoder->pixel_clock = adjusted_mode->clock; 672 radeon_encoder->pixel_clock = adjusted_mode->clock;
673 atombios_external_tmds_setup(encoder, ATOM_ENABLE); 673 atombios_dvo_setup(encoder, ATOM_ENABLE);
674 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); 674 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
675 } else { 675 } else {
676 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); 676 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92457163d070..e301c6f9e059 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -375,6 +375,7 @@ struct radeon_encoder {
375 int hdmi_config_offset; 375 int hdmi_config_offset;
376 int hdmi_audio_workaround; 376 int hdmi_audio_workaround;
377 int hdmi_buffer_status; 377 int hdmi_buffer_status;
378 bool is_ext_encoder;
378}; 379};
379 380
380struct radeon_connector_atom_dig { 381struct radeon_connector_atom_dig {
@@ -385,6 +386,7 @@ struct radeon_connector_atom_dig {
385 u8 dp_sink_type; 386 u8 dp_sink_type;
386 int dp_clock; 387 int dp_clock;
387 int dp_lane_count; 388 int dp_lane_count;
389 bool edp_on;
388}; 390};
389 391
390struct radeon_gpio_rec { 392struct radeon_gpio_rec {
@@ -401,13 +403,19 @@ struct radeon_hpd {
401}; 403};
402 404
403struct radeon_router { 405struct radeon_router {
404 bool valid;
405 u32 router_id; 406 u32 router_id;
406 struct radeon_i2c_bus_rec i2c_info; 407 struct radeon_i2c_bus_rec i2c_info;
407 u8 i2c_addr; 408 u8 i2c_addr;
408 u8 mux_type; 409 /* i2c mux */
409 u8 mux_control_pin; 410 bool ddc_valid;
410 u8 mux_state; 411 u8 ddc_mux_type;
412 u8 ddc_mux_control_pin;
413 u8 ddc_mux_state;
414 /* clock/data mux */
415 bool cd_valid;
416 u8 cd_mux_type;
417 u8 cd_mux_control_pin;
418 u8 cd_mux_state;
411}; 419};
412 420
413struct radeon_connector { 421struct radeon_connector {
@@ -488,7 +496,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
488 u8 slave_addr, 496 u8 slave_addr,
489 u8 addr, 497 u8 addr,
490 u8 val); 498 u8 val);
491extern void radeon_router_select_port(struct radeon_connector *radeon_connector); 499extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
500extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
492extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 501extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
493extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 502extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
494 503
@@ -516,9 +525,10 @@ struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev
516struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); 525struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
517struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); 526struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
518struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); 527struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
519extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); 528extern void atombios_dvo_setup(struct drm_encoder *encoder, int action);
520extern void atombios_digital_setup(struct drm_encoder *encoder, int action); 529extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
521extern int atombios_get_encoder_mode(struct drm_encoder *encoder); 530extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
531extern void atombios_set_edp_panel_power(struct drm_connector *connector, int action);
522extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); 532extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
523 533
524extern void radeon_crtc_load_lut(struct drm_crtc *crtc); 534extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d7ab91416410..a598d0049aa5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
69 u32 c = 0; 69 u32 c = 0;
70 70
71 rbo->placement.fpfn = 0; 71 rbo->placement.fpfn = 0;
72 rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT; 72 rbo->placement.lpfn = 0;
73 rbo->placement.placement = rbo->placements; 73 rbo->placement.placement = rbo->placements;
74 rbo->placement.busy_placement = rbo->placements; 74 rbo->placement.busy_placement = rbo->placements;
75 if (domain & RADEON_GEM_DOMAIN_VRAM) 75 if (domain & RADEON_GEM_DOMAIN_VRAM)
@@ -86,11 +86,13 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
86} 86}
87 87
88int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, 88int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
89 unsigned long size, bool kernel, u32 domain, 89 unsigned long size, int byte_align, bool kernel, u32 domain,
90 struct radeon_bo **bo_ptr) 90 struct radeon_bo **bo_ptr)
91{ 91{
92 struct radeon_bo *bo; 92 struct radeon_bo *bo;
93 enum ttm_bo_type type; 93 enum ttm_bo_type type;
94 unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
95 unsigned long max_size = 0;
94 int r; 96 int r;
95 97
96 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { 98 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -102,6 +104,16 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
102 type = ttm_bo_type_device; 104 type = ttm_bo_type_device;
103 } 105 }
104 *bo_ptr = NULL; 106 *bo_ptr = NULL;
107
108 /* maximum bo size is the minimum of visible vram and gtt size */
109 max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
110 if ((page_align << PAGE_SHIFT) >= max_size) {
111 printk(KERN_WARNING "%s:%d alloc size %luMb bigger than %luMb limit\n",
112 __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
113 return -ENOMEM;
114 }
115
116retry:
105 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); 117 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
106 if (bo == NULL) 118 if (bo == NULL)
107 return -ENOMEM; 119 return -ENOMEM;
@@ -109,13 +121,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
109 bo->gobj = gobj; 121 bo->gobj = gobj;
110 bo->surface_reg = -1; 122 bo->surface_reg = -1;
111 INIT_LIST_HEAD(&bo->list); 123 INIT_LIST_HEAD(&bo->list);
112
113retry:
114 radeon_ttm_placement_from_domain(bo, domain); 124 radeon_ttm_placement_from_domain(bo, domain);
115 /* Kernel allocations are uninterruptible */ 125 /* Kernel allocations are uninterruptible */
116 mutex_lock(&rdev->vram_mutex); 126 mutex_lock(&rdev->vram_mutex);
117 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, 127 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
118 &bo->placement, 0, 0, !kernel, NULL, size, 128 &bo->placement, page_align, 0, !kernel, NULL, size,
119 &radeon_ttm_bo_destroy); 129 &radeon_ttm_bo_destroy);
120 mutex_unlock(&rdev->vram_mutex); 130 mutex_unlock(&rdev->vram_mutex);
121 if (unlikely(r != 0)) { 131 if (unlikely(r != 0)) {
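radeon_bo_create() now takes a byte alignment and converts it to the page alignment that ttm_bo_init() expects; most callers in this series pass PAGE_SIZE, while radeon_gem_object_create() forwards the user-supplied alignment clamped to at least PAGE_SIZE. A self-contained check of the conversion, with roundup() reproduced so the example stands alone:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned long byte_align = 256;	/* hypothetical caller request */
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;

	/* 256 bytes rounds up to one 4 KiB page of alignment. */
	printf("page_align = %lu\n", page_align);	/* prints 1 */
	return 0;
}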
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 3481bc7f6f58..d143702b244a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -137,9 +137,10 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
137} 137}
138 138
139extern int radeon_bo_create(struct radeon_device *rdev, 139extern int radeon_bo_create(struct radeon_device *rdev,
140 struct drm_gem_object *gobj, unsigned long size, 140 struct drm_gem_object *gobj, unsigned long size,
141 bool kernel, u32 domain, 141 int byte_align,
142 struct radeon_bo **bo_ptr); 142 bool kernel, u32 domain,
143 struct radeon_bo **bo_ptr);
143extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); 144extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
144extern void radeon_bo_kunmap(struct radeon_bo *bo); 145extern void radeon_bo_kunmap(struct radeon_bo *bo);
145extern void radeon_bo_unref(struct radeon_bo **bo); 146extern void radeon_bo_unref(struct radeon_bo **bo);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 6ea798ce8218..06e79822a2bf 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -176,8 +176,8 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
176 INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); 176 INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
177 /* Allocate 1M object buffer */ 177 /* Allocate 1M object buffer */
178 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 178 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
179 true, RADEON_GEM_DOMAIN_GTT, 179 PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
180 &rdev->ib_pool.robj); 180 &rdev->ib_pool.robj);
181 if (r) { 181 if (r) {
182 DRM_ERROR("radeon: failed to ib pool (%d).\n", r); 182 DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
183 return r; 183 return r;
@@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
332 rdev->cp.ring_size = ring_size; 332 rdev->cp.ring_size = ring_size;
333 /* Allocate ring buffer */ 333 /* Allocate ring buffer */
334 if (rdev->cp.ring_obj == NULL) { 334 if (rdev->cp.ring_obj == NULL) {
335 r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true, 335 r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
336 RADEON_GEM_DOMAIN_GTT, 336 RADEON_GEM_DOMAIN_GTT,
337 &rdev->cp.ring_obj); 337 &rdev->cp.ring_obj);
338 if (r) { 338 if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 313c96bc09da..5b44f652145c 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev)
52 goto out_cleanup; 52 goto out_cleanup;
53 } 53 }
54 54
55 r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, 55 r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
56 &vram_obj); 56 &vram_obj);
57 if (r) { 57 if (r) {
58 DRM_ERROR("Failed to create VRAM object\n"); 58 DRM_ERROR("Failed to create VRAM object\n");
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev)
71 void **gtt_start, **gtt_end; 71 void **gtt_start, **gtt_end;
72 void **vram_start, **vram_end; 72 void **vram_start, **vram_end;
73 73
74 r = radeon_bo_create(rdev, NULL, size, true, 74 r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
75 RADEON_GEM_DOMAIN_GTT, gtt_obj + i); 75 RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
76 if (r) { 76 if (r) {
77 DRM_ERROR("Failed to create GTT object %d\n", i); 77 DRM_ERROR("Failed to create GTT object %d\n", i);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index fe95bb35317e..1272e4b6a1d4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
529 DRM_ERROR("Failed initializing VRAM heap.\n"); 529 DRM_ERROR("Failed initializing VRAM heap.\n");
530 return r; 530 return r;
531 } 531 }
532 r = radeon_bo_create(rdev, NULL, 256 * 1024, true, 532 r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
533 RADEON_GEM_DOMAIN_VRAM, 533 RADEON_GEM_DOMAIN_VRAM,
534 &rdev->stollen_vga_memory); 534 &rdev->stollen_vga_memory);
535 if (r) { 535 if (r) {
@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
689 gtt = container_of(backend, struct radeon_ttm_backend, backend); 689 gtt = container_of(backend, struct radeon_ttm_backend, backend);
690 gtt->offset = bo_mem->start << PAGE_SHIFT; 690 gtt->offset = bo_mem->start << PAGE_SHIFT;
691 if (!gtt->num_pages) { 691 if (!gtt->num_pages) {
692 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); 692 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
693 gtt->num_pages, bo_mem, backend);
693 } 694 }
694 r = radeon_gart_bind(gtt->rdev, gtt->offset, 695 r = radeon_gart_bind(gtt->rdev, gtt->offset,
695 gtt->num_pages, gtt->pages); 696 gtt->num_pages, gtt->pages);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index f683e51a2a06..5512e4e5e636 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev)
78 int r; 78 int r;
79 79
80 if (rdev->gart.table.ram.ptr) { 80 if (rdev->gart.table.ram.ptr) {
81 WARN(1, "RS400 GART already initialized.\n"); 81 WARN(1, "RS400 GART already initialized\n");
82 return 0; 82 return 0;
83 } 83 }
84 /* Check gart size */ 84 /* Check gart size */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b091a1f6fa4e..f1c6e02c2e6b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev)
375 int r; 375 int r;
376 376
377 if (rdev->gart.table.vram.robj) { 377 if (rdev->gart.table.vram.robj) {
378 WARN(1, "RS600 GART already initialized.\n"); 378 WARN(1, "RS600 GART already initialized\n");
379 return 0; 379 return 0;
380 } 380 }
381 /* Initialize common gart structure */ 381 /* Initialize common gart structure */
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev)
505 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); 505 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
506 506
507 if (!rdev->irq.installed) { 507 if (!rdev->irq.installed) {
508 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 508 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
509 WREG32(R_000040_GEN_INT_CNTL, 0); 509 WREG32(R_000040_GEN_INT_CNTL, 0);
510 return -EINVAL; 510 return -EINVAL;
511 } 511 }
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 245374e2b778..4dfead8cee33 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -915,8 +915,8 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev)
915 915
916 if (rdev->vram_scratch.robj == NULL) { 916 if (rdev->vram_scratch.robj == NULL) {
917 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, 917 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
918 true, RADEON_GEM_DOMAIN_VRAM, 918 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
919 &rdev->vram_scratch.robj); 919 &rdev->vram_scratch.robj);
920 if (r) { 920 if (r) {
921 return r; 921 return r;
922 } 922 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a1cb783c7131..148a322d8f5d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,14 +27,6 @@
27/* 27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30/* Notes:
31 *
32 * We store bo pointer in drm_mm_node struct so we know which bo own a
33 * specific node. There is no protection on the pointer, thus to make
34 * sure things don't go berserk you have to access this pointer while
35 * holding the global lru lock and make sure anytime you free a node you
36 * reset the pointer to NULL.
37 */
38 30
39#include "ttm/ttm_module.h" 31#include "ttm/ttm_module.h"
40#include "ttm/ttm_bo_driver.h" 32#include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
45#include <linux/mm.h> 37#include <linux/mm.h>
46#include <linux/file.h> 38#include <linux/file.h>
47#include <linux/module.h> 39#include <linux/module.h>
40#include <asm/atomic.h>
48 41
49#define TTM_ASSERT_LOCKED(param) 42#define TTM_ASSERT_LOCKED(param)
50#define TTM_DEBUG(fmt, arg...) 43#define TTM_DEBUG(fmt, arg...)
@@ -231,6 +224,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
231 int ret; 224 int ret;
232 225
233 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { 226 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
227 /**
228 * Deadlock avoidance for multi-bo reserving.
229 */
234 if (use_sequence && bo->seq_valid && 230 if (use_sequence && bo->seq_valid &&
235 (sequence - bo->val_seq < (1 << 31))) { 231 (sequence - bo->val_seq < (1 << 31))) {
236 return -EAGAIN; 232 return -EAGAIN;
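The deadlock-avoidance test above compares sequence numbers with unsigned subtraction so the ordering stays correct when the counter wraps: (a - b) < 2^31 means "a is at or after b" in modulo-2^32 arithmetic. A minimal sketch of the predicate:

#include <stdint.h>
#include <stdbool.h>

/* True when 'a' is at or after 'b' in wrap-around 32-bit sequence space. */
static bool seq_at_or_after(uint32_t a, uint32_t b)
{
	return (uint32_t)(a - b) < (1u << 31);
}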
@@ -248,6 +244,14 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
248 } 244 }
249 245
250 if (use_sequence) { 246 if (use_sequence) {
247 /**
248 * Wake up waiters that may need to recheck for deadlock,
249 * if we decreased the sequence number.
250 */
251 if (unlikely((bo->val_seq - sequence < (1 << 31))
252 || !bo->seq_valid))
253 wake_up_all(&bo->event_queue);
254
251 bo->val_seq = sequence; 255 bo->val_seq = sequence;
252 bo->seq_valid = true; 256 bo->seq_valid = true;
253 } else { 257 } else {
@@ -452,6 +456,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
452 ttm_bo_mem_put(bo, &bo->mem); 456 ttm_bo_mem_put(bo, &bo->mem);
453 457
454 atomic_set(&bo->reserved, 0); 458 atomic_set(&bo->reserved, 0);
459
460 /*
461 * Make processes trying to reserve really pick it up.
462 */
463 smp_mb__after_atomic_dec();
455 wake_up_all(&bo->event_queue); 464 wake_up_all(&bo->event_queue);
456} 465}
457 466
@@ -460,7 +469,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
460 struct ttm_bo_device *bdev = bo->bdev; 469 struct ttm_bo_device *bdev = bo->bdev;
461 struct ttm_bo_global *glob = bo->glob; 470 struct ttm_bo_global *glob = bo->glob;
462 struct ttm_bo_driver *driver; 471 struct ttm_bo_driver *driver;
463 void *sync_obj; 472 void *sync_obj = NULL;
464 void *sync_obj_arg; 473 void *sync_obj_arg;
465 int put_count; 474 int put_count;
466 int ret; 475 int ret;
@@ -495,17 +504,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
495 spin_lock(&glob->lru_lock); 504 spin_lock(&glob->lru_lock);
496 } 505 }
497queue: 506queue:
498 sync_obj = bo->sync_obj;
499 sync_obj_arg = bo->sync_obj_arg;
500 driver = bdev->driver; 507 driver = bdev->driver;
508 if (bo->sync_obj)
509 sync_obj = driver->sync_obj_ref(bo->sync_obj);
510 sync_obj_arg = bo->sync_obj_arg;
501 511
502 kref_get(&bo->list_kref); 512 kref_get(&bo->list_kref);
503 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 513 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
504 spin_unlock(&glob->lru_lock); 514 spin_unlock(&glob->lru_lock);
505 spin_unlock(&bo->lock); 515 spin_unlock(&bo->lock);
506 516
507 if (sync_obj) 517 if (sync_obj) {
508 driver->sync_obj_flush(sync_obj, sync_obj_arg); 518 driver->sync_obj_flush(sync_obj, sync_obj_arg);
519 driver->sync_obj_unref(&sync_obj);
520 }
509 schedule_delayed_work(&bdev->wq, 521 schedule_delayed_work(&bdev->wq,
510 ((HZ / 100) < 1) ? 1 : HZ / 100); 522 ((HZ / 100) < 1) ? 1 : HZ / 100);
511} 523}
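The sync_obj change above applies the standard rule for objects shared under a lock: take your own reference while the lock is held, drop the lock, use the private reference, then release it; otherwise the object can be freed or replaced between the unlock and the flush. A sketch of the rule with a toy refcount in place of the driver's sync-object ops; all names here are illustrative, not the TTM API:

struct obj { int refs; };

static struct obj *obj_ref(struct obj *o) { o->refs++; return o; }
static void obj_unref(struct obj *o) { if (--o->refs == 0) { /* free it */ } }

/* 'shared' is protected by lock()/unlock(); 'use' runs without the lock. */
static void use_shared_safely(struct obj **shared,
			      void (*lock)(void), void (*unlock)(void),
			      void (*use)(struct obj *))
{
	struct obj *mine = NULL;

	lock();
	if (*shared)
		mine = obj_ref(*shared);	/* pin while still locked */
	unlock();				/* *shared may change now */

	if (mine) {
		use(mine);			/* safe: we hold a reference */
		obj_unref(mine);
	}
}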
@@ -822,7 +834,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
822 bool no_wait_gpu) 834 bool no_wait_gpu)
823{ 835{
824 struct ttm_bo_device *bdev = bo->bdev; 836 struct ttm_bo_device *bdev = bo->bdev;
825 struct ttm_bo_global *glob = bdev->glob;
826 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 837 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
827 int ret; 838 int ret;
828 839
@@ -832,12 +843,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
832 return ret; 843 return ret;
833 if (mem->mm_node) 844 if (mem->mm_node)
834 break; 845 break;
835 spin_lock(&glob->lru_lock);
836 if (list_empty(&man->lru)) {
837 spin_unlock(&glob->lru_lock);
838 break;
839 }
840 spin_unlock(&glob->lru_lock);
841 ret = ttm_mem_evict_first(bdev, mem_type, interruptible, 846 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
842 no_wait_reserve, no_wait_gpu); 847 no_wait_reserve, no_wait_gpu);
843 if (unlikely(ret != 0)) 848 if (unlikely(ret != 0))
@@ -1125,35 +1130,9 @@ EXPORT_SYMBOL(ttm_bo_validate);
1125int ttm_bo_check_placement(struct ttm_buffer_object *bo, 1130int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1126 struct ttm_placement *placement) 1131 struct ttm_placement *placement)
1127{ 1132{
1128 int i; 1133 BUG_ON((placement->fpfn || placement->lpfn) &&
1134 (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1129 1135
1130 if (placement->fpfn || placement->lpfn) {
1131 if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
1132 printk(KERN_ERR TTM_PFX "Page number range to small "
1133 "Need %lu pages, range is [%u, %u]\n",
1134 bo->mem.num_pages, placement->fpfn,
1135 placement->lpfn);
1136 return -EINVAL;
1137 }
1138 }
1139 for (i = 0; i < placement->num_placement; i++) {
1140 if (!capable(CAP_SYS_ADMIN)) {
1141 if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
1142 printk(KERN_ERR TTM_PFX "Need to be root to "
1143 "modify NO_EVICT status.\n");
1144 return -EINVAL;
1145 }
1146 }
1147 }
1148 for (i = 0; i < placement->num_busy_placement; i++) {
1149 if (!capable(CAP_SYS_ADMIN)) {
1150 if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
1151 printk(KERN_ERR TTM_PFX "Need to be root to "
1152 "modify NO_EVICT status.\n");
1153 return -EINVAL;
1154 }
1155 }
1156 }
1157 return 0; 1136 return 0;
1158} 1137}
1159 1138
@@ -1176,6 +1155,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1176 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1155 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1177 if (num_pages == 0) { 1156 if (num_pages == 0) {
1178 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); 1157 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1158 if (destroy)
1159 (*destroy)(bo);
1160 else
1161 kfree(bo);
1179 return -EINVAL; 1162 return -EINVAL;
1180 } 1163 }
1181 bo->destroy = destroy; 1164 bo->destroy = destroy;
@@ -1369,18 +1352,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1369 int ret = -EINVAL; 1352 int ret = -EINVAL;
1370 struct ttm_mem_type_manager *man; 1353 struct ttm_mem_type_manager *man;
1371 1354
1372 if (type >= TTM_NUM_MEM_TYPES) { 1355 BUG_ON(type >= TTM_NUM_MEM_TYPES);
1373 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1374 return ret;
1375 }
1376
1377 man = &bdev->man[type]; 1356 man = &bdev->man[type];
1378 if (man->has_type) { 1357 BUG_ON(man->has_type);
1379 printk(KERN_ERR TTM_PFX
1380 "Memory manager already initialized for type %d\n",
1381 type);
1382 return ret;
1383 }
1384 1358
1385 ret = bdev->driver->init_mem_type(bdev, type, man); 1359 ret = bdev->driver->init_mem_type(bdev, type, man);
1386 if (ret) 1360 if (ret)
@@ -1389,13 +1363,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1389 1363
1390 ret = 0; 1364 ret = 0;
1391 if (type != TTM_PL_SYSTEM) { 1365 if (type != TTM_PL_SYSTEM) {
1392 if (!p_size) {
1393 printk(KERN_ERR TTM_PFX
1394 "Zero size memory manager type %d\n",
1395 type);
1396 return ret;
1397 }
1398
1399 ret = (*man->func->init)(man, p_size); 1366 ret = (*man->func->init)(man, p_size);
1400 if (ret) 1367 if (ret)
1401 return ret; 1368 return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 7410c190c891..038e947d00f9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,20 +31,29 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
-#include <linux/jiffies.h>
+#include "drm_mm.h"
 #include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/file.h>
+#include <linux/spinlock.h>
 #include <linux/module.h>
 
+/**
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+	struct drm_mm mm;
+	spinlock_t lock;
+};
+
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 			       struct ttm_buffer_object *bo,
 			       struct ttm_placement *placement,
 			       struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
 	struct drm_mm_node *node = NULL;
 	unsigned long lpfn;
 	int ret;
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 		if (unlikely(ret))
 			return ret;
 
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		node = drm_mm_search_free_in_range(mm,
 					mem->num_pages, mem->page_alignment,
 					placement->fpfn, lpfn, 1);
 		if (unlikely(node == NULL)) {
-			spin_unlock(&glob->lru_lock);
+			spin_unlock(&rman->lock);
 			return 0;
 		}
 		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
 						     mem->page_alignment,
 						     placement->fpfn,
 						     lpfn);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&rman->lock);
 	} while (node == NULL);
 
 	mem->mm_node = node;
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 				struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
 	if (mem->mm_node) {
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		drm_mm_put_block(mem->mm_node);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&rman->lock);
 		mem->mm_node = NULL;
 	}
 }
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
 			   unsigned long p_size)
 {
-	struct drm_mm *mm;
+	struct ttm_range_manager *rman;
 	int ret;
 
-	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-	if (!mm)
+	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+	if (!rman)
 		return -ENOMEM;
 
-	ret = drm_mm_init(mm, 0, p_size);
+	ret = drm_mm_init(&rman->mm, 0, p_size);
 	if (ret) {
-		kfree(mm);
+		kfree(rman);
 		return ret;
 	}
 
-	man->priv = mm;
+	spin_lock_init(&rman->lock);
+	man->priv = rman;
 	return 0;
 }
 
 static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
-	int ret = 0;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
 
-	spin_lock(&glob->lru_lock);
+	spin_lock(&rman->lock);
 	if (drm_mm_clean(mm)) {
 		drm_mm_takedown(mm);
-		kfree(mm);
+		spin_unlock(&rman->lock);
+		kfree(rman);
 		man->priv = NULL;
-	} else
-		ret = -EBUSY;
-	spin_unlock(&glob->lru_lock);
-	return ret;
+		return 0;
+	}
+	spin_unlock(&rman->lock);
+	return -EBUSY;
 }
 
 static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
 			     const char *prefix)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
-	spin_lock(&glob->lru_lock);
-	drm_mm_debug_table(mm, prefix);
-	spin_unlock(&glob->lru_lock);
+	spin_lock(&rman->lock);
+	drm_mm_debug_table(&rman->mm, prefix);
+	spin_unlock(&rman->lock);
 }
 
 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
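
The new ttm_range_manager pairs the drm_mm with its own spinlock, so range allocations no longer serialize on the global LRU lock (the added comment leaves the door open to a mutex if fragmented workloads make the critical sections long). A driver opts into this stock allocator from its init_mem_type() hook by pointing man->func at the exported table. A minimal sketch, assuming a VRAM-style fixed memory type; the function name and flag choice are illustrative, not part of this patch:

	static int my_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				    struct ttm_mem_type_manager *man)
	{
		switch (type) {
		case TTM_PL_VRAM:
			/* ttm_bo_init_mm() reaches ttm_bo_man_init() above
			 * through man->func->init. */
			man->func = &ttm_bo_manager_func;
			man->flags = TTM_MEMTYPE_FLAG_FIXED |
				     TTM_MEMTYPE_FLAG_MAPPABLE;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}
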
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a7bab87a548b..af789dc869b9 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 		return ret;
 
 	ret = be->func->bind(be, bo_mem);
-	if (ret) {
-		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
+	if (unlikely(ret != 0))
 		return ret;
-	}
 
 	ttm->state = tt_bound;
 
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 9b5b4d9dd62c..3e038a394c51 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
 		first_pfn + 1;
 
-	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
+	vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
+	if (NULL == vsg->pages)
 		return -ENOMEM;
-	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
 	down_read(&current->mm->mmap_sem);
 	ret = get_user_pages(current, current->mm,
 			(unsigned long)xfer->mem_addr,
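
The via_dmablit.c hunk collapses a vmalloc()-plus-memset() pair into vzalloc(), which returns virtually contiguous, pre-zeroed memory in one call. The two forms below are equivalent; p and n are illustrative names, not from the patch:

	/* Before: allocate, then zero by hand. */
	p = vmalloc(n * sizeof(*p));
	if (p != NULL)
		memset(p, 0, n * sizeof(*p));

	/* After: one call, same result. */
	p = vzalloc(n * sizeof(*p));
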
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 51d9f9f1d7f2..76954e3528c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 
 	fence_rep.error = ret;
 	fence_rep.fence_seq = (uint64_t) sequence;
+	fence_rep.pad64 = 0;
 
 	user_fence_rep = (struct drm_vmw_fence_rep __user *)
 	    (unsigned long)arg->fence_rep;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87c6e6156d7d..cceeb42789b6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
 				  &vmw_vram_ne_placement,
 				  false, &vmw_dmabuf_bo_free);
 	vmw_overlay_resume_all(dev_priv);
+	if (unlikely(ret != 0))
+		vfbs->buffer = NULL;
 
 	return ret;
 }
@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(&vfb->base);
 
+	if (unlikely(vfbs->buffer == NULL))
+		return 0;
+
 	bo = &vfbs->buffer->base;
 	ttm_bo_unref(&bo);
 	vfbs->buffer = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a01c47ddb5bc..29113c9b26a8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 		return -EINVAL;
 	}
 
-	dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv));
+	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
 
 	if (!dev_priv->ldu_priv)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index df2036ed18d5..f1a52f9e7298 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
 		return -ENOSYS;
 	}
 
-	overlay = kmalloc(GFP_KERNEL, sizeof(*overlay));
+	overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
 	if (!overlay)
 		return -ENOMEM;
 
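
The two kmalloc() fixes above correct the same swapped-argument bug: the signature is kmalloc(size_t size, gfp_t flags), so the old calls used GFP_KERNEL (a small flag constant) as the allocation size and the structure size as the GFP mask, silently returning an undersized buffer whenever the structure outgrew that constant. The corrected idiom, with struct foo standing in for either structure:

	struct foo *f;

	/* Size first, GFP flags second. */
	f = kmalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;
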
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 36e129f0023f..5408b1b7996f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -862,7 +862,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 			       &vmw_vram_sys_placement, true,
 			       &vmw_user_dmabuf_destroy);
 	if (unlikely(ret != 0))
-		return ret;
+		goto out_no_dmabuf;
 
 	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
 	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
@@ -870,19 +870,21 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 				   false,
 				   ttm_buffer_type,
 				   &vmw_user_dmabuf_release, NULL);
-	if (unlikely(ret != 0)) {
-		ttm_bo_unref(&tmp);
-	} else {
+	if (unlikely(ret != 0))
+		goto out_no_base_object;
+	else {
 		rep->handle = vmw_user_bo->base.hash.key;
 		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
 		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
 		rep->cur_gmr_offset = 0;
 	}
-	ttm_bo_unref(&tmp);
 
+out_no_base_object:
+	ttm_bo_unref(&tmp);
+out_no_dmabuf:
 	ttm_read_unlock(&vmaster->lock);
 
-	return 0;
+	return ret;
 }
 
 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
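
The vmw_dmabuf_alloc_ioctl() rework fixes two error-path bugs at once: the early failure return used to skip ttm_read_unlock(), and the function ended in a hard-coded return 0 even when base-object initialization had failed. Routing every exit through labels cures both. The shape of the resulting unwind, with every helper name hypothetical:

	static int alloc_ioctl_shape(struct ctx *c)
	{
		int ret;

		ret = read_lock(c);
		if (unlikely(ret != 0))
			return ret;

		ret = create_buffer(c);
		if (unlikely(ret != 0))
			goto out_no_dmabuf;

		take_extra_ref(c);
		ret = init_base_object(c);
		if (unlikely(ret != 0))
			goto out_no_base_object;
		fill_reply(c);

	out_no_base_object:
		drop_extra_ref(c);	/* runs on success and failure alike */
	out_no_dmabuf:
		read_unlock(c);
		return ret;		/* the real status, never a blanket 0 */
	}
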
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 742c423567cf..0e1edd7311ff 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -3,6 +3,9 @@ config STUB_POULSBO
 	depends on PCI
 	# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
 	help
 	  Choose this option if you have a system that has Intel GMA500