Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/drm_crtc_helper.c | 2
 drivers/gpu/drm/drm_edid.c | 26
 drivers/gpu/drm/drm_fops.c | 1
 drivers/gpu/drm/i915/i915_drv.c | 5
 drivers/gpu/drm/i915/i915_drv.h | 3
 drivers/gpu/drm/i915/i915_gem.c | 195
 drivers/gpu/drm/i915/i915_gem_evict.c | 8
 drivers/gpu/drm/i915/i915_suspend.c | 4
 drivers/gpu/drm/i915/intel_crt.c | 159
 drivers/gpu/drm/i915/intel_display.c | 82
 drivers/gpu/drm/i915/intel_dp.c | 2
 drivers/gpu/drm/i915/intel_drv.h | 1
 drivers/gpu/drm/i915/intel_i2c.c | 11
 drivers/gpu/drm/i915/intel_lvds.c | 16
 drivers/gpu/drm/i915/intel_opregion.c | 2
 drivers/gpu/drm/i915/intel_overlay.c | 4
 drivers/gpu/drm/i915/intel_ringbuffer.c | 129
 drivers/gpu/drm/i915/intel_ringbuffer.h | 3
 drivers/gpu/drm/nouveau/nouveau_backlight.c | 9
 drivers/gpu/drm/nouveau/nouveau_bios.c | 2
 drivers/gpu/drm/nouveau/nouveau_bo.c | 43
 drivers/gpu/drm/nouveau/nouveau_connector.c | 77
 drivers/gpu/drm/nouveau/nouveau_connector.h | 3
 drivers/gpu/drm/nouveau/nouveau_drv.h | 55
 drivers/gpu/drm/nouveau/nouveau_fence.c | 7
 drivers/gpu/drm/nouveau/nouveau_gem.c | 36
 drivers/gpu/drm/nouveau/nouveau_hw.c | 8
 drivers/gpu/drm/nouveau/nouveau_hw.h | 19
 drivers/gpu/drm/nouveau/nouveau_i2c.c | 2
 drivers/gpu/drm/nouveau/nouveau_irq.c | 42
 drivers/gpu/drm/nouveau/nouveau_mem.c | 49
 drivers/gpu/drm/nouveau/nouveau_object.c | 2
 drivers/gpu/drm/nouveau/nouveau_pm.c | 7
 drivers/gpu/drm/nouveau/nouveau_ramht.c | 71
 drivers/gpu/drm/nouveau/nouveau_sgdma.c | 14
 drivers/gpu/drm/nouveau/nouveau_state.c | 17
 drivers/gpu/drm/nouveau/nouveau_temp.c | 2
 drivers/gpu/drm/nouveau/nv04_crtc.c | 7
 drivers/gpu/drm/nouveau/nv04_dfp.c | 13
 drivers/gpu/drm/nouveau/nv04_pm.c | 9
 drivers/gpu/drm/nouveau/nv50_calc.c | 16
 drivers/gpu/drm/nouveau/nv50_crtc.c | 4
 drivers/gpu/drm/nouveau/nv50_display.c | 35
 drivers/gpu/drm/nouveau/nv50_fifo.c | 5
 drivers/gpu/drm/nouveau/nv50_graph.c | 52
 drivers/gpu/drm/nouveau/nv50_instmem.c | 1
 drivers/gpu/drm/radeon/evergreen.c | 39
 drivers/gpu/drm/radeon/evergreen_blit_kms.c | 2
 drivers/gpu/drm/radeon/r100.c | 4
 drivers/gpu/drm/radeon/r300.c | 2
 drivers/gpu/drm/radeon/r600.c | 14
 drivers/gpu/drm/radeon/r600_blit_kms.c | 2
 drivers/gpu/drm/radeon/r600_cs.c | 311
 drivers/gpu/drm/radeon/r600d.h | 6
 drivers/gpu/drm/radeon/radeon.h | 4
 drivers/gpu/drm/radeon/radeon_atombios.c | 27
 drivers/gpu/drm/radeon/radeon_benchmark.c | 4
 drivers/gpu/drm/radeon/radeon_combios.c | 13
 drivers/gpu/drm/radeon/radeon_connectors.c | 34
 drivers/gpu/drm/radeon/radeon_device.c | 2
 drivers/gpu/drm/radeon/radeon_display.c | 18
 drivers/gpu/drm/radeon/radeon_encoders.c | 382
 drivers/gpu/drm/radeon/radeon_fence.c | 3
 drivers/gpu/drm/radeon/radeon_gart.c | 4
 drivers/gpu/drm/radeon/radeon_gem.c | 2
 drivers/gpu/drm/radeon/radeon_i2c.c | 49
 drivers/gpu/drm/radeon/radeon_irq.c | 4
 drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 2
 drivers/gpu/drm/radeon/radeon_mode.h | 22
 drivers/gpu/drm/radeon/radeon_object.c | 11
 drivers/gpu/drm/radeon/radeon_object.h | 7
 drivers/gpu/drm/radeon/radeon_ring.c | 6
 drivers/gpu/drm/radeon/radeon_test.c | 4
 drivers/gpu/drm/radeon/radeon_ttm.c | 5
 drivers/gpu/drm/radeon/rs400.c | 2
 drivers/gpu/drm/radeon/rs600.c | 4
 drivers/gpu/drm/radeon/rv770.c | 4
 drivers/gpu/drm/ttm/ttm_bo.c | 97
 drivers/gpu/drm/ttm/ttm_bo_manager.c | 81
 drivers/gpu/drm/ttm/ttm_tt.c | 4
 drivers/gpu/drm/via/via_dmablit.c | 4
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 1
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 5
 drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
 drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 14
 drivers/gpu/stub/Kconfig | 3
 87 files changed, 1667 insertions(+), 799 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index dcbeb98f195..f7af91cb273 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
 	struct drm_crtc *tmp;
 	int crtc_mask = 1;
 
-	WARN(!crtc, "checking null crtc?");
+	WARN(!crtc, "checking null crtc?\n");
 
 	dev = crtc->dev;
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1a26217a53..a245d17165a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 			.addr	= DDC_ADDR,
 			.flags	= I2C_M_RD,
 			.len	= len,
-			.buf	= buf + start,
+			.buf	= buf,
 		}
 	};
 
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 static u8 *
 drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-	int i, j = 0;
+	int i, j = 0, valid_extensions = 0;
 	u8 *block, *new;
 
 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 
 	for (j = 1; j <= block[0x7e]; j++) {
 		for (i = 0; i < 4; i++) {
-			if (drm_do_probe_ddc_edid(adapter, block, j,
-						  EDID_LENGTH))
+			if (drm_do_probe_ddc_edid(adapter,
+				  block + (valid_extensions + 1) * EDID_LENGTH,
+				  j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + j * EDID_LENGTH))
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+				valid_extensions++;
 				break;
+			}
 		}
 		if (i == 4)
-			goto carp;
+			dev_warn(connector->dev->dev,
+				 "%s: Ignoring invalid EDID block %d.\n",
+				 drm_get_connector_name(connector), j);
+	}
+
+	if (valid_extensions != block[0x7e]) {
+		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+		block[0x7e] = valid_extensions;
+		new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+		if (!new)
+			goto out;
+		block = new;
 	}
 
 	return block;
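
A note on the drm_do_get_edid() change above: instead of aborting on the first bad extension block, each candidate is read into the next free slot and kept only if it validates; the base block's extension count (byte 0x7e) and its checksum are then patched, and the buffer is shrunk with krealloc(). Since byte 0x7e participates in the 128-byte checksum, adding the count delta to the checksum byte keeps the sum at 0 mod 256. A minimal userspace sketch of the same compact-and-shrink pattern, with read_block() and block_valid() as hypothetical stand-ins for drm_do_probe_ddc_edid() and drm_edid_block_valid():

    /* Illustrative only; mirrors the keep-if-valid compaction above. */
    #include <stdint.h>
    #include <stdlib.h>

    #define BLOCK_LEN 128

    int read_block(uint8_t *dst, int idx);      /* hypothetical */
    int block_valid(const uint8_t *blk);        /* hypothetical */

    static uint8_t *trim_invalid_extensions(uint8_t *base)
    {
            int want = base[0x7e], valid = 0, j;
            uint8_t *resized;

            for (j = 1; j <= want; j++) {
                    uint8_t *slot = base + (valid + 1) * BLOCK_LEN;
                    if (read_block(slot, j) == 0 && block_valid(slot))
                            valid++;    /* keep it; an invalid block is
                                         * overwritten by the next read */
            }

            if (valid != want) {
                    base[BLOCK_LEN - 1] += base[0x7e] - valid; /* fix checksum */
                    base[0x7e] = valid;
                    resized = realloc(base, (valid + 1) * BLOCK_LEN);
                    if (resized)
                            base = resized;     /* a shrinking realloc may move */
            }
            return base;
    }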
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index b744dad5c23..a39794bac04 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -37,7 +37,6 @@
 #include "drmP.h"
 #include <linux/poll.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 
 /* from BKL pushdown: note that nothing else serializes idr_find() */
 DEFINE_MUTEX(drm_global_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd42076..f737960712e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
 
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
@@ -150,7 +150,8 @@ static const struct intel_device_info intel_ironlake_d_info = {
 
 static const struct intel_device_info intel_ironlake_m_info = {
 	.gen = 5, .is_mobile = 1,
-	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.has_fbc = 0, /* disabled due to buggy hardware */
 	.has_bsd_ring = 1,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285..409826da309 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1045,6 +1045,8 @@ void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
 			       uint32_t read_domains,
 			       uint32_t write_domain);
+int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+			      bool interruptible);
 int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
@@ -1321,6 +1323,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b..17b1cba3b5f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -547,6 +547,19 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj_priv;
 	int ret = 0;
 
+	if (args->size == 0)
+		return 0;
+
+	if (!access_ok(VERIFY_WRITE,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size))
+		return -EFAULT;
+
+	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+				       args->size);
+	if (ret)
+		return -EFAULT;
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -564,23 +577,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	if (args->size == 0)
-		goto out;
-
-	if (!access_ok(VERIFY_WRITE,
-		       (char __user *)(uintptr_t)args->data_ptr,
-		       args->size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
-				       args->size);
-	if (ret) {
-		ret = -EFAULT;
-		goto out;
-	}
-
 	ret = i915_gem_object_get_pages_or_evict(obj);
 	if (ret)
 		goto out;
@@ -981,7 +977,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_pwrite *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-	int ret = 0;
+	int ret;
+
+	if (args->size == 0)
+		return 0;
+
+	if (!access_ok(VERIFY_READ,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size))
+		return -EFAULT;
+
+	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+				      args->size);
+	if (ret)
+		return -EFAULT;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
@@ -994,30 +1003,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	}
 	obj_priv = to_intel_bo(obj);
 
-
 	/* Bounds check destination. */
 	if (args->offset > obj->size || args->size > obj->size - args->offset) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (args->size == 0)
-		goto out;
-
-	if (!access_ok(VERIFY_READ,
-		       (char __user *)(uintptr_t)args->data_ptr,
-		       args->size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
-				      args->size);
-	if (ret) {
-		ret = -EFAULT;
-		goto out;
-	}
-
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
 	 * different detiling behavior between reading and writing.
@@ -2172,7 +2163,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
-	if (list_empty(&ring->gpu_write_list))
+	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
 	i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2181,7 @@ i915_gpu_idle(struct drm_device *dev)
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return 0;
 
@@ -2909,6 +2898,20 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
 	return 0;
 }
 
+int
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+			  bool interruptible)
+{
+	if (!obj->active)
+		return 0;
+
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+		i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+				    0, obj->base.write_domain);
+
+	return i915_gem_object_wait_rendering(&obj->base, interruptible);
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
@@ -3108,7 +3111,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * write domain
 	 */
 	if (obj->write_domain &&
-	    obj->write_domain != obj->pending_read_domains) {
+	    (obj->write_domain != obj->pending_read_domains ||
+	     obj_priv->ring != ring)) {
 		flush_domains |= obj->write_domain;
 		invalidate_domains |=
 			obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3501,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
 	return 0;
 }
 
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+				struct drm_file *file,
+				struct intel_ring_buffer *ring,
+				struct drm_gem_object **objects,
+				int count)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+	dev_priv->mm.flush_rings = 0;
+	for (i = 0; i < count; i++)
+		i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			  __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev, file,
+			       dev->invalidate_domains,
+			       dev->flush_domains,
+			       dev_priv->mm.flush_rings);
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+		/* XXX replace with semaphores */
+		if (obj->ring && ring != obj->ring) {
+			ret = i915_gem_object_wait_rendering(&obj->base, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3757,33 +3807,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as new domains are computed
-	 * for each object
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-	dev_priv->mm.flush_rings = 0;
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-
-		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj, ring);
-	}
-
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			  __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev, file,
-			       dev->invalidate_domains,
-			       dev->flush_domains,
-			       dev_priv->mm.flush_rings);
-	}
+	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+					      object_list, args->buffer_count);
+	if (ret)
+		goto err;
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4070,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 		alignment = i915_gem_get_gtt_alignment(obj);
 	if (obj_priv->gtt_offset & (alignment - 1)) {
 		WARN(obj_priv->pin_count,
-		     "bo is already pinned with incorrect alignment:"
-		     " offset=%x, req.alignment=%x\n",
+		     "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
 		     obj_priv->gtt_offset, alignment);
 		ret = i915_gem_object_unbind(obj);
 		if (ret)
@@ -4856,17 +4882,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 			 struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	void *obj_addr;
-	int ret;
-	char __user *user_data;
+	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
-	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-	ret = copy_from_user(obj_addr, user_data, args->size);
-	if (ret)
-		return -EFAULT;
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
 
 	drm_agp_chipset_flush(dev);
 	return 0;
@@ -4900,9 +4933,7 @@ i915_gpu_is_active(struct drm_device *dev)
 	int lists_empty;
 
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->render_ring.active_list) &&
-		      list_empty(&dev_priv->bsd_ring.active_list) &&
-		      list_empty(&dev_priv->blt_ring.active_list);
+		      list_empty(&dev_priv->mm.active_list);
 
 	return !lists_empty;
 }
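
A pattern worth noting in the pread/pwrite hunks above: the zero-size early return, access_ok() and the prefaulting all move ahead of i915_mutex_lock_interruptible(). Prefaulting deliberately touches the user pages, and a fault taken while struct_mutex is held could recurse into a GEM fault handler that wants the same lock, so every check that can fail cheaply now fails before the lock is taken and the error paths need no unwinding. A rough userspace sketch of the same ordering, with validate_user_range() as an illustrative stand-in for access_ok() plus the prefault:

    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int validate_user_range(const void *ptr, size_t size)
    {
            return ptr != NULL;     /* stand-in for access_ok()/prefault */
    }

    int pwrite_ioctl(const void *user_buf, size_t size)
    {
            if (size == 0)
                    return 0;               /* trivially done, no lock taken */
            if (!validate_user_range(user_buf, size))
                    return -EFAULT;         /* fail before locking */

            pthread_mutex_lock(&dev_mutex);
            /* bounds-check the object and copy, as in the code above */
            pthread_mutex_unlock(&dev_mutex);
            return 0;
    }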
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013f53f..d8ae7d1d0cc 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!lists_empty);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d2d95..454c064f8ef 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
 	/* Clock gating state */
 	intel_init_clock_gating(dev);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
 		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+	}
 
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c55c7704335..8df57431606 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -34,6 +34,25 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+			   ADPA_CRT_HOTPLUG_WARMUP_10MS | \
+			   ADPA_CRT_HOTPLUG_SAMPLE_4S | \
+			   ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
+			   ADPA_CRT_HOTPLUG_VOLREF_325MV | \
+			   ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+	struct intel_encoder base;
+	bool force_hotplug_required;
+};
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_crt, base);
+}
+
 static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -129,7 +148,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
 	}
 
-	adpa = 0;
+	adpa = ADPA_HOTPLUG_BITS;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -157,53 +176,44 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 adpa, temp;
+	u32 adpa;
 	bool ret;
-	bool turn_off_dac = false;
 
-	temp = adpa = I915_READ(PCH_ADPA);
+	/* The first time through, trigger an explicit detection cycle */
+	if (crt->force_hotplug_required) {
+		bool turn_off_dac = HAS_PCH_SPLIT(dev);
+		u32 save_adpa;
 
-	if (HAS_PCH_SPLIT(dev))
-		turn_off_dac = true;
-
-	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-	if (turn_off_dac)
-		adpa &= ~ADPA_DAC_ENABLE;
-
-	/* disable HPD first */
-	I915_WRITE(PCH_ADPA, adpa);
-	(void)I915_READ(PCH_ADPA);
+		crt->force_hotplug_required = 0;
+
+		save_adpa = adpa = I915_READ(PCH_ADPA);
+		DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+		adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+		if (turn_off_dac)
+			adpa &= ~ADPA_DAC_ENABLE;
+
+		I915_WRITE(PCH_ADPA, adpa);
 
-	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
-			ADPA_CRT_HOTPLUG_WARMUP_10MS |
-			ADPA_CRT_HOTPLUG_SAMPLE_4S |
-			ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
-			ADPA_CRT_HOTPLUG_VOLREF_325MV |
-			ADPA_CRT_HOTPLUG_ENABLE |
-			ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
-
-	DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
-	I915_WRITE(PCH_ADPA, adpa);
-
-	if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
-		     1000))
-		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
-
-	if (turn_off_dac) {
-		/* Make sure hotplug is enabled */
-		I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
-		(void)I915_READ(PCH_ADPA);
+		if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+			     1000))
+			DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+		if (turn_off_dac) {
+			I915_WRITE(PCH_ADPA, save_adpa);
+			POSTING_READ(PCH_ADPA);
+		}
 	}
 
 	/* Check the status to see if both blue and green are on now */
 	adpa = I915_READ(PCH_ADPA);
-	adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
-	if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) ||
-	    (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO))
+	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
 		ret = true;
 	else
 		ret = false;
+	DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
 
 	return ret;
 }
@@ -277,13 +287,12 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
 	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
 }
 
-static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
+static bool intel_crt_detect_ddc(struct intel_crt *crt)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
 
 	/* CRT should always be at 0, but check anyway */
-	if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
+	if (crt->base.type != INTEL_OUTPUT_ANALOG)
 		return false;
 
 	if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
@@ -291,7 +300,7 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
 		return true;
 	}
 
-	if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
+	if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
 		DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
 		return true;
 	}
@@ -300,9 +309,9 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
 }
 
 static enum drm_connector_status
-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
 {
-	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_encoder *encoder = &crt->base.base;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -434,7 +443,7 @@ static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_crt *crt = intel_attached_crt(connector);
 	struct drm_crtc *crtc;
 	int dpms_mode;
 	enum drm_connector_status status;
@@ -443,28 +452,31 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 		if (intel_crt_detect_hotplug(connector)) {
 			DRM_DEBUG_KMS("CRT detected via hotplug\n");
 			return connector_status_connected;
-		} else
+		} else {
+			DRM_DEBUG_KMS("CRT not detected via hotplug\n");
 			return connector_status_disconnected;
+		}
 	}
 
-	if (intel_crt_detect_ddc(&encoder->base))
+	if (intel_crt_detect_ddc(crt))
 		return connector_status_connected;
 
 	if (!force)
 		return connector->status;
 
 	/* for pre-945g platforms use load detect */
-	if (encoder->base.crtc && encoder->base.crtc->enabled) {
-		status = intel_crt_load_detect(encoder->base.crtc, encoder);
+	crtc = crt->base.base.crtc;
+	if (crtc && crtc->enabled) {
+		status = intel_crt_load_detect(crtc, crt);
 	} else {
-		crtc = intel_get_load_detect_pipe(encoder, connector,
+		crtc = intel_get_load_detect_pipe(&crt->base, connector,
 						  NULL, &dpms_mode);
 		if (crtc) {
-			if (intel_crt_detect_ddc(&encoder->base))
+			if (intel_crt_detect_ddc(crt))
 				status = connector_status_connected;
 			else
-				status = intel_crt_load_detect(crtc, encoder);
-			intel_release_load_detect_pipe(encoder,
+				status = intel_crt_load_detect(crtc, crt);
+			intel_release_load_detect_pipe(&crt->base,
 						       connector, dpms_mode);
 		} else
 			status = connector_status_unknown;
@@ -536,17 +548,17 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
 void intel_crt_init(struct drm_device *dev)
 {
 	struct drm_connector *connector;
-	struct intel_encoder *intel_encoder;
+	struct intel_crt *crt;
 	struct intel_connector *intel_connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
-	if (!intel_encoder)
+	crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+	if (!crt)
 		return;
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
-		kfree(intel_encoder);
+		kfree(crt);
 		return;
 	}
 
@@ -554,20 +566,20 @@ void intel_crt_init(struct drm_device *dev)
 	drm_connector_init(dev, &intel_connector->base,
 			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
-	drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
+	drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
 			 DRM_MODE_ENCODER_DAC);
 
-	intel_connector_attach_encoder(intel_connector, intel_encoder);
+	intel_connector_attach_encoder(intel_connector, &crt->base);
 
-	intel_encoder->type = INTEL_OUTPUT_ANALOG;
-	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				   (1 << INTEL_ANALOG_CLONE_BIT) |
-				   (1 << INTEL_SDVO_LVDS_CLONE_BIT);
-	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+	crt->base.type = INTEL_OUTPUT_ANALOG;
+	crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
+				1 << INTEL_ANALOG_CLONE_BIT |
+				1 << INTEL_SDVO_LVDS_CLONE_BIT);
+	crt->base.crtc_mask = (1 << 0) | (1 << 1);
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 
-	drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
+	drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 	drm_sysfs_connector_add(connector);
@@ -577,5 +589,22 @@ void intel_crt_init(struct drm_device *dev)
 	else
 		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
+	/*
+	 * Configure the automatic hotplug detection stuff
+	 */
+	crt->force_hotplug_required = 0;
+	if (HAS_PCH_SPLIT(dev)) {
+		u32 adpa;
+
+		adpa = I915_READ(PCH_ADPA);
+		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+		adpa |= ADPA_HOTPLUG_BITS;
+		I915_WRITE(PCH_ADPA, adpa);
+		POSTING_READ(PCH_ADPA);
+
+		DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+		crt->force_hotplug_required = 1;
+	}
+
 	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
 }
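
The detection rework above hinges on a self-clearing trigger bit: ADPA_CRT_HOTPLUG_FORCE_TRIGGER is set once, and wait_for() polls until the hardware clears it (or a 1000 ms timeout expires) rather than sleeping a fixed worst case. A standalone sketch of that poll-until-clear idiom, with read_reg() as a hypothetical MMIO accessor standing in for I915_READ(PCH_ADPA):

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    uint32_t read_reg(void);        /* hypothetical register read */

    static bool wait_for_clear(uint32_t mask, int timeout_ms)
    {
            struct timespec ts = { 0, 1000 * 1000 };        /* 1 ms */

            while (timeout_ms-- > 0) {
                    if ((read_reg() & mask) == 0)
                            return true;            /* hardware finished */
                    nanosleep(&ts, NULL);
            }
            return (read_reg() & mask) == 0;        /* one final check */
    }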
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b..bee24b1a58e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1611,6 +1611,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
 		wait_event(dev_priv->pending_flip_queue,
 			   atomic_read(&obj_priv->pending_flip) == 0);
+
+		/* Big Hammer, we also need to ensure that any pending
+		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+		 * current scanout is retired before unpinning the old
+		 * framebuffer.
+		 */
+		ret = i915_gem_object_flush_gpu(obj_priv, false);
+		if (ret) {
+			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
 	}
 
 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -1681,6 +1693,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
 	udelay(500);
 }
 
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
+}
+
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
@@ -1767,27 +1810,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 
 	DRM_DEBUG_KMS("FDI train done\n");
 
-	/* enable normal train */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-	I915_WRITE(reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_NONE;
-	}
-	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-	/* wait one idle pattern time */
-	POSTING_READ(reg);
-	udelay(1000);
 }
 
 static const int const snb_b_fdi_train_param [] = {
@@ -2090,6 +2112,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
 	I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
 
+	intel_fdi_normal_train(crtc);
+
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2200,9 +2224,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	udelay(100);
 
 	/* Ironlake workaround, disable clock pointer after downing FDI */
-	I915_WRITE(FDI_RX_CHICKEN(pipe),
-		   I915_READ(FDI_RX_CHICKEN(pipe) &
-			     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+	if (HAS_PCH_IBX(dev))
+		I915_WRITE(FDI_RX_CHICKEN(pipe),
+			   I915_READ(FDI_RX_CHICKEN(pipe) &
+				     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
 
 	/* still set train pattern 1 */
 	reg = FDI_TX_CTL(pipe);
@@ -5581,20 +5606,19 @@ void ironlake_enable_drps(struct drm_device *dev)
 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 		MEMMODE_FSTART_SHIFT;
-	fstart = fmax;
 
 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;
 
-	dev_priv->fmax = fstart; /* IPS callback will increase this */
+	dev_priv->fmax = fmax; /* IPS callback will increase this */
 	dev_priv->fstart = fstart;
 
-	dev_priv->max_delay = fmax;
+	dev_priv->max_delay = fstart;
 	dev_priv->min_delay = fmin;
 	dev_priv->cur_delay = fstart;
 
-	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
-		fstart);
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);
 
 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1d63b..c8e00555331 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 		status = connector_status_connected;
 	}
 
-	return bit;
+	return status;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86a876..21551fe7454 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 				      struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 2be4f728ed0..3dba086e7ee 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -160,7 +160,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
 	};
 	struct intel_gpio *gpio;
 
-	if (pin < 1 || pin > 7)
+	if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
 		return NULL;
 
 	gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
@@ -172,7 +172,8 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
 		gpio->reg += PCH_GPIOA - GPIOA;
 	gpio->dev_priv = dev_priv;
 
-	snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
+	snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+		 "i915 GPIO%c", "?BACDE?F"[pin]);
 	gpio->adapter.owner = THIS_MODULE;
 	gpio->adapter.algo_data = &gpio->algo;
 	gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
@@ -349,7 +350,7 @@ int intel_setup_gmbus(struct drm_device *dev)
 		"panel",
 		"dpc",
 		"dpb",
-		"reserved"
+		"reserved",
 		"dpd",
 	};
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -366,8 +367,8 @@ int intel_setup_gmbus(struct drm_device *dev)
 		bus->adapter.owner = THIS_MODULE;
 		bus->adapter.class = I2C_CLASS_DDC;
 		snprintf(bus->adapter.name,
-			 I2C_NAME_SIZE,
-			 "gmbus %s",
+			 sizeof(bus->adapter.name),
+			 "i915 gmbus %s",
 			 names[i]);
 
 		bus->adapter.dev.parent = &dev->pdev->dev;
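
Both snprintf() fixes above make the same two corrections: the bound comes from sizeof() on the actual destination array instead of the I2C_NAME_SIZE constant (which can silently disagree with the field's real size), and the adapter names gain an "i915 " prefix; the GPIO pin-to-letter table is corrected as well. A tiny standalone illustration of the sizeof() idiom (the 48-byte field is an arbitrary stand-in):

    #include <stdio.h>

    struct adapter {
            char name[48];          /* illustrative size */
    };

    int main(void)
    {
            struct adapter a;
            const char pins[] = "?BACDE?F";

            /* bound tracks the field itself, not a separate constant */
            snprintf(a.name, sizeof(a.name), "i915 GPIO%c", pins[3]);
            puts(a.name);
            return 0;
    }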
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a649990ea..4324a326f98 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode;
 
-	if (intel_lvds->edid) {
-		drm_mode_connector_update_edid_property(connector,
-							intel_lvds->edid);
+	if (intel_lvds->edid)
 		return drm_add_edid_modes(connector, intel_lvds->edid);
-	}
 
 	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
 	if (mode == 0)
@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev)
 	 */
 	intel_lvds->edid = drm_get_edid(connector,
 					&dev_priv->gmbus[pin].adapter);
-
+	if (intel_lvds->edid) {
+		if (drm_add_edid_modes(connector,
+				       intel_lvds->edid)) {
+			drm_mode_connector_update_edid_property(connector,
+								intel_lvds->edid);
+		} else {
+			kfree(intel_lvds->edid);
+			intel_lvds->edid = NULL;
+		}
+	}
 	if (!intel_lvds->edid) {
 		/* Didn't get an EDID, so
 		 * Set wide sync ranges so we get all modes
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc3cd6..9b0d9a867ae 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
 	return 0;
 
 err_out:
-	iounmap(opregion->header);
+	iounmap(base);
 	return err;
 }
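
The one-line intel_opregion.c fix above is a classic error-path bug: the cleanup label unmapped through opregion->header, a field that may not have been assigned by the time the failure occurs, rather than through the local mapping that was actually created. A small sketch of the safe shape, with map_region()/unmap_region()/probe_region() as hypothetical stand-ins for the ioremap/iounmap/validation steps:

    #include <errno.h>
    #include <stddef.h>

    void *map_region(void);                 /* hypothetical */
    void unmap_region(void *p);             /* hypothetical */
    int probe_region(void *p);              /* hypothetical */

    struct opregion { void *header; };

    int opregion_setup(struct opregion *op)
    {
            void *base = map_region();
            int err;

            if (!base)
                    return -ENOMEM;

            err = probe_region(base);
            if (err)
                    goto err_out;

            op->header = base;      /* published only on success */
            return 0;

    err_out:
            unmap_region(base);     /* not op->header: it was never set */
            return err;
    }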
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219..02ff0a481f4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);
-	u32 stride_mask, depth, tmp;
+	u32 stride_mask;
+	int depth;
+	u32 tmp;
 
 	/* check src dimensions */
 	if (IS_845G(dev) || IS_I830(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae..b83306f9244 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev,
 
 	I915_WRITE_CTL(ring,
 			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-			| RING_NO_REPORT | RING_VALID);
+			| RING_REPORT_64K | RING_VALID);
 
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
 	i915_gem_object_unpin(ring->gem_object);
 	drm_gem_object_unreference(ring->gem_object);
 	ring->gem_object = NULL;
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
 	cleanup_status_page(dev, ring);
 }
 
@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 {
 	unsigned long end;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 head;
+
+	head = intel_read_status_page(ring, 4);
+	if (head) {
+		ring->head = head & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+		if (ring->space >= n)
+			return 0;
+	}
 
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
 	/* do nothing */
 }
 
+
+/* Workaround for some stepping of SNB,
+ * each time when BLT engine ring tail moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+	(IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+	return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+			 struct intel_ring_buffer *ring)
+{
+	if (NEED_BLT_WORKAROUND(dev)) {
+		struct drm_i915_gem_object *obj;
+		u32 __iomem *ptr;
+		int ret;
+
+		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		if (obj == NULL)
+			return -ENOMEM;
+
+		ret = i915_gem_object_pin(&obj->base, 4096);
+		if (ret) {
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ptr = kmap(obj->pages[0]);
+		iowrite32(MI_BATCH_BUFFER_END, ptr);
+		iowrite32(MI_NOOP, ptr+1);
+		kunmap(obj->pages[0]);
+
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		if (ret) {
+			i915_gem_object_unpin(&obj->base);
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
+	return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   int num_dwords)
+{
+	if (ring->private) {
+		intel_ring_begin(dev, ring, num_dwords+2);
+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+	} else
+		intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
+{
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+		     struct intel_ring_buffer *ring,
+		     u32 flush_domains)
+{
+	u32 seqno = i915_gem_get_seqno(dev);
+
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	i915_gem_object_unpin(ring->private);
+	drm_gem_object_unreference(ring->private);
+	ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
 	.name			= "blt ring",
 	.id			= RING_BLT,
 	.mmio_base		= BLT_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
-	.init			= init_ring_common,
+	.init			= blt_ring_init,
 	.write_tail		= ring_write_tail,
-	.flush			= gen6_ring_flush,
-	.add_request		= ring_add_request,
+	.flush			= blt_ring_flush,
+	.add_request		= blt_ring_add_request,
 	.get_seqno		= ring_status_page_get_seqno,
 	.user_irq_get		= blt_ring_get_user_irq,
 	.user_irq_put		= blt_ring_put_user_irq,
 	.dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+	.cleanup		= blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
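
The SNB workaround above pins a 4096-byte object containing just MI_BATCH_BUFFER_END and an MI_NOOP, and on affected steppings every blt_ring_begin() reserves two extra dwords and first emits MI_BATCH_BUFFER_START pointing at that page, so the first command parsed after any tail move is a batch-buffer start. A toy model of the prepend-a-fixed-preamble idea; the opcode values and helpers here are illustrative, not the hardware encoding:

    #include <stdint.h>
    #include <stdio.h>

    enum { FAKE_BATCH_START = 1, FAKE_FLUSH = 2 }; /* not real MI_* values */

    #define RING_DWORDS 64

    static uint32_t ring[RING_DWORDS];
    static unsigned int tail;
    static const uint32_t wa_batch_offset = 0x1000; /* pinned no-op batch */
    static int need_workaround = 1;                 /* e.g. early stepping */

    static void emit(uint32_t dw)
    {
            ring[tail++ % RING_DWORDS] = dw;
    }

    /* reserve space; on the workaround path, prepend the fixed preamble */
    static void blt_begin(int ndwords)
    {
            (void)ndwords;          /* a real ring reserves ndwords + 2 */
            if (need_workaround) {
                    emit(FAKE_BATCH_START);
                    emit(wa_batch_offset);
            }
    }

    int main(void)
    {
            blt_begin(4);
            emit(FAKE_FLUSH);
            printf("first dword in ring: %u\n", ring[0]);
            return 0;
    }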
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e576..3126c268198 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@ struct intel_ring_buffer {
 			struct drm_i915_gem_execbuffer2 *exec,
 			struct drm_clip_rect *cliprects,
 			uint64_t exec_offset);
+	void		(*cleanup)(struct intel_ring_buffer *ring);
 
 	/**
 	 * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct intel_ring_buffer {
 
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
+
+	void *private;
 };
 
 static inline u32
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 406228f4a2a..b14c8111057 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -31,6 +31,7 @@
  */
 
 #include <linux/backlight.h>
+#include <linux/acpi.h>
 
 #include "drmP.h"
 #include "nouveau_drv.h"
@@ -136,6 +137,14 @@ int nouveau_backlight_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
+#ifdef CONFIG_ACPI
+	if (acpi_video_backlight_support()) {
+		NV_INFO(dev, "ACPI backlight interface available, "
+			     "not registering our own\n");
+		return 0;
+	}
+#endif
+
 	switch (dev_priv->card_type) {
 	case NV_40:
 		return nouveau_nv40_backlight_init(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 5f21030a293..b2293576f27 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6829,7 +6829,7 @@ nouveau_bios_posted(struct drm_device *dev)
     struct drm_nouveau_private *dev_priv = dev->dev_private;
     unsigned htotal;
 
-    if (dev_priv->chipset >= NV_50) {
+    if (dev_priv->card_type >= NV_50) {
         if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
             NVReadVgaCrtc(dev, 0, 0x1a) == 0)
             return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 80353e2b840..c41e1c200ef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -143,8 +143,10 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
     nvbo->no_vm = no_vm;
     nvbo->tile_mode = tile_mode;
     nvbo->tile_flags = tile_flags;
+    nvbo->bo.bdev = &dev_priv->ttm.bdev;
 
-    nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
+    nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
+                           &align, &size);
     align >>= PAGE_SHIFT;
 
     nouveau_bo_placement_set(nvbo, flags, 0);
@@ -176,6 +178,31 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
         pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
 }
 
+static void
+set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+{
+    struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+
+    if (dev_priv->card_type == NV_10 &&
+        nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+        /*
+         * Make sure that the color and depth buffers are handled
+         * by independent memory controller units. Up to a 9x
+         * speed up when alpha-blending and depth-test are enabled
+         * at the same time.
+         */
+        int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
+
+        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+            nvbo->placement.fpfn = vram_pages / 2;
+            nvbo->placement.lpfn = ~0;
+        } else {
+            nvbo->placement.fpfn = 0;
+            nvbo->placement.lpfn = vram_pages / 2;
+        }
+    }
+}
+
 void
 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 {
@@ -190,6 +217,8 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
     pl->busy_placement = nvbo->busy_placements;
     set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                        type | busy, flags);
+
+    set_placement_range(nvbo, type);
 }
 
 int
@@ -525,7 +554,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
             stride = 16 * 4;
             height = amount / stride;
 
-            if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+            if (new_mem->mem_type == TTM_PL_VRAM &&
+                nouveau_bo_tile_layout(nvbo)) {
                 ret = RING_SPACE(chan, 8);
                 if (ret)
                     return ret;
@@ -546,7 +576,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                 BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                 OUT_RING (chan, 1);
             }
-            if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+            if (old_mem->mem_type == TTM_PL_VRAM &&
+                nouveau_bo_tile_layout(nvbo)) {
                 ret = RING_SPACE(chan, 8);
                 if (ret)
                     return ret;
@@ -753,7 +784,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
     if (dev_priv->card_type == NV_50) {
         ret = nv50_mem_vm_bind_linear(dev,
                                       offset + dev_priv->vm_vram_base,
-                                      new_mem->size, nvbo->tile_flags,
+                                      new_mem->size,
+                                      nouveau_bo_tile_layout(nvbo),
                                       offset);
         if (ret)
             return ret;
@@ -894,7 +926,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
      * nothing to do here.
      */
     if (bo->mem.mem_type != TTM_PL_VRAM) {
-        if (dev_priv->card_type < NV_50 || !nvbo->tile_flags)
+        if (dev_priv->card_type < NV_50 ||
+            !nouveau_bo_tile_layout(nvbo))
             return 0;
     }
 
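The set_placement_range() helper pins depth (zeta) buffers to the top half of VRAM and everything else to the bottom half, so color and depth land on different memory controller units. The page-range arithmetic in isolation (a standalone sketch; the 256 MiB vram_size and 4 KiB page size are assumptions for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint64_t vram_size = 256ull << 20;   /* assume 256 MiB of VRAM */
        uint32_t vram_pages = vram_size >> PAGE_SHIFT;
        int is_zeta = 1;                     /* NOUVEAU_GEM_TILE_ZETA set? */
        uint32_t fpfn, lpfn;                 /* first/last allowed page */

        if (is_zeta) {
            fpfn = vram_pages / 2;           /* upper half of VRAM */
            lpfn = ~0u;
        } else {
            fpfn = 0;                        /* lower half of VRAM */
            lpfn = vram_pages / 2;
        }
        printf("pages %u..%u of %u\n", fpfn, lpfn, vram_pages);
        return 0;
    }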
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 0871495096f..52c356e9a3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -281,7 +281,7 @@ detect_analog:
     nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
     if (!nv_encoder && !nouveau_tv_disable)
         nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
-    if (nv_encoder) {
+    if (nv_encoder && force) {
         struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
         struct drm_encoder_helper_funcs *helper =
                         encoder->helper_private;
@@ -641,11 +641,28 @@ nouveau_connector_get_modes(struct drm_connector *connector)
     return ret;
 }
 
+static unsigned
+get_tmds_link_bandwidth(struct drm_connector *connector)
+{
+    struct nouveau_connector *nv_connector = nouveau_connector(connector);
+    struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+    struct dcb_entry *dcb = nv_connector->detected_encoder->dcb;
+
+    if (dcb->location != DCB_LOC_ON_CHIP ||
+        dev_priv->chipset >= 0x46)
+        return 165000;
+    else if (dev_priv->chipset >= 0x40)
+        return 155000;
+    else if (dev_priv->chipset >= 0x18)
+        return 135000;
+    else
+        return 112000;
+}
+
 static int
 nouveau_connector_mode_valid(struct drm_connector *connector,
                              struct drm_display_mode *mode)
 {
-    struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
     struct nouveau_connector *nv_connector = nouveau_connector(connector);
     struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
     struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -663,11 +680,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
         max_clock = 400000;
         break;
     case OUTPUT_TMDS:
-        if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
-            !nv_encoder->dcb->duallink_possible)
-            max_clock = 165000;
-        else
-            max_clock = 330000;
+        max_clock = get_tmds_link_bandwidth(connector);
+        if (nouveau_duallink && nv_encoder->dcb->duallink_possible)
+            max_clock *= 2;
         break;
     case OUTPUT_ANALOG:
         max_clock = nv_encoder->dcb->crtconf.maxfreq;
@@ -709,44 +724,6 @@ nouveau_connector_best_encoder(struct drm_connector *connector)
     return NULL;
 }
 
-void
-nouveau_connector_set_polling(struct drm_connector *connector)
-{
-    struct drm_device *dev = connector->dev;
-    struct drm_nouveau_private *dev_priv = dev->dev_private;
-    struct drm_crtc *crtc;
-    bool spare_crtc = false;
-
-    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-        spare_crtc |= !crtc->enabled;
-
-    connector->polled = 0;
-
-    switch (connector->connector_type) {
-    case DRM_MODE_CONNECTOR_VGA:
-    case DRM_MODE_CONNECTOR_TV:
-        if (dev_priv->card_type >= NV_50 ||
-            (nv_gf4_disp_arch(dev) && spare_crtc))
-            connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-        break;
-
-    case DRM_MODE_CONNECTOR_DVII:
-    case DRM_MODE_CONNECTOR_DVID:
-    case DRM_MODE_CONNECTOR_HDMIA:
-    case DRM_MODE_CONNECTOR_DisplayPort:
-    case DRM_MODE_CONNECTOR_eDP:
-        if (dev_priv->card_type >= NV_50)
-            connector->polled = DRM_CONNECTOR_POLL_HPD;
-        else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
-             spare_crtc)
-            connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-        break;
-
-    default:
-        break;
-    }
-}
-
 static const struct drm_connector_helper_funcs
 nouveau_connector_helper_funcs = {
     .get_modes = nouveau_connector_get_modes,
@@ -872,6 +849,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
                 dev->mode_config.scaling_mode_property,
                 nv_connector->scaling_mode);
         }
+        connector->polled = DRM_CONNECTOR_POLL_CONNECT;
         /* fall-through */
     case DCB_CONNECTOR_TV_0:
     case DCB_CONNECTOR_TV_1:
@@ -888,11 +866,16 @@ nouveau_connector_create(struct drm_device *dev, int index)
                     dev->mode_config.dithering_mode_property,
                     nv_connector->use_dithering ?
                     DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+
+        if (dcb->type != DCB_CONNECTOR_LVDS) {
+            if (dev_priv->card_type >= NV_50)
+                connector->polled = DRM_CONNECTOR_POLL_HPD;
+            else
+                connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+        }
         break;
     }
 
-    nouveau_connector_set_polling(connector);
-
     drm_sysfs_connector_add(connector);
     dcb->drm = connector;
     return dcb->drm;
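With get_tmds_link_bandwidth() the mode filter becomes a plain clock comparison: take the single-link limit for the chipset, double it when dual-link is possible and enabled, and reject modes whose pixel clock exceeds it. Roughly (a sketch using the kHz thresholds from the diff; the chipset value and duallink flag are stand-ins):

    #include <stdio.h>

    /* single-link TMDS limit in kHz, per the table in the diff */
    static unsigned tmds_link_bandwidth(int on_chip, unsigned chipset)
    {
        if (!on_chip || chipset >= 0x46)
            return 165000;
        else if (chipset >= 0x40)
            return 155000;
        else if (chipset >= 0x18)
            return 135000;
        return 112000;
    }

    int main(void)
    {
        unsigned mode_clock = 268500;   /* e.g. a 2560x1600@60 mode */
        unsigned max_clock = tmds_link_bandwidth(1, 0x46);
        int duallink = 1;               /* module option && DCB says possible */

        if (duallink)
            max_clock *= 2;
        printf("%s\n", mode_clock > max_clock ? "MODE_CLOCK_HIGH" : "MODE_OK");
        return 0;
    }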
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index c21ed6b16f8..711b1e9203a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -52,9 +52,6 @@ static inline struct nouveau_connector *nouveau_connector(
 struct drm_connector *
 nouveau_connector_create(struct drm_device *, int index);
 
-void
-nouveau_connector_set_polling(struct drm_connector *);
-
 int
 nouveau_connector_bpp(struct drm_connector *);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3a07e580d27..1c7db64c03b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -100,6 +100,9 @@ struct nouveau_bo {
     int pin_refcnt;
 };
 
+#define nouveau_bo_tile_layout(nvbo) \
+    ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
+
 static inline struct nouveau_bo *
 nouveau_bo(struct ttm_buffer_object *bo)
 {
@@ -304,6 +307,7 @@ struct nouveau_fifo_engine {
     void (*destroy_context)(struct nouveau_channel *);
     int (*load_context)(struct nouveau_channel *);
     int (*unload_context)(struct drm_device *);
+    void (*tlb_flush)(struct drm_device *dev);
 };
 
 struct nouveau_pgraph_object_method {
@@ -336,6 +340,7 @@ struct nouveau_pgraph_engine {
     void (*destroy_context)(struct nouveau_channel *);
     int (*load_context)(struct nouveau_channel *);
     int (*unload_context)(struct drm_device *);
+    void (*tlb_flush)(struct drm_device *dev);
 
     void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
                               uint32_t size, uint32_t pitch);
@@ -485,13 +490,13 @@ enum nv04_fp_display_regs {
 };
 
 struct nv04_crtc_reg {
-    unsigned char MiscOutReg; /* */
+    unsigned char MiscOutReg;
     uint8_t CRTC[0xa0];
     uint8_t CR58[0x10];
     uint8_t Sequencer[5];
     uint8_t Graphics[9];
     uint8_t Attribute[21];
-    unsigned char DAC[768]; /* Internal Colorlookuptable */
+    unsigned char DAC[768];
 
     /* PCRTC regs */
     uint32_t fb_start;
@@ -539,43 +544,9 @@ struct nv04_output_reg {
 };
 
 struct nv04_mode_state {
-    uint32_t bpp;
-    uint32_t width;
-    uint32_t height;
-    uint32_t interlace;
-    uint32_t repaint0;
-    uint32_t repaint1;
-    uint32_t screen;
-    uint32_t scale;
-    uint32_t dither;
-    uint32_t extra;
-    uint32_t fifo;
-    uint32_t pixel;
-    uint32_t horiz;
-    int arbitration0;
-    int arbitration1;
-    uint32_t pll;
-    uint32_t pllB;
-    uint32_t vpll;
-    uint32_t vpll2;
-    uint32_t vpllB;
-    uint32_t vpll2B;
+    struct nv04_crtc_reg crtc_reg[2];
     uint32_t pllsel;
     uint32_t sel_clk;
-    uint32_t general;
-    uint32_t crtcOwner;
-    uint32_t head;
-    uint32_t head2;
-    uint32_t cursorConfig;
-    uint32_t cursor0;
-    uint32_t cursor1;
-    uint32_t cursor2;
-    uint32_t timingH;
-    uint32_t timingV;
-    uint32_t displayV;
-    uint32_t crtcSync;
-
-    struct nv04_crtc_reg crtc_reg[2];
 };
 
 enum nouveau_card_type {
@@ -613,6 +584,12 @@ struct drm_nouveau_private {
     struct work_struct irq_work;
     struct work_struct hpd_work;
 
+    struct {
+        spinlock_t lock;
+        uint32_t hpd0_bits;
+        uint32_t hpd1_bits;
+    } hpd_state;
+
     struct list_head vbl_waiting;
 
     struct {
@@ -1045,6 +1022,7 @@ extern int nv50_fifo_create_context(struct nouveau_channel *);
 extern void nv50_fifo_destroy_context(struct nouveau_channel *);
 extern int nv50_fifo_load_context(struct nouveau_channel *);
 extern int nv50_fifo_unload_context(struct drm_device *);
+extern void nv50_fifo_tlb_flush(struct drm_device *dev);
 
 /* nvc0_fifo.c */
 extern int nvc0_fifo_init(struct drm_device *);
@@ -1122,6 +1100,8 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
 extern int nv50_graph_unload_context(struct drm_device *);
 extern void nv50_graph_context_switch(struct drm_device *);
 extern int nv50_grctx_init(struct nouveau_grctx *);
+extern void nv50_graph_tlb_flush(struct drm_device *dev);
+extern void nv86_graph_tlb_flush(struct drm_device *dev);
 
 /* nvc0_graph.c */
 extern int nvc0_graph_init(struct drm_device *);
@@ -1239,7 +1219,6 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
-extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
 
 /* nouveau_fence.c */
 struct nouveau_fence;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 441b12420bb..ab1bbfbf266 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -249,6 +249,7 @@ alloc_semaphore(struct drm_device *dev)
 {
     struct drm_nouveau_private *dev_priv = dev->dev_private;
     struct nouveau_semaphore *sema;
+    int ret;
 
     if (!USE_SEMA(dev))
         return NULL;
@@ -257,10 +258,14 @@ alloc_semaphore(struct drm_device *dev)
     if (!sema)
         goto fail;
 
+    ret = drm_mm_pre_get(&dev_priv->fence.heap);
+    if (ret)
+        goto fail;
+
     spin_lock(&dev_priv->fence.lock);
     sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
     if (sema->mem)
-        sema->mem = drm_mm_get_block(sema->mem, 4, 0);
+        sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
     spin_unlock(&dev_priv->fence.lock);
 
     if (!sema->mem)
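The drm_mm_pre_get()/drm_mm_get_block_atomic() pairing is the usual trick for allocating under a spinlock: do whatever might sleep before taking the lock, then use the non-sleeping variant inside it. The shape of the pattern as a generic sketch (the prealloc/alloc_atomic names are illustrative, not drm_mm internals):

    /* Sketch of "preallocate outside the lock, commit inside it". */
    #include <pthread.h>
    #include <stdlib.h>

    static pthread_spinlock_t lock;
    static void *cache;                /* stands in for drm_mm's prealloc list */

    static int prealloc(void)          /* may sleep / call malloc */
    {
        if (!cache)
            cache = malloc(64);
        return cache ? 0 : -1;
    }

    static void *alloc_atomic(void)    /* safe under the spinlock */
    {
        void *node = cache;
        cache = NULL;
        return node;
    }

    int main(void)
    {
        pthread_spin_init(&lock, 0);
        if (prealloc())                /* like drm_mm_pre_get() */
            return 1;

        pthread_spin_lock(&lock);
        void *mem = alloc_atomic();    /* like drm_mm_get_block_atomic() */
        pthread_spin_unlock(&lock);

        free(mem);
        return 0;
    }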
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5c4c929d7f7..9a1fdcf400c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -107,23 +107,29 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
 }
 
 static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) {
-    switch (tile_flags) {
-    case 0x0000:
-    case 0x1800:
-    case 0x2800:
-    case 0x4800:
-    case 0x7000:
-    case 0x7400:
-    case 0x7a00:
-    case 0xe000:
-        break;
-    default:
-        NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
-        return false;
+nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
+{
+    struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+    if (dev_priv->card_type >= NV_50) {
+        switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
+        case 0x0000:
+        case 0x1800:
+        case 0x2800:
+        case 0x4800:
+        case 0x7000:
+        case 0x7400:
+        case 0x7a00:
+        case 0xe000:
+            return true;
+        }
+    } else {
+        if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+            return true;
     }
 
-    return true;
+    NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
+    return false;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index bed669a54a2..b9672a05c41 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -519,11 +519,11 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
 
     struct pll_lims pll_lim;
     struct nouveau_pll_vals pv;
-    uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
+    enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0;
 
-    if (get_pll_limits(dev, pllreg, &pll_lim))
+    if (get_pll_limits(dev, pll, &pll_lim))
         return;
-    nouveau_hw_get_pllvals(dev, pllreg, &pv);
+    nouveau_hw_get_pllvals(dev, pll, &pv);
 
     if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
         pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
@@ -536,7 +536,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
     pv.M1 = pll_lim.vco1.max_m;
     pv.N1 = pll_lim.vco1.min_n;
     pv.log2P = pll_lim.max_usable_log2p;
-    nouveau_hw_setpll(dev, pllreg, &pv);
+    nouveau_hw_setpll(dev, pll_lim.reg, &pv);
 }
 
 /*
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
index 869130f8360..2989090b943 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.h
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -416,6 +416,25 @@ nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
 }
 
 static inline void
+nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
+{
+    struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+    NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
+
+    if (dev_priv->card_type == NV_04) {
+        /*
+         * Hilarious, the 24th bit doesn't want to stick to
+         * PCRTC_START...
+         */
+        int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX);
+
+        NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX,
+                       (cre_heb & ~0x40) | ((offset >> 18) & 0x40));
+    }
+}
+
+static inline void
 nv_show_cursor(struct drm_device *dev, int head, bool show)
 {
     struct drm_nouveau_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index fdd7e3de79c..cb389d01432 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -256,7 +256,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
     if (index >= DCB_MAX_NUM_I2C_ENTRIES)
         return NULL;
 
-    if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
+    if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
         uint32_t reg = 0xe500, val;
 
         if (i2c->port_type == 6) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 6fd51a51c60..7bfd9e6c9d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -42,6 +42,13 @@
 #include "nouveau_connector.h"
 #include "nv50_display.h"
 
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+static int nouveau_ratelimit(void)
+{
+    return __ratelimit(&nouveau_ratelimit_state);
+}
+
 void
 nouveau_irq_preinstall(struct drm_device *dev)
 {
@@ -53,6 +60,7 @@ nouveau_irq_preinstall(struct drm_device *dev)
     if (dev_priv->card_type >= NV_50) {
         INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
         INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
+        spin_lock_init(&dev_priv->hpd_state.lock);
         INIT_LIST_HEAD(&dev_priv->vbl_waiting);
     }
 }
@@ -202,8 +210,8 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
         }
 
         if (status & NV_PFIFO_INTR_DMA_PUSHER) {
-            u32 get = nv_rd32(dev, 0x003244);
-            u32 put = nv_rd32(dev, 0x003240);
+            u32 dma_get = nv_rd32(dev, 0x003244);
+            u32 dma_put = nv_rd32(dev, 0x003240);
             u32 push = nv_rd32(dev, 0x003220);
             u32 state = nv_rd32(dev, 0x003228);
 
@@ -213,16 +221,18 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
             u32 ib_get = nv_rd32(dev, 0x003334);
             u32 ib_put = nv_rd32(dev, 0x003330);
 
-            NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
-                 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
-                 "State 0x%08x Push 0x%08x\n",
-                chid, ho_get, get, ho_put, put, ib_get, ib_put,
-                state, push);
+            if (nouveau_ratelimit())
+                NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+                     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+                     "State 0x%08x Push 0x%08x\n",
+                    chid, ho_get, dma_get, ho_put,
+                    dma_put, ib_get, ib_put, state,
+                    push);
 
             /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
             nv_wr32(dev, 0x003364, 0x00000000);
-            if (get != put || ho_get != ho_put) {
-                nv_wr32(dev, 0x003244, put);
+            if (dma_get != dma_put || ho_get != ho_put) {
+                nv_wr32(dev, 0x003244, dma_put);
                 nv_wr32(dev, 0x003328, ho_put);
             } else
             if (ib_get != ib_put) {
@@ -231,10 +241,10 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
             } else {
                 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
                     "Put 0x%08x State 0x%08x Push 0x%08x\n",
-                    chid, get, put, state, push);
+                    chid, dma_get, dma_put, state, push);
 
-                if (get != put)
-                    nv_wr32(dev, 0x003244, put);
+                if (dma_get != dma_put)
+                    nv_wr32(dev, 0x003244, dma_put);
             }
 
             nv_wr32(dev, 0x003228, 0x00000000);
@@ -266,8 +276,9 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
     }
 
     if (status) {
-        NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
-            status, chid);
+        if (nouveau_ratelimit())
+            NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+                status, chid);
         nv_wr32(dev, NV03_PFIFO_INTR_0, status);
         status = 0;
     }
@@ -544,13 +555,6 @@ nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
     nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
 }
 
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-static int nouveau_ratelimit(void)
-{
-    return __ratelimit(&nouveau_ratelimit_state);
-}
-
 
 static inline void
 nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
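Moving nouveau_ratelimit() to the top of the file lets the PFIFO handlers share it with PGRAPH; the kernel's __ratelimit() allows a burst of messages per interval and drops the rest. A user-space approximation of that behaviour (a sketch with a fixed window; the 3-second/20-message numbers mirror the state declared above):

    #include <stdio.h>
    #include <time.h>

    /* allow at most `burst` events per `interval` seconds */
    static int ratelimit(void)
    {
        static time_t window_start;
        static int count;
        const int interval = 3, burst = 20;
        time_t now = time(NULL);

        if (now - window_start >= interval) {
            window_start = now;
            count = 0;
        }
        return count++ < burst;
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            if (ratelimit())
                printf("PFIFO_DMA_PUSHER spam %d\n", i);
        return 0;
    }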
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a163c7c612e..fe4a30dc4b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -33,9 +33,9 @@
 #include "drmP.h"
 #include "drm.h"
 #include "drm_sarea.h"
-#include "nouveau_drv.h"
 
-#define MIN(a,b) a < b ? a : b
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
 
 /*
  * NV10-NV40 tiling helpers
@@ -175,11 +175,10 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
             }
         }
     }
-    dev_priv->engine.instmem.flush(dev);
 
-    nv50_vm_flush(dev, 5);
-    nv50_vm_flush(dev, 0);
-    nv50_vm_flush(dev, 4);
+    dev_priv->engine.instmem.flush(dev);
+    dev_priv->engine.fifo.tlb_flush(dev);
+    dev_priv->engine.graph.tlb_flush(dev);
     nv50_vm_flush(dev, 6);
     return 0;
 }
@@ -209,11 +208,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
             pte++;
         }
     }
-    dev_priv->engine.instmem.flush(dev);
 
-    nv50_vm_flush(dev, 5);
-    nv50_vm_flush(dev, 0);
-    nv50_vm_flush(dev, 4);
+    dev_priv->engine.instmem.flush(dev);
+    dev_priv->engine.fifo.tlb_flush(dev);
+    dev_priv->engine.graph.tlb_flush(dev);
     nv50_vm_flush(dev, 6);
 }
 
@@ -653,6 +651,7 @@ nouveau_mem_gart_init(struct drm_device *dev)
 void
 nouveau_mem_timing_init(struct drm_device *dev)
 {
+    /* cards < NVC0 only */
     struct drm_nouveau_private *dev_priv = dev->dev_private;
     struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
     struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
@@ -719,14 +718,14 @@ nouveau_mem_timing_init(struct drm_device *dev)
         tUNK_19 = 1;
         tUNK_20 = 0;
         tUNK_21 = 0;
-        switch (MIN(recordlen,21)) {
-        case 21:
+        switch (min(recordlen, 22)) {
+        case 22:
             tUNK_21 = entry[21];
-        case 20:
+        case 21:
             tUNK_20 = entry[20];
-        case 19:
+        case 20:
             tUNK_19 = entry[19];
-        case 18:
+        case 19:
             tUNK_18 = entry[18];
         default:
             tUNK_0 = entry[0];
@@ -756,24 +755,30 @@ nouveau_mem_timing_init(struct drm_device *dev)
         timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
         if(recordlen > 19) {
             timing->reg_100228 += (tUNK_19 - 1) << 24;
-        } else {
+        }/* I cannot back-up this else-statement right now
+         else {
             timing->reg_100228 += tUNK_12 << 24;
-        }
+        }*/
 
         /* XXX: reg_10022c */
+        timing->reg_10022c = tUNK_2 - 1;
 
         timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
                               tUNK_13 << 8 | tUNK_13);
 
         /* XXX: +6? */
         timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
-        if(tUNK_10 > tUNK_11) {
-            timing->reg_100234 += tUNK_10 << 16;
-        } else {
-            timing->reg_100234 += tUNK_11 << 16;
+        timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
+
+        /* XXX; reg_100238, reg_10023c
+         * reg: 0x00??????
+         * reg_10023c:
+         *   0 for pre-NV50 cards
+         *   0x????0202 for NV50+ cards (empirical evidence) */
+        if(dev_priv->card_type >= NV_50) {
+            timing->reg_10023c = 0x202;
         }
 
-        /* XXX; reg_100238, reg_10023c */
         NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
              timing->reg_100220, timing->reg_100224,
              timing->reg_100228, timing->reg_10022c);
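The renumbered switch (min(recordlen, 22)) relies on C's case fall-through: clamp to the table's record length, enter at the highest available case, and read every optional trailing byte the VBIOS actually provides. In isolation (a sketch; the 22-byte entry contents are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned char entry[22] = { [0] = 5, [18] = 9, [21] = 7 };
        int recordlen = 22;     /* as reported by the timing table header */
        int tUNK_18 = 1, tUNK_19 = 1, tUNK_20 = 0, tUNK_21 = 0, tUNK_0 = 0;

        switch (recordlen < 22 ? recordlen : 22) {
        case 22:
            tUNK_21 = entry[21];    /* fall through */
        case 21:
            tUNK_20 = entry[20];    /* fall through */
        case 20:
            tUNK_19 = entry[19];    /* fall through */
        case 19:
            tUNK_18 = entry[18];    /* fall through */
        default:
            tUNK_0 = entry[0];
        }
        printf("%d %d %d %d %d\n", tUNK_0, tUNK_18, tUNK_19, tUNK_20, tUNK_21);
        return 0;
    }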
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 896cf863414..dd572adca02 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -129,7 +129,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
     if (ramin == NULL) {
         spin_unlock(&dev_priv->ramin_lock);
         nouveau_gpuobj_ref(NULL, &gpuobj);
-        return ret;
+        return -ENOMEM;
     }
 
     ramin = drm_mm_get_block_atomic(ramin, size, align);
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 1c99c55d6d4..9f7b158f582 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -284,6 +284,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
     }
 }
 
+#ifdef CONFIG_HWMON
 static ssize_t
 nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
 {
@@ -395,10 +396,12 @@ static struct attribute *hwmon_attributes[] = {
 static const struct attribute_group hwmon_attrgroup = {
     .attrs = hwmon_attributes,
 };
+#endif
 
 static int
 nouveau_hwmon_init(struct drm_device *dev)
 {
+#ifdef CONFIG_HWMON
     struct drm_nouveau_private *dev_priv = dev->dev_private;
     struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
     struct device *hwmon_dev;
@@ -425,13 +428,14 @@ nouveau_hwmon_init(struct drm_device *dev)
     }
 
     pm->hwmon = hwmon_dev;
-
+#endif
     return 0;
 }
 
 static void
 nouveau_hwmon_fini(struct drm_device *dev)
 {
+#ifdef CONFIG_HWMON
     struct drm_nouveau_private *dev_priv = dev->dev_private;
     struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 
@@ -439,6 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
         sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
         hwmon_device_unregister(pm->hwmon);
     }
+#endif
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 7f16697cc96..2d8580927ca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -153,26 +153,42 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
     return -ENOMEM;
 }
 
+static struct nouveau_ramht_entry *
+nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
+{
+    struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
+    struct nouveau_ramht_entry *entry;
+    unsigned long flags;
+
+    if (!ramht)
+        return NULL;
+
+    spin_lock_irqsave(&ramht->lock, flags);
+    list_for_each_entry(entry, &ramht->entries, head) {
+        if (entry->channel == chan &&
+            (!handle || entry->handle == handle)) {
+            list_del(&entry->head);
+            spin_unlock_irqrestore(&ramht->lock, flags);
+
+            return entry;
+        }
+    }
+    spin_unlock_irqrestore(&ramht->lock, flags);
+
+    return NULL;
+}
+
 static void
-nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
+nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
 {
     struct drm_device *dev = chan->dev;
     struct drm_nouveau_private *dev_priv = dev->dev_private;
     struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
     struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
-    struct nouveau_ramht_entry *entry, *tmp;
+    unsigned long flags;
     u32 co, ho;
 
-    list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) {
-        if (entry->channel != chan || entry->handle != handle)
-            continue;
-
-        nouveau_gpuobj_ref(NULL, &entry->gpuobj);
-        list_del(&entry->head);
-        kfree(entry);
-        break;
-    }
-
+    spin_lock_irqsave(&chan->ramht->lock, flags);
     co = ho = nouveau_ramht_hash_handle(chan, handle);
     do {
         if (nouveau_ramht_entry_valid(dev, ramht, co) &&
@@ -184,7 +200,7 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
             nv_wo32(ramht, co + 0, 0x00000000);
             nv_wo32(ramht, co + 4, 0x00000000);
             instmem->flush(dev);
-            return;
+            goto out;
         }
 
         co += 8;
@@ -194,17 +210,22 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
 
     NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
          chan->id, handle);
+out:
+    spin_unlock_irqrestore(&chan->ramht->lock, flags);
 }
 
 void
 nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
 {
-    struct nouveau_ramht *ramht = chan->ramht;
-    unsigned long flags;
+    struct nouveau_ramht_entry *entry;
 
-    spin_lock_irqsave(&ramht->lock, flags);
-    nouveau_ramht_remove_locked(chan, handle);
-    spin_unlock_irqrestore(&ramht->lock, flags);
+    entry = nouveau_ramht_remove_entry(chan, handle);
+    if (!entry)
+        return;
+
+    nouveau_ramht_remove_hash(chan, entry->handle);
+    nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+    kfree(entry);
 }
 
 struct nouveau_gpuobj *
@@ -265,23 +286,19 @@ void
 nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
           struct nouveau_channel *chan)
 {
-    struct nouveau_ramht_entry *entry, *tmp;
+    struct nouveau_ramht_entry *entry;
     struct nouveau_ramht *ramht;
-    unsigned long flags;
 
     if (ref)
         kref_get(&ref->refcount);
 
     ramht = *ptr;
     if (ramht) {
-        spin_lock_irqsave(&ramht->lock, flags);
-        list_for_each_entry_safe(entry, tmp, &ramht->entries, head) {
-            if (entry->channel != chan)
-                continue;
-
-            nouveau_ramht_remove_locked(chan, entry->handle);
+        while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
+            nouveau_ramht_remove_hash(chan, entry->handle);
+            nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+            kfree(entry);
         }
-        spin_unlock_irqrestore(&ramht->lock, flags);
 
         kref_put(&ramht->refcount, nouveau_ramht_del);
     }
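The refactor splits removal in two: nouveau_ramht_remove_entry() unlinks the bookkeeping entry while holding the spinlock, and the caller drops references and frees it after the lock is released, so teardown never runs with the lock held. The generic unlink-under-lock, free-outside pattern (a sketch with a pthread mutex standing in for the spinlock):

    #include <pthread.h>
    #include <stdlib.h>

    struct entry { struct entry *next; int handle; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry *entries;

    /* unlink under the lock, but do not free yet */
    static struct entry *remove_entry(int handle)
    {
        struct entry **p, *e;

        pthread_mutex_lock(&lock);
        for (p = &entries; (e = *p); p = &e->next) {
            if (!handle || e->handle == handle) {
                *p = e->next;
                break;
            }
        }
        pthread_mutex_unlock(&lock);
        return e;
    }

    int main(void)
    {
        struct entry *e = malloc(sizeof(*e));
        e->handle = 42;
        e->next = NULL;
        entries = e;

        e = remove_entry(42);
        free(e);    /* teardown happens outside the lock */
        return 0;
    }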
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 288bacac7e5..d4ac9700703 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -120,8 +120,8 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
     dev_priv->engine.instmem.flush(nvbe->dev);
 
     if (dev_priv->card_type == NV_50) {
-        nv50_vm_flush(dev, 5); /* PGRAPH */
-        nv50_vm_flush(dev, 0); /* PFIFO */
+        dev_priv->engine.fifo.tlb_flush(dev);
+        dev_priv->engine.graph.tlb_flush(dev);
     }
 
     nvbe->bound = true;
@@ -162,8 +162,8 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
     dev_priv->engine.instmem.flush(nvbe->dev);
 
     if (dev_priv->card_type == NV_50) {
-        nv50_vm_flush(dev, 5);
-        nv50_vm_flush(dev, 0);
+        dev_priv->engine.fifo.tlb_flush(dev);
+        dev_priv->engine.graph.tlb_flush(dev);
     }
 
     nvbe->bound = false;
@@ -224,7 +224,11 @@ nouveau_sgdma_init(struct drm_device *dev)
     int i, ret;
 
     if (dev_priv->card_type < NV_50) {
-        aper_size = (64 * 1024 * 1024);
+        if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
+            aper_size = 64 * 1024 * 1024;
+        else
+            aper_size = 512 * 1024 * 1024;
+
         obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
         obj_size += 8; /* ctxdma header */
     } else {
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index ed7757f1408..049f755567e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -354,6 +354,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->graph.destroy_context = nv50_graph_destroy_context;
         engine->graph.load_context = nv50_graph_load_context;
         engine->graph.unload_context = nv50_graph_unload_context;
+        if (dev_priv->chipset != 0x86)
+            engine->graph.tlb_flush = nv50_graph_tlb_flush;
+        else {
+            /* from what i can see nvidia do this on every
+             * pre-NVA3 board except NVAC, but, we've only
+             * ever seen problems on NV86
+             */
+            engine->graph.tlb_flush = nv86_graph_tlb_flush;
+        }
         engine->fifo.channels = 128;
         engine->fifo.init = nv50_fifo_init;
         engine->fifo.takedown = nv50_fifo_takedown;
@@ -365,6 +374,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->fifo.destroy_context = nv50_fifo_destroy_context;
         engine->fifo.load_context = nv50_fifo_load_context;
         engine->fifo.unload_context = nv50_fifo_unload_context;
+        engine->fifo.tlb_flush = nv50_fifo_tlb_flush;
         engine->display.early_init = nv50_display_early_init;
         engine->display.late_takedown = nv50_display_late_takedown;
         engine->display.create = nv50_display_create;
@@ -1041,6 +1051,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
     case NOUVEAU_GETPARAM_PTIMER_TIME:
         getparam->value = dev_priv->engine.timer.read(dev);
         break;
+    case NOUVEAU_GETPARAM_HAS_BO_USAGE:
+        getparam->value = 1;
+        break;
     case NOUVEAU_GETPARAM_GRAPH_UNITS:
         /* NV40 and NV50 versions are quite different, but register
          * address is the same. User is supposed to know the card
@@ -1051,7 +1064,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
     }
         /* FALLTHRU */
     default:
-        NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
+        NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
         return -EINVAL;
     }
 
@@ -1066,7 +1079,7 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
 
     switch (setparam->param) {
     default:
-        NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
+        NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param);
         return -EINVAL;
     }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 16bbbf1eff6..7ecc4adc1e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -191,7 +191,7 @@ nv40_temp_get(struct drm_device *dev)
     int offset = sensor->offset_mult / sensor->offset_div;
     int core_temp;
 
-    if (dev_priv->chipset >= 0x50) {
+    if (dev_priv->card_type >= NV_50) {
         core_temp = nv_rd32(dev, 0x20008);
     } else {
         core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index c71abc2a34d..40e18074162 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -158,7 +158,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
     struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
     struct drm_device *dev = crtc->dev;
-    struct drm_connector *connector;
     unsigned char seq1 = 0, crtc17 = 0;
     unsigned char crtc1A;
 
@@ -213,10 +212,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
     NVVgaSeqReset(dev, nv_crtc->index, false);
 
     NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
-
-    /* Update connector polling modes */
-    list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-        nouveau_connector_set_polling(connector);
 }
 
 static bool
@@ -831,7 +826,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
     /* Update the framebuffer location. */
     regp->fb_start = nv_crtc->fb.offset & ~3;
     regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
-    NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start);
+    nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
 
     /* Update the arbitration parameters. */
     nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index c936403b26e..ef23550407b 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -185,14 +185,15 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
     struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
     struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
 
-    /* For internal panels and gpu scaling on DVI we need the native mode */
-    if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
-        if (!nv_connector->native_mode)
-            return false;
+    if (!nv_connector->native_mode ||
+        nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
+        mode->hdisplay > nv_connector->native_mode->hdisplay ||
+        mode->vdisplay > nv_connector->native_mode->vdisplay) {
+        nv_encoder->mode = *adjusted_mode;
+
+    } else {
         nv_encoder->mode = *nv_connector->native_mode;
         adjusted_mode->clock = nv_connector->native_mode->clock;
-    } else {
-        nv_encoder->mode = *adjusted_mode;
     }
 
     return true;
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 6a6eb697d38..eb1c70dd82e 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -76,6 +76,15 @@ nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
     reg += 4;
 
     nouveau_hw_setpll(dev, reg, &state->calc);
+
+    if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) {
+        if (dev_priv->card_type == NV_20)
+            nv_mask(dev, 0x1002c4, 0, 1 << 20);
+
+        /* Reset the DLLs */
+        nv_mask(dev, 0x1002c0, 0, 1 << 8);
+    }
+
     kfree(state);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
index 2cdc2bfe717..de81151648f 100644
--- a/drivers/gpu/drm/nouveau/nv50_calc.c
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -51,24 +51,28 @@ nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
            int *N, int *fN, int *M, int *P)
 {
     fixed20_12 fb_div, a, b;
+    u32 refclk = pll->refclk / 10;
+    u32 max_vco_freq = pll->vco1.maxfreq / 10;
+    u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10;
+    clk /= 10;
 
-    *P = pll->vco1.maxfreq / clk;
+    *P = max_vco_freq / clk;
     if (*P > pll->max_p)
         *P = pll->max_p;
     if (*P < pll->min_p)
         *P = pll->min_p;
 
-    /* *M = ceil(refclk / pll->vco.max_inputfreq); */
-    a.full = dfixed_const(pll->refclk);
-    b.full = dfixed_const(pll->vco1.max_inputfreq);
+    /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */
+    a.full = dfixed_const(refclk + max_vco_inputfreq);
+    b.full = dfixed_const(max_vco_inputfreq);
     a.full = dfixed_div(a, b);
-    a.full = dfixed_ceil(a);
+    a.full = dfixed_floor(a);
     *M = dfixed_trunc(a);
 
     /* fb_div = (vco * *M) / refclk; */
     fb_div.full = dfixed_const(clk * *P);
     fb_div.full = dfixed_mul(fb_div, a);
-    a.full = dfixed_const(pll->refclk);
+    a.full = dfixed_const(refclk);
     fb_div.full = dfixed_div(fb_div, a);
 
     /* *N = floor(fb_div); */
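Dividing every clock by 10 up front keeps the 20.12 fixed-point intermediates inside their 20-bit integer range. The M computation itself is floor((refclk + max_in) / max_in), an ordinary integer rounding; done with a dfixed-style 20.12 representation in plain C (a sketch; the clock values are made up, the macros imitate but are not the kernel's dfixed helpers):

    #include <stdint.h>
    #include <stdio.h>

    /* 20.12 fixed point, like the kernel's fixed20_12 helpers */
    typedef uint32_t fixed20_12;
    #define F(x)           ((fixed20_12)((x) << 12))
    #define FIX_DIV(a, b)  ((fixed20_12)(((uint64_t)(a) << 12) / (b)))
    #define FIX_FLOOR(a)   ((a) & ~0xfffu)
    #define FIX_TRUNC(a)   ((a) >> 12)

    int main(void)
    {
        uint32_t refclk = 2702;    /* 27.02 MHz in 10 kHz units */
        uint32_t max_in = 40000;   /* 400 MHz max VCO input, same scale */

        /* *M = floor((refclk + max_in) / max_in); */
        fixed20_12 a = F(refclk + max_in);
        fixed20_12 b = F(max_in);
        a = FIX_DIV(a, b);
        a = FIX_FLOOR(a);
        printf("M = %u\n", FIX_TRUNC(a));   /* prints M = 1 */
        return 0;
    }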
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 16380d52cd8..56476d0c6de 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -546,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
     }
 
     nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
-    nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
+    nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
     nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
     if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
         ret = RING_SPACE(evo, 2);
@@ -578,7 +578,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
              fb->nvbo->tile_mode);
     }
     if (dev_priv->chipset == 0x50)
-        OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format);
+        OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
     else
         OUT_RING(evo, format);
 
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 55c9663ef2b..f624c611dde 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1032,11 +1032,18 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
     struct drm_connector *connector;
     const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
     uint32_t unplug_mask, plug_mask, change_mask;
-    uint32_t hpd0, hpd1 = 0;
+    uint32_t hpd0, hpd1;
 
-    hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+    spin_lock_irq(&dev_priv->hpd_state.lock);
+    hpd0 = dev_priv->hpd_state.hpd0_bits;
+    dev_priv->hpd_state.hpd0_bits = 0;
+    hpd1 = dev_priv->hpd_state.hpd1_bits;
+    dev_priv->hpd_state.hpd1_bits = 0;
+    spin_unlock_irq(&dev_priv->hpd_state.lock);
+
+    hpd0 &= nv_rd32(dev, 0xe050);
     if (dev_priv->chipset >= 0x90)
-        hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+        hpd1 &= nv_rd32(dev, 0xe070);
 
     plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
     unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
@@ -1078,10 +1085,6 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
             helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
     }
 
-    nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
-    if (dev_priv->chipset >= 0x90)
-        nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
-
     drm_helper_hpd_irq_event(dev);
 }
 
@@ -1092,8 +1095,22 @@ nv50_display_irq_handler(struct drm_device *dev)
     uint32_t delayed = 0;
 
     if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
-        if (!work_pending(&dev_priv->hpd_work))
-            queue_work(dev_priv->wq, &dev_priv->hpd_work);
+        uint32_t hpd0_bits, hpd1_bits = 0;
+
+        hpd0_bits = nv_rd32(dev, 0xe054);
+        nv_wr32(dev, 0xe054, hpd0_bits);
+
+        if (dev_priv->chipset >= 0x90) {
+            hpd1_bits = nv_rd32(dev, 0xe074);
+            nv_wr32(dev, 0xe074, hpd1_bits);
+        }
+
+        spin_lock(&dev_priv->hpd_state.lock);
+        dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
+        dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
+        spin_unlock(&dev_priv->hpd_state.lock);
+
+        queue_work(dev_priv->wq, &dev_priv->hpd_work);
     }
 
     while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
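The handler now acks the hardware immediately and parks the pending bits in hpd_state under a spinlock; the work item later swaps them out and clears them, so no hotplug edge is lost even if several interrupts fire before the bottom half runs. The latch-and-drain pattern on its own (a pthread sketch, not the nouveau code):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdint.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t pending_bits;

    static void irq_handler(uint32_t hw_bits)   /* top half: just latch */
    {
        pthread_mutex_lock(&lock);
        pending_bits |= hw_bits;
        pthread_mutex_unlock(&lock);
    }

    static uint32_t bottom_half(void)           /* work item: drain */
    {
        pthread_mutex_lock(&lock);
        uint32_t bits = pending_bits;
        pending_bits = 0;
        pthread_mutex_unlock(&lock);
        return bits;
    }

    int main(void)
    {
        irq_handler(0x1);    /* two IRQs before the work item runs... */
        irq_handler(0x4);
        printf("bottom half sees 0x%x\n", bottom_half());   /* 0x5 */
        return 0;
    }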
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index a46a961102f..1da65bd60c1 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -464,3 +464,8 @@ nv50_fifo_unload_context(struct drm_device *dev)
     return 0;
 }
 
+void
+nv50_fifo_tlb_flush(struct drm_device *dev)
+{
+    nv50_vm_flush(dev, 5);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index cbf5ae2f67d..8b669d0af61 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -402,3 +402,55 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
 	{ 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
 	{}
 };
+
+void
+nv50_graph_tlb_flush(struct drm_device *dev)
+{
+	nv50_vm_flush(dev, 0);
+}
+
+void
+nv86_graph_tlb_flush(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+	bool idle, timeout = false;
+	unsigned long flags;
+	u64 start;
+	u32 tmp;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	nv_mask(dev, 0x400500, 0x00000001, 0x00000000);
+
+	start = ptimer->read(dev);
+	do {
+		idle = true;
+
+		for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+	} while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));
+
+	if (timeout) {
+		NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
+			      "0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
+			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
+	}
+
+	nv50_vm_flush(dev, 0);
+
+	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+}
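nv86_graph_tlb_flush() cannot simply fire the flush while PGRAPH is mid-transfer: it first stalls new work via the 0x400500 mask bit, then busy-waits on 0x400380/0x400384/0x400388 before flushing and unmasking. Those registers appear to pack one 3-bit state code per execution unit, with code 1 meaning busy — a reading taken from the loops above, not from documentation — and the 2000000000 comparison is a two-second cutoff assuming PTIMER counts nanoseconds. Each per-register scan reduces to a helper like this:

	/* true when no 3-bit unit field in "status" holds the busy code (1) */
	static bool units_idle(u32 status)
	{
		for (; status; status >>= 3) {
			if ((status & 7) == 1)
				return false;
		}
		return true;
	}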
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a53fc974332..b773229b764 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -402,7 +402,6 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	}
 	dev_priv->engine.instmem.flush(dev);
 
-	nv50_vm_flush(dev, 4);
 	nv50_vm_flush(dev, 6);
 
 	gpuobj->im_bound = 1;
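With PFIFO and PGRAPH now owning their TLB flushes through the hooks added above, the unconditional engine-4 flush on the instmem bind path becomes redundant and is dropped; only the engine-6 flush remains. Which hardware unit each nv50_vm_flush() index selects is not spelled out in this series — the 0 (PGRAPH) and 5 (PFIFO) assignments can only be inferred from the new per-engine hooks.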
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f12a5b3ec05..4dc5b4714c5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1650,7 +1650,36 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		}
 	}
 
-	rdev->config.evergreen.tile_config = gb_addr_config;
+	/* setup tiling info dword.  gb_addr_config is not adequate since it does
+	 * not have bank info, so create a custom tiling dword.
+	 * bits 3:0   num_pipes
+	 * bits 7:4   num_banks
+	 * bits 11:8  group_size
+	 * bits 15:12 row_size
+	 */
+	rdev->config.evergreen.tile_config = 0;
+	switch (rdev->config.evergreen.max_tile_pipes) {
+	case 1:
+	default:
+		rdev->config.evergreen.tile_config |= (0 << 0);
+		break;
+	case 2:
+		rdev->config.evergreen.tile_config |= (1 << 0);
+		break;
+	case 4:
+		rdev->config.evergreen.tile_config |= (2 << 0);
+		break;
+	case 8:
+		rdev->config.evergreen.tile_config |= (3 << 0);
+		break;
+	}
+	rdev->config.evergreen.tile_config |=
+		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+	rdev->config.evergreen.tile_config |=
+		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+	rdev->config.evergreen.tile_config |=
+		((gb_addr_config & 0x30000000) >> 28) << 12;
+
 	WREG32(GB_BACKEND_MAP, gb_backend_map);
 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
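Packing the topology into one dword makes it cheap to hand to userspace in a single query (the ioctl plumbing is elsewhere, not in this hunk). A consumer-side decode consistent with the layout comment above could look like this; note that num_pipes is stored log2-encoded by the switch, while the other three fields are raw register bitfields the consumer must still interpret:

	#include <stdint.h>
	#include <stdio.h>

	/* bits 3:0 num_pipes (log2), 7:4 num_banks, 11:8 group_size, 15:12 row_size */
	static void decode_tile_config(uint32_t tc)
	{
		printf("pipes=%u banks_field=%u group_field=%u row_field=%u\n",
		       1u << (tc & 0xf),	/* 0,1,2,3 -> 1,2,4,8 pipes */
		       (tc >> 4) & 0xf,		/* raw NOOFBANK bits */
		       (tc >> 8) & 0xf,		/* raw BURSTLENGTH bits */
		       (tc >> 12) & 0xf);	/* GB_ADDR_CONFIG bits 29:28 */
	}

	int main(void)
	{
		decode_tile_config(0x1121);	/* arbitrary example value */
		return 0;
	}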
@@ -2033,7 +2062,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 grbm_int_cntl = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
@@ -2295,6 +2324,7 @@ restart_ih:
 			case 0: /* D1 vblank */
 				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2346,7 @@ restart_ih:
 			case 0: /* D2 vblank */
 				if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2368,7 @@ restart_ih:
 			case 0: /* D3 vblank */
 				if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 2);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2390,7 @@ restart_ih:
 			case 0: /* D4 vblank */
 				if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 3);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2412,7 @@ restart_ih:
 			case 0: /* D5 vblank */
 				if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 4);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2434,7 @@ restart_ih:
 			case 0: /* D6 vblank */
 				if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 5);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D6 vblank\n");
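Each of the six display heads now sets rdev->pm.vblank_sync before waking vblank_queue, giving the power-management code a way to tell a real vblank wakeup apart from any other wake_up on the same queue. The consumer looks roughly like this, modeled on the radeon_pm.c reclocking path; the timeout value below is an assumption, not taken from this diff:

	/* sleep until the next vblank (or give up after a timeout) */
	static void foo_wait_for_vblank(struct radeon_device *rdev)
	{
		rdev->pm.vblank_sync = false;
		wait_event_timeout(rdev->irq.vblank_queue, rdev->pm.vblank_sync,
				   msecs_to_jiffies(200));
	}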
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index ac3b6dde23d..e0e590110dd 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -459,7 +459,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	obj_size += evergreen_ps_size * 4;
 	obj_size = ALIGN(obj_size, 256);
 
-	r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+	r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
 			     &rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("evergreen failed to allocate shader\n");
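This is the first of several call sites in the series where radeon_bo_create() grows an explicit byte-alignment argument directly after the size; existing callers pass PAGE_SIZE to preserve the old placement behavior. The prototype implied by the call sites is sketched below — radeon_object.h carries the real declaration (see the diffstat), and the parameter name is a guess:

	int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
			     unsigned long size, int byte_align, bool kernel,
			     u32 domain, struct radeon_bo **bo_ptr);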
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0e8f28a6892..8e10aa9f74b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.ram.ptr) {
-		WARN(1, "R100 PCI GART already initialized.\n");
+		WARN(1, "R100 PCI GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
 	uint32_t tmp = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 34527e600fe..cde1d3480d9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "RV370 PCIE GART already initialized.\n");
+		WARN(1, "RV370 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 33952a12f0a..a3552594ccc 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
-	u32 actual_temp = 0;
 
-	if ((temp >> 7) & 1)
-		actual_temp = 0;
-	else
-		actual_temp = (temp >> 1) & 0xff;
-
-	return actual_temp * 1000;
+	return temp * 1000;
 }
 
 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -919,7 +913,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "R600 PCIE GART already initialized.\n");
+		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -2724,7 +2718,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
 	/* Allocate ring buffer */
 	if (rdev->ih.ring_obj == NULL) {
 		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
-				     true,
+				     PAGE_SIZE, true,
 				     RADEON_GEM_DOMAIN_GTT,
 				     &rdev->ih.ring_obj);
 		if (r) {
@@ -2995,7 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev)
 	u32 hdmi1, hdmi2;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 8362974ef41..86e5aa07f0d 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -501,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev)
 	obj_size += r6xx_ps_size * 4;
 	obj_size = ALIGN(obj_size, 256);
 
-	r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+	r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
 			     &rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("r600 failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 37cc2aa9f92..9bebac1ec00 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -50,6 +50,7 @@ struct r600_cs_track {
 	u32 nsamples;
 	u32 cb_color_base_last[8];
 	struct radeon_bo *cb_color_bo[8];
+	u64 cb_color_bo_mc[8];
 	u32 cb_color_bo_offset[8];
 	struct radeon_bo *cb_color_frag_bo[8];
 	struct radeon_bo *cb_color_tile_bo[8];
@@ -67,6 +68,7 @@ struct r600_cs_track {
 	u32 db_depth_size;
 	u32 db_offset;
 	struct radeon_bo *db_bo;
+	u64 db_bo_mc;
 };
 
 static inline int r600_bpe_from_format(u32 *bpe, u32 format)
@@ -140,6 +142,68 @@ static inline int r600_bpe_from_format(u32 *bpe, u32 format)
 	return 0;
 }
 
+struct array_mode_checker {
+	int array_mode;
+	u32 group_size;
+	u32 nbanks;
+	u32 npipes;
+	u32 nsamples;
+	u32 bpe;
+};
+
+/* returns alignment in pixels for pitch/height/depth and bytes for base */
+static inline int r600_get_array_mode_alignment(struct array_mode_checker *values,
+						u32 *pitch_align,
+						u32 *height_align,
+						u32 *depth_align,
+						u64 *base_align)
+{
+	u32 tile_width = 8;
+	u32 tile_height = 8;
+	u32 macro_tile_width = values->nbanks;
+	u32 macro_tile_height = values->npipes;
+	u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
+	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
+
+	switch (values->array_mode) {
+	case ARRAY_LINEAR_GENERAL:
+		/* technically tile_width/_height for pitch/height */
+		*pitch_align = 1; /* tile_width */
+		*height_align = 1; /* tile_height */
+		*depth_align = 1;
+		*base_align = 1;
+		break;
+	case ARRAY_LINEAR_ALIGNED:
+		*pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
+		*height_align = tile_height;
+		*depth_align = 1;
+		*base_align = values->group_size;
+		break;
+	case ARRAY_1D_TILED_THIN1:
+		*pitch_align = max((u32)tile_width,
+				   (u32)(values->group_size /
+					 (tile_height * values->bpe * values->nsamples)));
+		*height_align = tile_height;
+		*depth_align = 1;
+		*base_align = values->group_size;
+		break;
+	case ARRAY_2D_TILED_THIN1:
+		*pitch_align = max((u32)macro_tile_width,
+				   (u32)(((values->group_size / tile_height) /
+					  (values->bpe * values->nsamples)) *
+					 values->nbanks)) * tile_width;
+		*height_align = macro_tile_height * tile_height;
+		*depth_align = 1;
+		*base_align = max(macro_tile_bytes,
+				  (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static void r600_cs_track_init(struct r600_cs_track *track)
 {
 	int i;
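Plugging illustrative numbers into the ARRAY_2D_TILED_THIN1 case makes the units concrete. With group_size = 256 bytes, nbanks = 4, npipes = 2, bpe = 4 and nsamples = 1 (example values, not a specific ASIC):

	pitch_align  = max(4, ((256 / 8) / (4 * 1)) * 4) * 8;	/* = 32 * 8 = 256 pixels */
	height_align = 2 * 8;					/* npipes * tile_height = 16 rows */
	base_align   = max(4 * 2 * (8 * 8 * 4 * 1),		/* macro_tile_bytes = 2048 */
			   256 * 4 * 16 * 1);			/* = 16384 bytes */

so a 2D-tiled surface must start on a 16 KiB boundary and its pitch must be a multiple of 256 pixels.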
@@ -153,10 +217,12 @@ static void r600_cs_track_init(struct r600_cs_track *track)
 		track->cb_color_info[i] = 0;
 		track->cb_color_bo[i] = NULL;
 		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
 	}
 	track->cb_target_mask = 0xFFFFFFFF;
 	track->cb_shader_mask = 0xFFFFFFFF;
 	track->db_bo = NULL;
+	track->db_bo_mc = 0xFFFFFFFF;
 	/* assume the biggest format and that htile is enabled */
 	track->db_depth_info = 7 | (1 << 25);
 	track->db_depth_view = 0xFFFFC000;
@@ -168,7 +234,10 @@ static void r600_cs_track_init(struct r600_cs_track *track)
 static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 {
 	struct r600_cs_track *track = p->track;
-	u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
+	u32 bpe = 0, slice_tile_max, size, tmp;
+	u32 height, height_align, pitch, pitch_align, depth_align;
+	u64 base_offset, base_align;
+	struct array_mode_checker array_check;
 	volatile u32 *ib = p->ib->ptr;
 	unsigned array_mode;
 
@@ -183,60 +252,40 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 			 i, track->cb_color_info[i]);
 		return -EINVAL;
 	}
-	/* pitch is the number of 8x8 tiles per row */
-	pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
+	/* pitch in pixels */
+	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
 	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
 	slice_tile_max *= 64;
-	height = slice_tile_max / (pitch * 8);
+	height = slice_tile_max / pitch;
 	if (height > 8192)
 		height = 8192;
 	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
+
+	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
+	array_check.array_mode = array_mode;
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = track->nsamples;
+	array_check.bpe = bpe;
+	if (r600_get_array_mode_alignment(&array_check,
+					  &pitch_align, &height_align, &depth_align, &base_align)) {
+		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+			 track->cb_color_info[i]);
+		return -EINVAL;
+	}
 	switch (array_mode) {
 	case V_0280A0_ARRAY_LINEAR_GENERAL:
-		/* technically height & 0x7 */
 		break;
 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
-		pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
-		if (!IS_ALIGNED(height, 8)) {
-			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
-				 __func__, __LINE__, height);
-			return -EINVAL;
-		}
 		break;
 	case V_0280A0_ARRAY_1D_TILED_THIN1:
-		pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
 		/* avoid breaking userspace */
 		if (height > 7)
 			height &= ~0x7;
-		if (!IS_ALIGNED(height, 8)) {
-			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
-				 __func__, __LINE__, height);
-			return -EINVAL;
-		}
 		break;
 	case V_0280A0_ARRAY_2D_TILED_THIN1:
-		pitch_align = max((u32)track->nbanks,
-				  (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
-		if (!IS_ALIGNED((height / 8), track->npipes)) {
-			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
-				 __func__, __LINE__, height);
-			return -EINVAL;
-		}
 		break;
 	default:
 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -244,13 +293,29 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 			 track->cb_color_info[i]);
 		return -EINVAL;
 	}
+
+	if (!IS_ALIGNED(pitch, pitch_align)) {
+		dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+			 __func__, __LINE__, pitch);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(height, height_align)) {
+		dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
+			 __func__, __LINE__, height);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(base_offset, base_align)) {
+		dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+		return -EINVAL;
+	}
+
 	/* check offset */
-	tmp = height * pitch * 8 * bpe;
+	tmp = height * pitch * bpe;
 	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
 		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
 			/* the initial DDX does bad things with the CB size occasionally */
 			/* it rounds up height too far for slice tile max but the BO is smaller */
-			tmp = (height - 7) * 8 * bpe;
+			tmp = (height - 7) * pitch * bpe;
 			if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
 				dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
 				return -EINVAL;
@@ -260,15 +325,11 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 			return -EINVAL;
 		}
 	}
-	if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
-		dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
-		return -EINVAL;
-	}
 	/* limit max tile */
-	tmp = (height * pitch * 8) >> 6;
+	tmp = (height * pitch) >> 6;
 	if (tmp < slice_tile_max)
 		slice_tile_max = tmp;
-	tmp = S_028060_PITCH_TILE_MAX(pitch - 1) |
+	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
 		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
 	ib[track->cb_color_size_idx[i]] = tmp;
 	return 0;
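With pitch now carried in pixels, the register round-trip is explicit: a PITCH_TILE_MAX field of, say, 31 decodes to (31 + 1) * 8 = 256 pixels, and the write-back re-encodes it as (256 / 8) - 1 = 31. The old code kept the value in 8-pixel tile units throughout, which is why the removed byte-size computations all multiplied by an extra 8.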
@@ -310,7 +371,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 	/* Check depth buffer */
 	if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
 	    G_028800_Z_ENABLE(track->db_depth_control)) {
-		u32 nviews, bpe, ntiles, pitch, pitch_align, height, size, slice_tile_max;
+		u32 nviews, bpe, ntiles, size, slice_tile_max;
+		u32 height, height_align, pitch, pitch_align, depth_align;
+		u64 base_offset, base_align;
+		struct array_mode_checker array_check;
+		int array_mode;
+
 		if (track->db_bo == NULL) {
 			dev_warn(p->dev, "z/stencil with no depth buffer\n");
 			return -EINVAL;
@@ -353,41 +419,34 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 			ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
 		} else {
 			size = radeon_bo_size(track->db_bo);
-			pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
+			/* pitch in pixels */
+			pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
 			slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
 			slice_tile_max *= 64;
-			height = slice_tile_max / (pitch * 8);
+			height = slice_tile_max / pitch;
 			if (height > 8192)
 				height = 8192;
-			switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
+			base_offset = track->db_bo_mc + track->db_offset;
+			array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+			array_check.array_mode = array_mode;
+			array_check.group_size = track->group_size;
+			array_check.nbanks = track->nbanks;
+			array_check.npipes = track->npipes;
+			array_check.nsamples = track->nsamples;
+			array_check.bpe = bpe;
+			if (r600_get_array_mode_alignment(&array_check,
+							  &pitch_align, &height_align, &depth_align, &base_align)) {
+				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+					 G_028010_ARRAY_MODE(track->db_depth_info),
+					 track->db_depth_info);
+				return -EINVAL;
+			}
+			switch (array_mode) {
 			case V_028010_ARRAY_1D_TILED_THIN1:
-				pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
-				if (!IS_ALIGNED(pitch, pitch_align)) {
-					dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
-						 __func__, __LINE__, pitch);
-					return -EINVAL;
-				}
 				/* don't break userspace */
 				height &= ~0x7;
-				if (!IS_ALIGNED(height, 8)) {
-					dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
-						 __func__, __LINE__, height);
-					return -EINVAL;
-				}
 				break;
 			case V_028010_ARRAY_2D_TILED_THIN1:
-				pitch_align = max((u32)track->nbanks,
-						  (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8;
-				if (!IS_ALIGNED(pitch, pitch_align)) {
-					dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
-						 __func__, __LINE__, pitch);
-					return -EINVAL;
-				}
-				if (!IS_ALIGNED((height / 8), track->npipes)) {
-					dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
-						 __func__, __LINE__, height);
-					return -EINVAL;
-				}
 				break;
 			default:
 				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
@@ -395,15 +454,27 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 					 track->db_depth_info);
 				return -EINVAL;
 			}
-			if (!IS_ALIGNED(track->db_offset, track->group_size)) {
-				dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset);
+
+			if (!IS_ALIGNED(pitch, pitch_align)) {
+				dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
+					 __func__, __LINE__, pitch);
+				return -EINVAL;
+			}
+			if (!IS_ALIGNED(height, height_align)) {
+				dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
+					 __func__, __LINE__, height);
 				return -EINVAL;
 			}
+			if (!IS_ALIGNED(base_offset, base_align)) {
+				dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+				return -EINVAL;
+			}
+
 			ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
 			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
 			tmp = ntiles * bpe * 64 * nviews;
 			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
-				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
+				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
 					 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
 					 radeon_bo_size(track->db_bo));
 				return -EINVAL;
@@ -954,6 +1025,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 		track->cb_color_base_last[tmp] = ib[idx];
 		track->cb_color_bo[tmp] = reloc->robj;
+		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
 		break;
 	case DB_DEPTH_BASE:
 		r = r600_cs_packet_next_reloc(p, &reloc);
@@ -965,6 +1037,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
 		track->db_offset = radeon_get_ib_value(p, idx) << 8;
 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 		track->db_bo = reloc->robj;
+		track->db_bo_mc = reloc->lobj.gpu_offset;
 		break;
 	case DB_HTILE_DATA_BASE:
 	case SQ_PGM_START_FS:
@@ -1086,16 +1159,25 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels
 static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
 					      struct radeon_bo *texture,
 					      struct radeon_bo *mipmap,
+					      u64 base_offset,
+					      u64 mip_offset,
 					      u32 tiling_flags)
 {
 	struct r600_cs_track *track = p->track;
 	u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
-	u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align;
+	u32 word0, word1, l0_size, mipmap_size;
+	u32 height_align, pitch, pitch_align, depth_align;
+	u64 base_align;
+	struct array_mode_checker array_check;
 
 	/* on legacy kernel we don't perform advanced check */
 	if (p->rdev == NULL)
 		return 0;
 
+	/* convert to bytes */
+	base_offset <<= 8;
+	mip_offset <<= 8;
+
 	word0 = radeon_get_ib_value(p, idx + 0);
 	if (tiling_flags & RADEON_TILING_MACRO)
 		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
@@ -1128,46 +1210,38 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
 		return -EINVAL;
 	}
 
-	pitch = G_038000_PITCH(word0) + 1;
-	switch (G_038000_TILE_MODE(word0)) {
-	case V_038000_ARRAY_LINEAR_GENERAL:
-		pitch_align = 1;
-		/* XXX check height align */
-		break;
-	case V_038000_ARRAY_LINEAR_ALIGNED:
-		pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
-		/* XXX check height align */
-		break;
-	case V_038000_ARRAY_1D_TILED_THIN1:
-		pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
-		/* XXX check height align */
-		break;
-	case V_038000_ARRAY_2D_TILED_THIN1:
-		pitch_align = max((u32)track->nbanks,
-				  (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
-		/* XXX check height align */
-		break;
-	default:
-		dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
-			 G_038000_TILE_MODE(word0), word0);
+	/* pitch in texels */
+	pitch = (G_038000_PITCH(word0) + 1) * 8;
+	array_check.array_mode = G_038000_TILE_MODE(word0);
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = 1;
+	array_check.bpe = bpe;
+	if (r600_get_array_mode_alignment(&array_check,
+					  &pitch_align, &height_align, &depth_align, &base_align)) {
+		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
+			 __func__, __LINE__, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+
+	/* XXX check height as well... */
+
+	if (!IS_ALIGNED(pitch, pitch_align)) {
+		dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
+			 __func__, __LINE__, pitch);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(base_offset, base_align)) {
+		dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
+			 __func__, __LINE__, base_offset);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(mip_offset, base_align)) {
+		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
+			 __func__, __LINE__, mip_offset);
 		return -EINVAL;
 	}
-	/* XXX check offset align */
 
 	word0 = radeon_get_ib_value(p, idx + 4);
 	word1 = radeon_get_ib_value(p, idx + 5);
@@ -1402,7 +1476,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 			mipmap = reloc->robj;
 			r = r600_check_texture_resource(p, idx+(i*7)+1,
-					texture, mipmap, reloc->lobj.tiling_flags);
+					texture, mipmap,
+					base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
+					mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
+					reloc->lobj.tiling_flags);
 			if (r)
 				return r;
 			ib[idx+1+(i*7)+2] += base_offset;
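The two shifts at the top of r600_check_texture_resource() mirror this call site: gpu_offset is reduced with >> 8 when patching the IB, so both the relocation base and the IB dword are in 256-byte units, and the checker converts the sum back to bytes before testing alignment. In effect it validates, for texture i:

	/* both terms are in 256-byte units; the checker applies the <<= 8 itself */
	u64 base_bytes = ((u64)base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2)) << 8;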
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 966a793e225..bff4dc4f410 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -51,6 +51,12 @@
 #define PTE_READABLE				(1 << 5)
 #define PTE_WRITEABLE				(1 << 6)
 
+/* tiling bits */
+#define ARRAY_LINEAR_GENERAL			0x00000000
+#define ARRAY_LINEAR_ALIGNED			0x00000001
+#define ARRAY_1D_TILED_THIN1			0x00000002
+#define ARRAY_2D_TILED_THIN1			0x00000004
+
 /* Registers */
 #define	ARB_POP						0x2418
 #define		ENABLE_TC128					(1 << 30)
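These ARRAY_* values match the V_0280A0_/V_028010_/V_038000_ encodings the CS checker already switches on — that is, they are the hardware ARRAY_MODE field values rather than a driver-local enumeration, which is why ARRAY_2D_TILED_THIN1 is 0x4 and the sequence skips 0x3.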
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 73f600d39ad..3a7095743d4 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1262,6 +1262,10 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 			     (rdev->family == CHIP_RS400) ||	\
 			     (rdev->family == CHIP_RS480))
 #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
+#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) ||	\
+			    (rdev->family == CHIP_RS690) ||	\
+			    (rdev->family == CHIP_RS740) ||	\
+			    (rdev->family >= CHIP_R600))
 #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
 #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
 #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 04cac7ec903..87ead090c7d 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -526,8 +526,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 	if (crev < 2)
 		return false;
 
-	router.valid = false;
-
 	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
 	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
 	    (ctx->bios + data_offset +
@@ -624,6 +622,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
 				continue;
 
+			router.ddc_valid = false;
+			router.cd_valid = false;
 			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
 				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
 
@@ -647,9 +647,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 							usDeviceTag));
 
 				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
-					router.valid = false;
 					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
-						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID);
+						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
 						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
 							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
 								(ctx->bios + data_offset +
@@ -657,6 +656,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 							ATOM_I2C_RECORD *i2c_record;
 							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
 							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
 							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
 								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
 								(ctx->bios + data_offset +
@@ -690,10 +690,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 							case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
 								ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
 									record;
-								router.valid = true;
-								router.mux_type = ddc_path->ucMuxType;
-								router.mux_control_pin = ddc_path->ucMuxControlPin;
-								router.mux_state = ddc_path->ucMuxState[enum_id];
+								router.ddc_valid = true;
+								router.ddc_mux_type = ddc_path->ucMuxType;
+								router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+								router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+								break;
+							case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+								cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+									record;
+								router.cd_valid = true;
+								router.cd_mux_type = cd_path->ucMuxType;
+								router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+								router.cd_mux_state = cd_path->ucMuxState[enum_id];
 								break;
 							}
 							record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +868,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
 	struct radeon_router router;
 
-	router.valid = false;
+	router.ddc_valid = false;
+	router.cd_valid = false;
 
 	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
 	if (!bios_connectors)
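The single router.valid/mux_* set becomes two independent muxes — a DDC path select and a clock/data path select, each with its own validity flag — so a BIOS may describe either or both records for a connector. The reworked struct radeon_router presumably looks like the sketch below; the real definition lives in radeon_mode.h (changed per the diffstat but not shown here), and the field list is inferred from the call sites in this patch:

	struct radeon_router {
		u32 router_id;
		struct radeon_i2c_bus_rec i2c_info;	/* bus reaching the mux */
		/* DDC mux */
		bool ddc_valid;
		u8 ddc_mux_type;
		u8 ddc_mux_control_pin;
		u8 ddc_mux_state;
		/* clock/data mux */
		bool cd_valid;
		u8 cd_mux_type;
		u8 cd_mux_control_pin;
		u8 cd_mux_state;
	};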
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 7932dc4d6b9..c558685cc63 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 
 	size = bsize;
 	n = 1024;
-	r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
+	r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
+	r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 7b7ea269549..3bddea5b529 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -571,6 +571,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
 	}
 
 	if (clk_mask && data_mask) {
+		/* system specific masks */
 		i2c.mask_clk_mask = clk_mask;
 		i2c.mask_data_mask = data_mask;
 		i2c.a_clk_mask = clk_mask;
@@ -579,7 +580,19 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
 		i2c.en_data_mask = data_mask;
 		i2c.y_clk_mask = clk_mask;
 		i2c.y_data_mask = data_mask;
+	} else if ((ddc_line == RADEON_GPIOPAD_MASK) ||
+		   (ddc_line == RADEON_MDGPIO_MASK)) {
+		/* default gpiopad masks */
+		i2c.mask_clk_mask = (0x20 << 8);
+		i2c.mask_data_mask = 0x80;
+		i2c.a_clk_mask = (0x20 << 8);
+		i2c.a_data_mask = 0x80;
+		i2c.en_clk_mask = (0x20 << 8);
+		i2c.en_data_mask = 0x80;
+		i2c.y_clk_mask = (0x20 << 8);
+		i2c.y_data_mask = 0x80;
 	} else {
+		/* default masks for ddc pads */
 		i2c.mask_clk_mask = RADEON_GPIO_EN_1;
 		i2c.mask_data_mask = RADEON_GPIO_EN_0;
 		i2c.a_clk_mask = RADEON_GPIO_A_1;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4dac4b0a02e..3bef9f6d66f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
 				continue;
 
 			if (priority == true) {
-				DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
-				DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
+				DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+				DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
 				conflict->status = connector_status_disconnected;
 				radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
 			} else {
-				DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
-				DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
+				DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+				DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
 				current_status = connector_status_disconnected;
 			}
 			break;
@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
 		    mode->vdisplay == native_mode->vdisplay) {
 			*native_mode = *mode;
 			drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
-			DRM_INFO("Determined LVDS native mode details from EDID\n");
+			DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
 			break;
 		}
 		}
 	}
 	if (!native_mode->clock) {
-		DRM_INFO("No LVDS native mode details, disabling RMX\n");
+		DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
 		radeon_encoder->rmx_type = RMX_OFF;
 	}
 }
@@ -1008,9 +1008,21 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
 static int radeon_dp_get_modes(struct drm_connector *connector)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
 	int ret;
 
+	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		if (!radeon_dig_connector->edp_on)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_ON);
+	}
 	ret = radeon_ddc_get_modes(radeon_connector);
+	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		if (!radeon_dig_connector->edp_on)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+	}
+
 	return ret;
 }
 
@@ -1029,8 +1041,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
 		/* eDP is always DP */
 		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+		if (!radeon_dig_connector->edp_on)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_ON);
 		if (radeon_dp_getdpcd(radeon_connector))
 			ret = connector_status_connected;
+		if (!radeon_dig_connector->edp_on)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_OFF);
 	} else {
 		radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
 		if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
@@ -1116,7 +1134,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 			radeon_connector->shared_ddc = true;
 			shared_ddc = true;
 		}
-		if (radeon_connector->router_bus && router->valid &&
+		if (radeon_connector->router_bus && router->ddc_valid &&
 		    (radeon_connector->router.router_id == router->router_id)) {
 			radeon_connector->shared_ddc = false;
 			shared_ddc = false;
@@ -1136,7 +1154,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 	radeon_connector->connector_object_id = connector_object_id;
 	radeon_connector->hpd = *hpd;
 	radeon_connector->router = *router;
-	if (router->valid) {
+	if (router->ddc_valid || router->cd_valid) {
 		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
 		if (!radeon_connector->router_bus)
 			goto failed;
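Both eDP paths above wrap their AUX/DDC traffic in the same power bracket: turn the panel on if it is not already running (edp_on), probe, then turn it back off, since an unpowered eDP panel will not answer DPCD or EDID queries. The repeated bracket could be factored into a helper along these lines — a sketch built from the calls above, not code from this patch:

	static int with_edp_power(struct drm_connector *connector,
				  struct radeon_connector_atom_dig *dig,
				  int (*probe)(struct drm_connector *))
	{
		bool need_power = !dig->edp_on;
		int ret;

		if (need_power)
			atombios_set_edp_panel_power(connector,
						     ATOM_TRANSMITTER_ACTION_POWER_ON);
		ret = probe(connector);	/* DPCD/EDID traffic happens here */
		if (need_power)
			atombios_set_edp_panel_power(connector,
						     ATOM_TRANSMITTER_ACTION_POWER_OFF);
		return ret;
	}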
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 8adfedfe547..d8ac1849180 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -180,7 +180,7 @@ int radeon_wb_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
 				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0383631da69..1df4dc6c063 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev)
 				 radeon_connector->ddc_bus->rec.en_data_reg,
 				 radeon_connector->ddc_bus->rec.y_clk_reg,
 				 radeon_connector->ddc_bus->rec.y_data_reg);
-			if (radeon_connector->router_bus)
+			if (radeon_connector->router.ddc_valid)
 				DRM_INFO("  DDC Router 0x%x/0x%x\n",
-					 radeon_connector->router.mux_control_pin,
-					 radeon_connector->router.mux_state);
+					 radeon_connector->router.ddc_mux_control_pin,
+					 radeon_connector->router.ddc_mux_state);
+			if (radeon_connector->router.cd_valid)
+				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+					 radeon_connector->router.cd_mux_control_pin,
+					 radeon_connector->router.cd_mux_state);
 		} else {
 			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
 			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 	int ret = 0;
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
 	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector)
 	int ret = 0;
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	if (!radeon_connector->ddc_bus)
 		return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ae58b6849a2..041943df966 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -176,6 +176,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
 		return false;
 	}
 }
+
 void
 radeon_link_encoder_connector(struct drm_device *dev)
 {
@@ -228,6 +229,27 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
 	return NULL;
 }
 
+struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *other_encoder;
+	struct radeon_encoder *other_radeon_encoder;
+
+	if (radeon_encoder->is_ext_encoder)
+		return NULL;
+
+	list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+		if (other_encoder == encoder)
+			continue;
+		other_radeon_encoder = to_radeon_encoder(other_encoder);
+		if (other_radeon_encoder->is_ext_encoder &&
+		    (radeon_encoder->devices & other_radeon_encoder->devices))
+			return other_encoder;
+	}
+	return NULL;
+}
+
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
 			     struct drm_display_mode *adjusted_mode)
 {
@@ -426,52 +448,49 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
426 448
427} 449}
428 450
429void 451union dvo_encoder_control {
430atombios_external_tmds_setup(struct drm_encoder *encoder, int action) 452 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
431{ 453 DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
432 struct drm_device *dev = encoder->dev; 454 DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
433 struct radeon_device *rdev = dev->dev_private; 455};
434 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
435 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args;
436 int index = 0;
437
438 memset(&args, 0, sizeof(args));
439
440 index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
441
442 args.sXTmdsEncoder.ucEnable = action;
443
444 if (radeon_encoder->pixel_clock > 165000)
445 args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL;
446
447 /*if (pScrn->rgbBits == 8)*/
448 args.sXTmdsEncoder.ucMisc |= (1 << 1);
449
450 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
451
452}
453 456
454static void 457void
455atombios_ddia_setup(struct drm_encoder *encoder, int action) 458atombios_dvo_setup(struct drm_encoder *encoder, int action)
456{ 459{
457 struct drm_device *dev = encoder->dev; 460 struct drm_device *dev = encoder->dev;
458 struct radeon_device *rdev = dev->dev_private; 461 struct radeon_device *rdev = dev->dev_private;
459 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 462 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
460 DVO_ENCODER_CONTROL_PS_ALLOCATION args; 463 union dvo_encoder_control args;
461 int index = 0; 464 int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
462 465
463 memset(&args, 0, sizeof(args)); 466 memset(&args, 0, sizeof(args));
464 467
465 index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); 468 if (ASIC_IS_DCE3(rdev)) {
469 /* DCE3+ */
470 args.dvo_v3.ucAction = action;
471 args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
472 args.dvo_v3.ucDVOConfig = 0; /* XXX */
473 } else if (ASIC_IS_DCE2(rdev)) {
 474 /* DCE2 (pre-DCE3 R6xx, RS600/690/740) */
475 args.dvo.sDVOEncoder.ucAction = action;
476 args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
477 /* DFP1, CRT1, TV1 depending on the type of port */
478 args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
479
480 if (radeon_encoder->pixel_clock > 165000)
481 args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
482 } else {
483 /* R4xx, R5xx */
484 args.ext_tmds.sXTmdsEncoder.ucEnable = action;
466 485
467 args.sDVOEncoder.ucAction = action; 486 if (radeon_encoder->pixel_clock > 165000)
468 args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 487 args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
469 488
470 if (radeon_encoder->pixel_clock > 165000) 489 /*if (pScrn->rgbBits == 8)*/
471 args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL; 490 args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
491 }
472 492
473 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 493 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
474
475} 494}
476 495
477union lvds_encoder_control { 496union lvds_encoder_control {
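atombios_dvo_setup() folds the three generation-specific parameter layouts into one union and fills only the member that matches the ASIC. A compile-and-run sketch of that union-dispatch pattern; the structs and the dce_level check are hypothetical stand-ins, not the real AtomBIOS definitions:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical parameter layouts; the real ones come from the
     * AtomBIOS headers (DVO_ENCODER_CONTROL_PS_ALLOCATION etc.). */
    struct dvo_v1 { uint8_t enable; uint8_t misc; };
    struct dvo_v3 { uint8_t action; uint16_t pixel_clock_10khz; };

    union dvo_args {
        struct dvo_v1 v1;
        struct dvo_v3 v3;
    };

    static void dvo_setup(int dce_level, int action, int pixel_clock_khz)
    {
        union dvo_args args;

        /* One zeroed block serves every layout; only the member
         * matching this ASIC generation is then filled in. */
        memset(&args, 0, sizeof(args));

        if (dce_level >= 3) {
            args.v3.action = (uint8_t)action;
            args.v3.pixel_clock_10khz = (uint16_t)(pixel_clock_khz / 10);
        } else {
            args.v1.enable = (uint8_t)action;
            if (pixel_clock_khz > 165000)   /* single-link TMDS limit */
                args.v1.misc |= 0x01;       /* dual-link flag */
        }
        /* the driver would now hand &args to the command table */
        printf("args block ready (%zu bytes)\n", sizeof(args));
    }

    int main(void)
    {
        dvo_setup(3, 1, 162000);
        dvo_setup(2, 1, 270000);
        return 0;
    }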
@@ -532,14 +551,14 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
532 if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) 551 if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
533 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; 552 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
534 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) 553 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
535 args.v1.ucMisc |= (1 << 1); 554 args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
536 } else { 555 } else {
537 if (dig->linkb) 556 if (dig->linkb)
538 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; 557 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
539 if (radeon_encoder->pixel_clock > 165000) 558 if (radeon_encoder->pixel_clock > 165000)
540 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; 559 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
541 /*if (pScrn->rgbBits == 8) */ 560 /*if (pScrn->rgbBits == 8) */
542 args.v1.ucMisc |= (1 << 1); 561 args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
543 } 562 }
544 break; 563 break;
545 case 2: 564 case 2:
@@ -595,6 +614,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
595int 614int
596atombios_get_encoder_mode(struct drm_encoder *encoder) 615atombios_get_encoder_mode(struct drm_encoder *encoder)
597{ 616{
617 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
598 struct drm_device *dev = encoder->dev; 618 struct drm_device *dev = encoder->dev;
599 struct radeon_device *rdev = dev->dev_private; 619 struct radeon_device *rdev = dev->dev_private;
600 struct drm_connector *connector; 620 struct drm_connector *connector;
@@ -602,9 +622,20 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
602 struct radeon_connector_atom_dig *dig_connector; 622 struct radeon_connector_atom_dig *dig_connector;
603 623
604 connector = radeon_get_connector_for_encoder(encoder); 624 connector = radeon_get_connector_for_encoder(encoder);
605 if (!connector) 625 if (!connector) {
606 return 0; 626 switch (radeon_encoder->encoder_id) {
607 627 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
628 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
629 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
630 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
631 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
632 return ATOM_ENCODER_MODE_DVI;
633 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
634 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
635 default:
636 return ATOM_ENCODER_MODE_CRT;
637 }
638 }
608 radeon_connector = to_radeon_connector(connector); 639 radeon_connector = to_radeon_connector(connector);
609 640
610 switch (connector->connector_type) { 641 switch (connector->connector_type) {
@@ -834,6 +865,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
834 memset(&args, 0, sizeof(args)); 865 memset(&args, 0, sizeof(args));
835 866
836 switch (radeon_encoder->encoder_id) { 867 switch (radeon_encoder->encoder_id) {
868 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
869 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
870 break;
837 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 871 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
838 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 872 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
839 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 873 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
@@ -978,6 +1012,105 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
978 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1012 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
979} 1013}
980 1014
1015void
1016atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1017{
1018 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1019 struct drm_device *dev = radeon_connector->base.dev;
1020 struct radeon_device *rdev = dev->dev_private;
1021 union dig_transmitter_control args;
1022 int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
1023 uint8_t frev, crev;
1024
1025 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
1026 return;
1027
1028 if (!ASIC_IS_DCE4(rdev))
1029 return;
1030
 1031 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
 1032     (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
1033 return;
1034
1035 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1036 return;
1037
1038 memset(&args, 0, sizeof(args));
1039
1040 args.v1.ucAction = action;
1041
1042 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1043}
1044
1045union external_encoder_control {
1046 EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
1047};
1048
1049static void
1050atombios_external_encoder_setup(struct drm_encoder *encoder,
1051 struct drm_encoder *ext_encoder,
1052 int action)
1053{
1054 struct drm_device *dev = encoder->dev;
1055 struct radeon_device *rdev = dev->dev_private;
1056 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1057 union external_encoder_control args;
1058 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1059 int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
1060 u8 frev, crev;
1061 int dp_clock = 0;
1062 int dp_lane_count = 0;
1063 int connector_object_id = 0;
1064
1065 if (connector) {
1066 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1067 struct radeon_connector_atom_dig *dig_connector =
1068 radeon_connector->con_priv;
1069
1070 dp_clock = dig_connector->dp_clock;
1071 dp_lane_count = dig_connector->dp_lane_count;
1072 connector_object_id =
1073 (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
1074 }
1075
1076 memset(&args, 0, sizeof(args));
1077
1078 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1079 return;
1080
1081 switch (frev) {
1082 case 1:
1083 /* no params on frev 1 */
1084 break;
1085 case 2:
1086 switch (crev) {
1087 case 1:
1088 case 2:
1089 args.v1.sDigEncoder.ucAction = action;
1090 args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
1091 args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
1092
1093 if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
1094 if (dp_clock == 270000)
1095 args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
1096 args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
1097 } else if (radeon_encoder->pixel_clock > 165000)
1098 args.v1.sDigEncoder.ucLaneNum = 8;
1099 else
1100 args.v1.sDigEncoder.ucLaneNum = 4;
1101 break;
1102 default:
1103 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
1104 return;
1105 }
1106 break;
1107 default:
1108 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
1109 return;
1110 }
1111 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1112}
1113
981static void 1114static void
982atombios_yuv_setup(struct drm_encoder *encoder, bool enable) 1115atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
983{ 1116{
@@ -1021,6 +1154,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1021 struct drm_device *dev = encoder->dev; 1154 struct drm_device *dev = encoder->dev;
1022 struct radeon_device *rdev = dev->dev_private; 1155 struct radeon_device *rdev = dev->dev_private;
1023 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1156 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1157 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
1024 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; 1158 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
1025 int index = 0; 1159 int index = 0;
1026 bool is_dig = false; 1160 bool is_dig = false;
@@ -1043,9 +1177,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1043 break; 1177 break;
1044 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1178 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1045 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1179 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1046 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1047 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); 1180 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
1048 break; 1181 break;
1182 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1183 if (ASIC_IS_DCE3(rdev))
1184 is_dig = true;
1185 else
1186 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
1187 break;
1049 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 1188 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1050 index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); 1189 index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
1051 break; 1190 break;
@@ -1082,34 +1221,85 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1082 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1221 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1083 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1222 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1084 1223
1224 if (connector &&
1225 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
1226 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1227 struct radeon_connector_atom_dig *radeon_dig_connector =
1228 radeon_connector->con_priv;
1229 atombios_set_edp_panel_power(connector,
1230 ATOM_TRANSMITTER_ACTION_POWER_ON);
1231 radeon_dig_connector->edp_on = true;
1232 }
1085 dp_link_train(encoder, connector); 1233 dp_link_train(encoder, connector);
1086 if (ASIC_IS_DCE4(rdev)) 1234 if (ASIC_IS_DCE4(rdev))
1087 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); 1235 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
1088 } 1236 }
1237 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1238 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
1089 break; 1239 break;
1090 case DRM_MODE_DPMS_STANDBY: 1240 case DRM_MODE_DPMS_STANDBY:
1091 case DRM_MODE_DPMS_SUSPEND: 1241 case DRM_MODE_DPMS_SUSPEND:
1092 case DRM_MODE_DPMS_OFF: 1242 case DRM_MODE_DPMS_OFF:
1093 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); 1243 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1094 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1244 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1245 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1246
1095 if (ASIC_IS_DCE4(rdev)) 1247 if (ASIC_IS_DCE4(rdev))
1096 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); 1248 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
1249 if (connector &&
1250 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
1251 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1252 struct radeon_connector_atom_dig *radeon_dig_connector =
1253 radeon_connector->con_priv;
1254 atombios_set_edp_panel_power(connector,
1255 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1256 radeon_dig_connector->edp_on = false;
1257 }
1097 } 1258 }
1259 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1260 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
1098 break; 1261 break;
1099 } 1262 }
1100 } else { 1263 } else {
1101 switch (mode) { 1264 switch (mode) {
1102 case DRM_MODE_DPMS_ON: 1265 case DRM_MODE_DPMS_ON:
1103 args.ucAction = ATOM_ENABLE; 1266 args.ucAction = ATOM_ENABLE;
1267 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1268 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1269 args.ucAction = ATOM_LCD_BLON;
1270 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1271 }
1104 break; 1272 break;
1105 case DRM_MODE_DPMS_STANDBY: 1273 case DRM_MODE_DPMS_STANDBY:
1106 case DRM_MODE_DPMS_SUSPEND: 1274 case DRM_MODE_DPMS_SUSPEND:
1107 case DRM_MODE_DPMS_OFF: 1275 case DRM_MODE_DPMS_OFF:
1108 args.ucAction = ATOM_DISABLE; 1276 args.ucAction = ATOM_DISABLE;
1277 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1278 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1279 args.ucAction = ATOM_LCD_BLOFF;
1280 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1281 }
1109 break; 1282 break;
1110 } 1283 }
1111 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1112 } 1284 }
1285
1286 if (ext_encoder) {
1287 int action;
1288
1289 switch (mode) {
1290 case DRM_MODE_DPMS_ON:
1291 default:
1292 action = ATOM_ENABLE;
1293 break;
1294 case DRM_MODE_DPMS_STANDBY:
1295 case DRM_MODE_DPMS_SUSPEND:
1296 case DRM_MODE_DPMS_OFF:
1297 action = ATOM_DISABLE;
1298 break;
1299 }
1300 atombios_external_encoder_setup(encoder, ext_encoder, action);
1301 }
1302
1113 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 1303 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
1114 1304
1115} 1305}
@@ -1242,7 +1432,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1242 break; 1432 break;
1243 default: 1433 default:
1244 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); 1434 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
1245 break; 1435 return;
1246 } 1436 }
1247 1437
1248 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1438 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -1357,6 +1547,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1357 struct drm_device *dev = encoder->dev; 1547 struct drm_device *dev = encoder->dev;
1358 struct radeon_device *rdev = dev->dev_private; 1548 struct radeon_device *rdev = dev->dev_private;
1359 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1549 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1550 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
1360 1551
1361 radeon_encoder->pixel_clock = adjusted_mode->clock; 1552 radeon_encoder->pixel_clock = adjusted_mode->clock;
1362 1553
@@ -1400,11 +1591,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1400 } 1591 }
1401 break; 1592 break;
1402 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1593 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1403 atombios_ddia_setup(encoder, ATOM_ENABLE);
1404 break;
1405 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1594 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1406 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1595 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1407 atombios_external_tmds_setup(encoder, ATOM_ENABLE); 1596 atombios_dvo_setup(encoder, ATOM_ENABLE);
1408 break; 1597 break;
1409 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1598 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1410 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 1599 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1419,6 +1608,11 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1419 } 1608 }
1420 break; 1609 break;
1421 } 1610 }
1611
1612 if (ext_encoder) {
1613 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1614 }
1615
1422 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1616 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1423 1617
1424 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { 1618 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
@@ -1520,6 +1714,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
1520static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) 1714static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1521{ 1715{
1522 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1716 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1717 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1523 1718
1524 if (radeon_encoder->active_device & 1719 if (radeon_encoder->active_device &
1525 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { 1720 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
@@ -1531,6 +1726,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1531 radeon_atom_output_lock(encoder, true); 1726 radeon_atom_output_lock(encoder, true);
1532 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1727 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1533 1728
1729 /* select the clock/data port if it uses a router */
1730 if (connector) {
1731 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1732 if (radeon_connector->router.cd_valid)
1733 radeon_router_select_cd_port(radeon_connector);
1734 }
1735
1534 /* this is needed for the pll/ss setup to work correctly in some cases */ 1736 /* this is needed for the pll/ss setup to work correctly in some cases */
1535 atombios_set_encoder_crtc_source(encoder); 1737 atombios_set_encoder_crtc_source(encoder);
1536} 1738}
@@ -1547,6 +1749,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1547 struct radeon_device *rdev = dev->dev_private; 1749 struct radeon_device *rdev = dev->dev_private;
1548 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1750 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1549 struct radeon_encoder_atom_dig *dig; 1751 struct radeon_encoder_atom_dig *dig;
1752
1753 /* check for pre-DCE3 cards with shared encoders;
1754 * can't really use the links individually, so don't disable
1755 * the encoder if it's in use by another connector
1756 */
1757 if (!ASIC_IS_DCE3(rdev)) {
1758 struct drm_encoder *other_encoder;
1759 struct radeon_encoder *other_radeon_encoder;
1760
1761 list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
1762 other_radeon_encoder = to_radeon_encoder(other_encoder);
1763 if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
1764 drm_helper_encoder_in_use(other_encoder))
1765 goto disable_done;
1766 }
1767 }
1768
1550 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1769 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1551 1770
1552 switch (radeon_encoder->encoder_id) { 1771 switch (radeon_encoder->encoder_id) {
@@ -1570,11 +1789,9 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1570 } 1789 }
1571 break; 1790 break;
1572 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1791 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1573 atombios_ddia_setup(encoder, ATOM_DISABLE);
1574 break;
1575 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1792 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1576 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1793 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1577 atombios_external_tmds_setup(encoder, ATOM_DISABLE); 1794 atombios_dvo_setup(encoder, ATOM_DISABLE);
1578 break; 1795 break;
1579 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1796 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1580 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 1797 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1586,6 +1803,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1586 break; 1803 break;
1587 } 1804 }
1588 1805
1806disable_done:
1589 if (radeon_encoder_is_digital(encoder)) { 1807 if (radeon_encoder_is_digital(encoder)) {
1590 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) 1808 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
1591 r600_hdmi_disable(encoder); 1809 r600_hdmi_disable(encoder);
@@ -1595,6 +1813,53 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1595 radeon_encoder->active_device = 0; 1813 radeon_encoder->active_device = 0;
1596} 1814}
1597 1815
1816/* these are handled by the primary encoders */
1817static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
1818{
1819
1820}
1821
1822static void radeon_atom_ext_commit(struct drm_encoder *encoder)
1823{
1824
1825}
1826
1827static void
1828radeon_atom_ext_mode_set(struct drm_encoder *encoder,
1829 struct drm_display_mode *mode,
1830 struct drm_display_mode *adjusted_mode)
1831{
1832
1833}
1834
1835static void radeon_atom_ext_disable(struct drm_encoder *encoder)
1836{
1837
1838}
1839
1840static void
1841radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
1842{
1843
1844}
1845
1846static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
1847 struct drm_display_mode *mode,
1848 struct drm_display_mode *adjusted_mode)
1849{
1850 return true;
1851}
1852
1853static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
1854 .dpms = radeon_atom_ext_dpms,
1855 .mode_fixup = radeon_atom_ext_mode_fixup,
1856 .prepare = radeon_atom_ext_prepare,
1857 .mode_set = radeon_atom_ext_mode_set,
1858 .commit = radeon_atom_ext_commit,
1859 .disable = radeon_atom_ext_disable,
1860 /* no detect for TMDS/LVDS yet */
1861};
1862
1598static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { 1863static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
1599 .dpms = radeon_atom_encoder_dpms, 1864 .dpms = radeon_atom_encoder_dpms,
1600 .mode_fixup = radeon_atom_mode_fixup, 1865 .mode_fixup = radeon_atom_mode_fixup,
@@ -1704,6 +1969,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
1704 radeon_encoder->devices = supported_device; 1969 radeon_encoder->devices = supported_device;
1705 radeon_encoder->rmx_type = RMX_OFF; 1970 radeon_encoder->rmx_type = RMX_OFF;
1706 radeon_encoder->underscan_type = UNDERSCAN_OFF; 1971 radeon_encoder->underscan_type = UNDERSCAN_OFF;
1972 radeon_encoder->is_ext_encoder = false;
1707 1973
1708 switch (radeon_encoder->encoder_id) { 1974 switch (radeon_encoder->encoder_id) {
1709 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 1975 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -1745,6 +2011,9 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
1745 radeon_encoder->rmx_type = RMX_FULL; 2011 radeon_encoder->rmx_type = RMX_FULL;
1746 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); 2012 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
1747 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); 2013 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
2014 } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
2015 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
2016 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
1748 } else { 2017 } else {
1749 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2018 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
1750 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 2019 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
@@ -1753,5 +2022,22 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
1753 } 2022 }
1754 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 2023 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
1755 break; 2024 break;
2025 case ENCODER_OBJECT_ID_SI170B:
2026 case ENCODER_OBJECT_ID_CH7303:
2027 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
2028 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
2029 case ENCODER_OBJECT_ID_TITFP513:
2030 case ENCODER_OBJECT_ID_VT1623:
2031 case ENCODER_OBJECT_ID_HDMI_SI1930:
2032 /* these are handled by the primary encoders */
2033 radeon_encoder->is_ext_encoder = true;
2034 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
2035 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
2036 else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
2037 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
2038 else
2039 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
2040 drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
2041 break;
1756 } 2042 }
1757} 2043}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 216392d0353..daacb281dfa 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -240,7 +240,8 @@ retry:
240 */ 240 */
241 if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { 241 if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
242 /* good news we believe it's a lockup */ 242 /* good news we believe it's a lockup */
243 WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); 243 WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
244 fence->seq, seq);
244 /* FIXME: what should we do ? marking everyone 245 /* FIXME: what should we do ? marking everyone
245 * as signaled for now 246 * as signaled for now
246 */ 247 */
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e65b90317fa..65016117d95 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -79,8 +79,8 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
79 79
80 if (rdev->gart.table.vram.robj == NULL) { 80 if (rdev->gart.table.vram.robj == NULL) {
81 r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, 81 r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
82 true, RADEON_GEM_DOMAIN_VRAM, 82 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
83 &rdev->gart.table.vram.robj); 83 &rdev->gart.table.vram.robj);
84 if (r) { 84 if (r) {
85 return r; 85 return r;
86 } 86 }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d1e595d9172..df95eb83dac 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -67,7 +67,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
67 if (alignment < PAGE_SIZE) { 67 if (alignment < PAGE_SIZE) {
68 alignment = PAGE_SIZE; 68 alignment = PAGE_SIZE;
69 } 69 }
70 r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); 70 r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
71 if (r) { 71 if (r) {
72 if (r != -ERESTARTSYS) 72 if (r != -ERESTARTSYS)
73 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", 73 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 6a13ee38a5b..ded2a45bc95 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
53 }; 53 };
54 54
55 /* on hw with routers, select right port */ 55 /* on hw with routers, select right port */
56 if (radeon_connector->router.valid) 56 if (radeon_connector->router.ddc_valid)
57 radeon_router_select_port(radeon_connector); 57 radeon_router_select_ddc_port(radeon_connector);
58 58
59 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); 59 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
60 if (ret == 2) 60 if (ret == 2)
@@ -896,7 +896,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
896 ((rdev->family <= CHIP_RS480) || 896 ((rdev->family <= CHIP_RS480) ||
897 ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { 897 ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
898 /* set the radeon hw i2c adapter */ 898 /* set the radeon hw i2c adapter */
899 sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name); 899 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
900 "Radeon i2c hw bus %s", name);
900 i2c->adapter.algo = &radeon_i2c_algo; 901 i2c->adapter.algo = &radeon_i2c_algo;
901 ret = i2c_add_adapter(&i2c->adapter); 902 ret = i2c_add_adapter(&i2c->adapter);
902 if (ret) { 903 if (ret) {
@@ -905,7 +906,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
905 } 906 }
906 } else { 907 } else {
907 /* set the radeon bit adapter */ 908 /* set the radeon bit adapter */
908 sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name); 909 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
910 "Radeon i2c bit bus %s", name);
909 i2c->adapter.algo_data = &i2c->algo.bit; 911 i2c->adapter.algo_data = &i2c->algo.bit;
910 i2c->algo.bit.pre_xfer = pre_xfer; 912 i2c->algo.bit.pre_xfer = pre_xfer;
911 i2c->algo.bit.post_xfer = post_xfer; 913 i2c->algo.bit.post_xfer = post_xfer;
@@ -946,6 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
946 i2c->rec = *rec; 948 i2c->rec = *rec;
947 i2c->adapter.owner = THIS_MODULE; 949 i2c->adapter.owner = THIS_MODULE;
948 i2c->dev = dev; 950 i2c->dev = dev;
951 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
952 "Radeon aux bus %s", name);
949 i2c_set_adapdata(&i2c->adapter, i2c); 953 i2c_set_adapdata(&i2c->adapter, i2c);
950 i2c->adapter.algo_data = &i2c->algo.dp; 954 i2c->adapter.algo_data = &i2c->algo.dp;
951 i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; 955 i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
@@ -1084,26 +1088,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
1084 addr, val); 1088 addr, val);
1085} 1089}
1086 1090
1087/* router switching */ 1091/* ddc router switching */
1088void radeon_router_select_port(struct radeon_connector *radeon_connector) 1092void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
1089{ 1093{
1090 u8 val; 1094 u8 val;
1091 1095
1092 if (!radeon_connector->router.valid) 1096 if (!radeon_connector->router.ddc_valid)
1093 return; 1097 return;
1094 1098
1095 radeon_i2c_get_byte(radeon_connector->router_bus, 1099 radeon_i2c_get_byte(radeon_connector->router_bus,
1096 radeon_connector->router.i2c_addr, 1100 radeon_connector->router.i2c_addr,
1097 0x3, &val); 1101 0x3, &val);
1098 val &= radeon_connector->router.mux_control_pin; 1102 val &= ~radeon_connector->router.ddc_mux_control_pin;
1099 radeon_i2c_put_byte(radeon_connector->router_bus, 1103 radeon_i2c_put_byte(radeon_connector->router_bus,
1100 radeon_connector->router.i2c_addr, 1104 radeon_connector->router.i2c_addr,
1101 0x3, val); 1105 0x3, val);
1102 radeon_i2c_get_byte(radeon_connector->router_bus, 1106 radeon_i2c_get_byte(radeon_connector->router_bus,
1103 radeon_connector->router.i2c_addr, 1107 radeon_connector->router.i2c_addr,
1104 0x1, &val); 1108 0x1, &val);
1105 val &= radeon_connector->router.mux_control_pin; 1109 val &= ~radeon_connector->router.ddc_mux_control_pin;
1106 val |= radeon_connector->router.mux_state; 1110 val |= radeon_connector->router.ddc_mux_state;
1111 radeon_i2c_put_byte(radeon_connector->router_bus,
1112 radeon_connector->router.i2c_addr,
1113 0x1, val);
1114}
1115
1116/* clock/data router switching */
1117void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
1118{
1119 u8 val;
1120
1121 if (!radeon_connector->router.cd_valid)
1122 return;
1123
1124 radeon_i2c_get_byte(radeon_connector->router_bus,
1125 radeon_connector->router.i2c_addr,
1126 0x3, &val);
1127 val &= ~radeon_connector->router.cd_mux_control_pin;
1128 radeon_i2c_put_byte(radeon_connector->router_bus,
1129 radeon_connector->router.i2c_addr,
1130 0x3, val);
1131 radeon_i2c_get_byte(radeon_connector->router_bus,
1132 radeon_connector->router.i2c_addr,
1133 0x1, &val);
1134 val &= ~radeon_connector->router.cd_mux_control_pin;
1135 val |= radeon_connector->router.cd_mux_state;
1107 radeon_i2c_put_byte(radeon_connector->router_bus, 1136 radeon_i2c_put_byte(radeon_connector->router_bus,
1108 radeon_connector->router.i2c_addr, 1137 radeon_connector->router.i2c_addr,
1109 0x1, val); 1138 0x1, val);
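Both port-select helpers use the same read-modify-write idiom on the router registers: clear the mux control bits, then OR in the desired state (note the diff also adds the missing ~, so the control bits are actually cleared instead of being used as a mask). A standalone sketch against an in-memory register file; the register addresses and bit values here are made up:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t regs[4];    /* stand-in for the i2c router registers */

    static uint8_t reg_get(uint8_t addr) { return regs[addr]; }
    static void reg_put(uint8_t addr, uint8_t val) { regs[addr] = val; }

    /* Select a mux port: clear the control pins, then set the state,
     * as radeon_router_select_ddc_port()/_cd_port() do at 0x3 and 0x1. */
    static void select_port(uint8_t control_pin, uint8_t state)
    {
        uint8_t val;

        val = reg_get(0x3);
        val &= (uint8_t)~control_pin;   /* pins under driver control */
        reg_put(0x3, val);

        val = reg_get(0x1);
        val &= (uint8_t)~control_pin;
        val |= state;                   /* route to the chosen port */
        reg_put(0x1, val);
    }

    int main(void)
    {
        regs[0x1] = 0xff;
        regs[0x3] = 0xff;
        select_port(0x0c, 0x04);
        printf("reg1=0x%02x reg3=0x%02x\n", regs[0x1], regs[0x3]);
        return 0;
    }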
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 2f349a30019..465746bd51b 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -76,7 +76,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
76 default: 76 default:
77 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 77 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
78 crtc); 78 crtc);
79 return EINVAL; 79 return -EINVAL;
80 } 80 }
81 } else { 81 } else {
82 switch (crtc) { 82 switch (crtc) {
@@ -89,7 +89,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
89 default: 89 default:
90 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 90 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
91 crtc); 91 crtc);
92 return EINVAL; 92 return -EINVAL;
93 } 93 }
94 } 94 }
95 95
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0b8397000f4..59f834ba283 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -670,7 +670,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
670 670
671 if (rdev->is_atom_bios) { 671 if (rdev->is_atom_bios) {
672 radeon_encoder->pixel_clock = adjusted_mode->clock; 672 radeon_encoder->pixel_clock = adjusted_mode->clock;
673 atombios_external_tmds_setup(encoder, ATOM_ENABLE); 673 atombios_dvo_setup(encoder, ATOM_ENABLE);
674 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); 674 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
675 } else { 675 } else {
676 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); 676 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92457163d07..e301c6f9e05 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -375,6 +375,7 @@ struct radeon_encoder {
375 int hdmi_config_offset; 375 int hdmi_config_offset;
376 int hdmi_audio_workaround; 376 int hdmi_audio_workaround;
377 int hdmi_buffer_status; 377 int hdmi_buffer_status;
378 bool is_ext_encoder;
378}; 379};
379 380
380struct radeon_connector_atom_dig { 381struct radeon_connector_atom_dig {
@@ -385,6 +386,7 @@ struct radeon_connector_atom_dig {
385 u8 dp_sink_type; 386 u8 dp_sink_type;
386 int dp_clock; 387 int dp_clock;
387 int dp_lane_count; 388 int dp_lane_count;
389 bool edp_on;
388}; 390};
389 391
390struct radeon_gpio_rec { 392struct radeon_gpio_rec {
@@ -401,13 +403,19 @@ struct radeon_hpd {
401}; 403};
402 404
403struct radeon_router { 405struct radeon_router {
404 bool valid;
405 u32 router_id; 406 u32 router_id;
406 struct radeon_i2c_bus_rec i2c_info; 407 struct radeon_i2c_bus_rec i2c_info;
407 u8 i2c_addr; 408 u8 i2c_addr;
408 u8 mux_type; 409 /* i2c mux */
409 u8 mux_control_pin; 410 bool ddc_valid;
410 u8 mux_state; 411 u8 ddc_mux_type;
412 u8 ddc_mux_control_pin;
413 u8 ddc_mux_state;
414 /* clock/data mux */
415 bool cd_valid;
416 u8 cd_mux_type;
417 u8 cd_mux_control_pin;
418 u8 cd_mux_state;
411}; 419};
412 420
413struct radeon_connector { 421struct radeon_connector {
@@ -488,7 +496,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
488 u8 slave_addr, 496 u8 slave_addr,
489 u8 addr, 497 u8 addr,
490 u8 val); 498 u8 val);
491extern void radeon_router_select_port(struct radeon_connector *radeon_connector); 499extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
500extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
492extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 501extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
493extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 502extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
494 503
@@ -516,9 +525,10 @@ struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev
516struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); 525struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
517struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); 526struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
518struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); 527struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
519extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); 528extern void atombios_dvo_setup(struct drm_encoder *encoder, int action);
520extern void atombios_digital_setup(struct drm_encoder *encoder, int action); 529extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
521extern int atombios_get_encoder_mode(struct drm_encoder *encoder); 530extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
531extern void atombios_set_edp_panel_power(struct drm_connector *connector, int action);
522extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); 532extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
523 533
524extern void radeon_crtc_load_lut(struct drm_crtc *crtc); 534extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d7ab9141641..1d067743fee 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -86,11 +86,12 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
86} 86}
87 87
88int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, 88int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
89 unsigned long size, bool kernel, u32 domain, 89 unsigned long size, int byte_align, bool kernel, u32 domain,
90 struct radeon_bo **bo_ptr) 90 struct radeon_bo **bo_ptr)
91{ 91{
92 struct radeon_bo *bo; 92 struct radeon_bo *bo;
93 enum ttm_bo_type type; 93 enum ttm_bo_type type;
94 int page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
94 int r; 95 int r;
95 96
96 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { 97 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -102,6 +103,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
102 type = ttm_bo_type_device; 103 type = ttm_bo_type_device;
103 } 104 }
104 *bo_ptr = NULL; 105 *bo_ptr = NULL;
106
107retry:
105 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); 108 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
106 if (bo == NULL) 109 if (bo == NULL)
107 return -ENOMEM; 110 return -ENOMEM;
@@ -109,13 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
109 bo->gobj = gobj; 112 bo->gobj = gobj;
110 bo->surface_reg = -1; 113 bo->surface_reg = -1;
111 INIT_LIST_HEAD(&bo->list); 114 INIT_LIST_HEAD(&bo->list);
112
113retry:
114 radeon_ttm_placement_from_domain(bo, domain); 115 radeon_ttm_placement_from_domain(bo, domain);
 115 /* Kernel allocations are uninterruptible */ 116 /* Kernel allocations are uninterruptible */
116 mutex_lock(&rdev->vram_mutex); 117 mutex_lock(&rdev->vram_mutex);
117 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, 118 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
118 &bo->placement, 0, 0, !kernel, NULL, size, 119 &bo->placement, page_align, 0, !kernel, NULL, size,
119 &radeon_ttm_bo_destroy); 120 &radeon_ttm_bo_destroy);
120 mutex_unlock(&rdev->vram_mutex); 121 mutex_unlock(&rdev->vram_mutex);
121 if (unlikely(r != 0)) { 122 if (unlikely(r != 0)) {
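radeon_bo_create() now takes a byte alignment and converts it to whole pages for ttm_bo_init(); the roundup guarantees any sub-page request still yields at least one page. A quick check of that conversion, assuming a 4096-byte page as on x86:

    #include <stdio.h>

    #define PAGE_SIZE  4096
    #define PAGE_SHIFT 12

    /* Same arithmetic as the new page_align computation above. */
    static int page_align(int byte_align)
    {
        return ((byte_align + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE)
            >> PAGE_SHIFT;          /* roundup(), then bytes -> pages */
    }

    int main(void)
    {
        /* 1 byte -> 1 page, one page -> 1 page, 2 MiB -> 512 pages */
        printf("%d %d %d\n",
               page_align(1), page_align(4096), page_align(2 << 20));
        return 0;
    }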
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 3481bc7f6f5..d143702b244 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -137,9 +137,10 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
137} 137}
138 138
139extern int radeon_bo_create(struct radeon_device *rdev, 139extern int radeon_bo_create(struct radeon_device *rdev,
140 struct drm_gem_object *gobj, unsigned long size, 140 struct drm_gem_object *gobj, unsigned long size,
141 bool kernel, u32 domain, 141 int byte_align,
142 struct radeon_bo **bo_ptr); 142 bool kernel, u32 domain,
143 struct radeon_bo **bo_ptr);
143extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); 144extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
144extern void radeon_bo_kunmap(struct radeon_bo *bo); 145extern void radeon_bo_kunmap(struct radeon_bo *bo);
145extern void radeon_bo_unref(struct radeon_bo **bo); 146extern void radeon_bo_unref(struct radeon_bo **bo);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 6ea798ce821..06e79822a2b 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -176,8 +176,8 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
176 INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); 176 INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
177 /* Allocate 1M object buffer */ 177 /* Allocate 1M object buffer */
178 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 178 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
179 true, RADEON_GEM_DOMAIN_GTT, 179 PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
180 &rdev->ib_pool.robj); 180 &rdev->ib_pool.robj);
181 if (r) { 181 if (r) {
182 DRM_ERROR("radeon: failed to ib pool (%d).\n", r); 182 DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
183 return r; 183 return r;
@@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
332 rdev->cp.ring_size = ring_size; 332 rdev->cp.ring_size = ring_size;
333 /* Allocate ring buffer */ 333 /* Allocate ring buffer */
334 if (rdev->cp.ring_obj == NULL) { 334 if (rdev->cp.ring_obj == NULL) {
335 r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true, 335 r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
336 RADEON_GEM_DOMAIN_GTT, 336 RADEON_GEM_DOMAIN_GTT,
337 &rdev->cp.ring_obj); 337 &rdev->cp.ring_obj);
338 if (r) { 338 if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 313c96bc09d..5b44f652145 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev)
52 goto out_cleanup; 52 goto out_cleanup;
53 } 53 }
54 54
55 r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, 55 r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
56 &vram_obj); 56 &vram_obj);
57 if (r) { 57 if (r) {
58 DRM_ERROR("Failed to create VRAM object\n"); 58 DRM_ERROR("Failed to create VRAM object\n");
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev)
71 void **gtt_start, **gtt_end; 71 void **gtt_start, **gtt_end;
72 void **vram_start, **vram_end; 72 void **vram_start, **vram_end;
73 73
74 r = radeon_bo_create(rdev, NULL, size, true, 74 r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
75 RADEON_GEM_DOMAIN_GTT, gtt_obj + i); 75 RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
76 if (r) { 76 if (r) {
77 DRM_ERROR("Failed to create GTT object %d\n", i); 77 DRM_ERROR("Failed to create GTT object %d\n", i);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index fe95bb35317..1272e4b6a1d 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
529 DRM_ERROR("Failed initializing VRAM heap.\n"); 529 DRM_ERROR("Failed initializing VRAM heap.\n");
530 return r; 530 return r;
531 } 531 }
532 r = radeon_bo_create(rdev, NULL, 256 * 1024, true, 532 r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
533 RADEON_GEM_DOMAIN_VRAM, 533 RADEON_GEM_DOMAIN_VRAM,
534 &rdev->stollen_vga_memory); 534 &rdev->stollen_vga_memory);
535 if (r) { 535 if (r) {
@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
689 gtt = container_of(backend, struct radeon_ttm_backend, backend); 689 gtt = container_of(backend, struct radeon_ttm_backend, backend);
690 gtt->offset = bo_mem->start << PAGE_SHIFT; 690 gtt->offset = bo_mem->start << PAGE_SHIFT;
691 if (!gtt->num_pages) { 691 if (!gtt->num_pages) {
692 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); 692 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
693 gtt->num_pages, bo_mem, backend);
693 } 694 }
694 r = radeon_gart_bind(gtt->rdev, gtt->offset, 695 r = radeon_gart_bind(gtt->rdev, gtt->offset,
695 gtt->num_pages, gtt->pages); 696 gtt->num_pages, gtt->pages);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index f683e51a2a0..5512e4e5e63 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev)
78 int r; 78 int r;
79 79
80 if (rdev->gart.table.ram.ptr) { 80 if (rdev->gart.table.ram.ptr) {
81 WARN(1, "RS400 GART already initialized.\n"); 81 WARN(1, "RS400 GART already initialized\n");
82 return 0; 82 return 0;
83 } 83 }
84 /* Check gart size */ 84 /* Check gart size */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b091a1f6fa4..f1c6e02c2e6 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev)
375 int r; 375 int r;
376 376
377 if (rdev->gart.table.vram.robj) { 377 if (rdev->gart.table.vram.robj) {
378 WARN(1, "RS600 GART already initialized.\n"); 378 WARN(1, "RS600 GART already initialized\n");
379 return 0; 379 return 0;
380 } 380 }
381 /* Initialize common gart structure */ 381 /* Initialize common gart structure */
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev)
505 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); 505 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
506 506
507 if (!rdev->irq.installed) { 507 if (!rdev->irq.installed) {
508 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 508 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
509 WREG32(R_000040_GEN_INT_CNTL, 0); 509 WREG32(R_000040_GEN_INT_CNTL, 0);
510 return -EINVAL; 510 return -EINVAL;
511 } 511 }
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 245374e2b77..4dfead8cee3 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -915,8 +915,8 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev)
915 915
916 if (rdev->vram_scratch.robj == NULL) { 916 if (rdev->vram_scratch.robj == NULL) {
917 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, 917 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
918 true, RADEON_GEM_DOMAIN_VRAM, 918 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
919 &rdev->vram_scratch.robj); 919 &rdev->vram_scratch.robj);
920 if (r) { 920 if (r) {
921 return r; 921 return r;
922 } 922 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a1cb783c713..148a322d8f5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,14 +27,6 @@
27/* 27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30/* Notes:
31 *
32 * We store bo pointer in drm_mm_node struct so we know which bo own a
33 * specific node. There is no protection on the pointer, thus to make
34 * sure things don't go berserk you have to access this pointer while
35 * holding the global lru lock and make sure anytime you free a node you
36 * reset the pointer to NULL.
37 */
38 30
39#include "ttm/ttm_module.h" 31#include "ttm/ttm_module.h"
40#include "ttm/ttm_bo_driver.h" 32#include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
45#include <linux/mm.h> 37#include <linux/mm.h>
46#include <linux/file.h> 38#include <linux/file.h>
47#include <linux/module.h> 39#include <linux/module.h>
40#include <asm/atomic.h>
48 41
49#define TTM_ASSERT_LOCKED(param) 42#define TTM_ASSERT_LOCKED(param)
50#define TTM_DEBUG(fmt, arg...) 43#define TTM_DEBUG(fmt, arg...)
@@ -231,6 +224,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
231 int ret; 224 int ret;
232 225
233 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { 226 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
227 /**
228 * Deadlock avoidance for multi-bo reserving.
229 */
234 if (use_sequence && bo->seq_valid && 230 if (use_sequence && bo->seq_valid &&
235 (sequence - bo->val_seq < (1 << 31))) { 231 (sequence - bo->val_seq < (1 << 31))) {
236 return -EAGAIN; 232 return -EAGAIN;
@@ -248,6 +244,14 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
248 } 244 }
249 245
250 if (use_sequence) { 246 if (use_sequence) {
247 /**
248 * Wake up waiters that may need to recheck for deadlock,
249 * if we decreased the sequence number.
250 */
251 if (unlikely((bo->val_seq - sequence < (1 << 31))
252 || !bo->seq_valid))
253 wake_up_all(&bo->event_queue);
254
251 bo->val_seq = sequence; 255 bo->val_seq = sequence;
252 bo->seq_valid = true; 256 bo->seq_valid = true;
253 } else { 257 } else {
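The reserve path decides older-vs-newer with unsigned wraparound arithmetic: a - b < 1u << 31 means a is at or ahead of b even after the 32-bit counter wraps. A minimal demonstration of why that comparison survives wraparound:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True if sequence a is at-or-after b, modulo 2^32 -- the same
     * test ttm_bo_reserve_locked() uses for deadlock avoidance. */
    static bool seq_after_eq(uint32_t a, uint32_t b)
    {
        return a - b < (1u << 31);
    }

    int main(void)
    {
        printf("%d\n", seq_after_eq(10, 5));          /* 1: plainly after */
        printf("%d\n", seq_after_eq(5, 10));          /* 0: before */
        printf("%d\n", seq_after_eq(3, 0xfffffffeu)); /* 1: after, wrapped */
        return 0;
    }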
@@ -452,6 +456,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
452 ttm_bo_mem_put(bo, &bo->mem); 456 ttm_bo_mem_put(bo, &bo->mem);
453 457
454 atomic_set(&bo->reserved, 0); 458 atomic_set(&bo->reserved, 0);
459
460 /*
461 * Make processes trying to reserve really pick it up.
462 */
463 smp_mb__after_atomic_dec();
455 wake_up_all(&bo->event_queue); 464 wake_up_all(&bo->event_queue);
456} 465}
457 466
@@ -460,7 +469,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
460 struct ttm_bo_device *bdev = bo->bdev; 469 struct ttm_bo_device *bdev = bo->bdev;
461 struct ttm_bo_global *glob = bo->glob; 470 struct ttm_bo_global *glob = bo->glob;
462 struct ttm_bo_driver *driver; 471 struct ttm_bo_driver *driver;
463 void *sync_obj; 472 void *sync_obj = NULL;
464 void *sync_obj_arg; 473 void *sync_obj_arg;
465 int put_count; 474 int put_count;
466 int ret; 475 int ret;
@@ -495,17 +504,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
495 spin_lock(&glob->lru_lock); 504 spin_lock(&glob->lru_lock);
496 } 505 }
497queue: 506queue:
498 sync_obj = bo->sync_obj;
499 sync_obj_arg = bo->sync_obj_arg;
500 driver = bdev->driver; 507 driver = bdev->driver;
508 if (bo->sync_obj)
509 sync_obj = driver->sync_obj_ref(bo->sync_obj);
510 sync_obj_arg = bo->sync_obj_arg;
501 511
502 kref_get(&bo->list_kref); 512 kref_get(&bo->list_kref);
503 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 513 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
504 spin_unlock(&glob->lru_lock); 514 spin_unlock(&glob->lru_lock);
505 spin_unlock(&bo->lock); 515 spin_unlock(&bo->lock);
506 516
507 if (sync_obj) 517 if (sync_obj) {
508 driver->sync_obj_flush(sync_obj, sync_obj_arg); 518 driver->sync_obj_flush(sync_obj, sync_obj_arg);
519 driver->sync_obj_unref(&sync_obj);
520 }
509 schedule_delayed_work(&bdev->wq, 521 schedule_delayed_work(&bdev->wq,
510 ((HZ / 100) < 1) ? 1 : HZ / 100); 522 ((HZ / 100) < 1) ? 1 : HZ / 100);
511} 523}
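The destroy path previously read bo->sync_obj and used it after dropping the locks, racing with anyone who could release it in the meantime; the fix takes its own reference under the lock and drops it after the flush. A generic sketch of that take-ref-before-unlock pattern, with simplified single-threaded refcounting standing in for the TTM driver hooks:

    #include <stdio.h>

    struct sync_obj { int refcount; };

    static struct sync_obj *sync_ref(struct sync_obj *s)
    {
        s->refcount++;              /* taken under the owner's lock */
        return s;
    }

    static void sync_unref(struct sync_obj **s)
    {
        if (--(*s)->refcount == 0)
            printf("freed\n");
        *s = NULL;
    }

    static void queue_destroy(struct sync_obj *bo_sync)
    {
        struct sync_obj *sync = NULL;

        /* lock held: pin the object before unlocking */
        if (bo_sync)
            sync = sync_ref(bo_sync);
        /* ...unlock here; bo_sync may now be released by others... */

        if (sync) {
            printf("flush with refcount=%d\n", sync->refcount);
            sync_unref(&sync);      /* drop our pin when done */
        }
    }

    int main(void)
    {
        struct sync_obj s = { 1 };

        queue_destroy(&s);
        printf("remaining refcount=%d\n", s.refcount);
        return 0;
    }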
@@ -822,7 +834,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
822 bool no_wait_gpu) 834 bool no_wait_gpu)
823{ 835{
824 struct ttm_bo_device *bdev = bo->bdev; 836 struct ttm_bo_device *bdev = bo->bdev;
825 struct ttm_bo_global *glob = bdev->glob;
826 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 837 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
827 int ret; 838 int ret;
828 839
@@ -832,12 +843,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
832 return ret; 843 return ret;
833 if (mem->mm_node) 844 if (mem->mm_node)
834 break; 845 break;
835 spin_lock(&glob->lru_lock);
836 if (list_empty(&man->lru)) {
837 spin_unlock(&glob->lru_lock);
838 break;
839 }
840 spin_unlock(&glob->lru_lock);
841 ret = ttm_mem_evict_first(bdev, mem_type, interruptible, 846 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
842 no_wait_reserve, no_wait_gpu); 847 no_wait_reserve, no_wait_gpu);
843 if (unlikely(ret != 0)) 848 if (unlikely(ret != 0))
@@ -1125,35 +1130,9 @@ EXPORT_SYMBOL(ttm_bo_validate);
1125int ttm_bo_check_placement(struct ttm_buffer_object *bo, 1130int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1126 struct ttm_placement *placement) 1131 struct ttm_placement *placement)
1127{ 1132{
1128 int i; 1133 BUG_ON((placement->fpfn || placement->lpfn) &&
1134 (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1129 1135
1130 if (placement->fpfn || placement->lpfn) {
1131 if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
1132 printk(KERN_ERR TTM_PFX "Page number range to small "
1133 "Need %lu pages, range is [%u, %u]\n",
1134 bo->mem.num_pages, placement->fpfn,
1135 placement->lpfn);
1136 return -EINVAL;
1137 }
1138 }
1139 for (i = 0; i < placement->num_placement; i++) {
1140 if (!capable(CAP_SYS_ADMIN)) {
1141 if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
1142 printk(KERN_ERR TTM_PFX "Need to be root to "
1143 "modify NO_EVICT status.\n");
1144 return -EINVAL;
1145 }
1146 }
1147 }
1148 for (i = 0; i < placement->num_busy_placement; i++) {
1149 if (!capable(CAP_SYS_ADMIN)) {
1150 if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
1151 printk(KERN_ERR TTM_PFX "Need to be root to "
1152 "modify NO_EVICT status.\n");
1153 return -EINVAL;
1154 }
1155 }
1156 }
1157 return 0; 1136 return 0;
1158} 1137}
1159 1138
@@ -1176,6 +1155,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1176 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1155 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1177 if (num_pages == 0) { 1156 if (num_pages == 0) {
1178 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); 1157 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1158 if (destroy)
1159 (*destroy)(bo);
1160 else
1161 kfree(bo);
1179 return -EINVAL; 1162 return -EINVAL;
1180 } 1163 }
1181 bo->destroy = destroy; 1164 bo->destroy = destroy;
@@ -1369,18 +1352,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1369 int ret = -EINVAL; 1352 int ret = -EINVAL;
1370 struct ttm_mem_type_manager *man; 1353 struct ttm_mem_type_manager *man;
1371 1354
1372 if (type >= TTM_NUM_MEM_TYPES) { 1355 BUG_ON(type >= TTM_NUM_MEM_TYPES);
1373 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1374 return ret;
1375 }
1376
1377 man = &bdev->man[type]; 1356 man = &bdev->man[type];
1378 if (man->has_type) { 1357 BUG_ON(man->has_type);
1379 printk(KERN_ERR TTM_PFX
1380 "Memory manager already initialized for type %d\n",
1381 type);
1382 return ret;
1383 }
1384 1358
1385 ret = bdev->driver->init_mem_type(bdev, type, man); 1359 ret = bdev->driver->init_mem_type(bdev, type, man);
1386 if (ret) 1360 if (ret)
@@ -1389,13 +1363,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1389 1363
1390 ret = 0; 1364 ret = 0;
1391 if (type != TTM_PL_SYSTEM) { 1365 if (type != TTM_PL_SYSTEM) {
1392 if (!p_size) {
1393 printk(KERN_ERR TTM_PFX
1394 "Zero size memory manager type %d\n",
1395 type);
1396 return ret;
1397 }
1398
1399 ret = (*man->func->init)(man, p_size); 1366 ret = (*man->func->init)(man, p_size);
1400 if (ret) 1367 if (ret)
1401 return ret; 1368 return ret;
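
Note for driver writers: with the error path added to ttm_bo_init() above, the function now releases the buffer object itself when the size check fails (via the supplied destroy callback, or kfree() when none is given), so callers must not free it again on -EINVAL. A minimal sketch of an init helper with the same take-ownership-on-failure contract; the mydrv_* names are hypothetical, not part of TTM:

#include <linux/errno.h>
#include <linux/slab.h>

struct mydrv_bo {				/* hypothetical */
	void (*destroy)(struct mydrv_bo *bo);
	unsigned long num_pages;
};

static int mydrv_bo_init(struct mydrv_bo *bo, unsigned long num_pages,
			 void (*destroy)(struct mydrv_bo *))
{
	if (num_pages == 0) {
		/* Free here so no caller can ever double-free on error. */
		if (destroy)
			destroy(bo);
		else
			kfree(bo);
		return -EINVAL;
	}
	bo->destroy = destroy;
	bo->num_pages = num_pages;
	return 0;
}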
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 7410c190c89..038e947d00f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,20 +31,29 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
-#include <linux/jiffies.h>
+#include "drm_mm.h"
 #include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/file.h>
+#include <linux/spinlock.h>
 #include <linux/module.h>
 
+/**
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+	struct drm_mm mm;
+	spinlock_t lock;
+};
+
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 			       struct ttm_buffer_object *bo,
 			       struct ttm_placement *placement,
 			       struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
 	struct drm_mm_node *node = NULL;
 	unsigned long lpfn;
 	int ret;
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 		if (unlikely(ret))
 			return ret;
 
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		node = drm_mm_search_free_in_range(mm,
 					mem->num_pages, mem->page_alignment,
 					placement->fpfn, lpfn, 1);
 		if (unlikely(node == NULL)) {
-			spin_unlock(&glob->lru_lock);
+			spin_unlock(&rman->lock);
 			return 0;
 		}
 		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
 						     mem->page_alignment,
 						     placement->fpfn,
 						     lpfn);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&rman->lock);
 	} while (node == NULL);
 
 	mem->mm_node = node;
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 				struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
 	if (mem->mm_node) {
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		drm_mm_put_block(mem->mm_node);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&rman->lock);
 		mem->mm_node = NULL;
 	}
 }
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
 			   unsigned long p_size)
 {
-	struct drm_mm *mm;
+	struct ttm_range_manager *rman;
 	int ret;
 
-	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-	if (!mm)
+	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+	if (!rman)
 		return -ENOMEM;
 
-	ret = drm_mm_init(mm, 0, p_size);
+	ret = drm_mm_init(&rman->mm, 0, p_size);
 	if (ret) {
-		kfree(mm);
+		kfree(rman);
 		return ret;
 	}
 
-	man->priv = mm;
+	spin_lock_init(&rman->lock);
+	man->priv = rman;
 	return 0;
 }
 
 static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
-	int ret = 0;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
 
-	spin_lock(&glob->lru_lock);
+	spin_lock(&rman->lock);
 	if (drm_mm_clean(mm)) {
 		drm_mm_takedown(mm);
-		kfree(mm);
+		spin_unlock(&rman->lock);
+		kfree(rman);
 		man->priv = NULL;
-	} else
-		ret = -EBUSY;
-	spin_unlock(&glob->lru_lock);
-	return ret;
+		return 0;
+	}
+	spin_unlock(&rman->lock);
+	return -EBUSY;
 }
 
 static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
 			     const char *prefix)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
-	spin_lock(&glob->lru_lock);
-	drm_mm_debug_table(mm, prefix);
-	spin_unlock(&glob->lru_lock);
+	spin_lock(&rman->lock);
+	drm_mm_debug_table(&rman->mm, prefix);
+	spin_unlock(&rman->lock);
 }
 
 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
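
The new ttm_range_manager wraps the drm_mm allocator behind the ttm_mem_type_manager_func table and guards it with its own spinlock, so range allocations no longer serialize on the global LRU lock. Roughly how a driver plugs it in from its init_mem_type() hook; the flag and caching choices below are illustrative only, and the mydrv_* name is hypothetical:

#include <linux/errno.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int mydrv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			       struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		/* Generic drm_mm-backed range manager from ttm_bo_manager.c */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

ttm_bo_init_mm() then calls man->func->init() with the managed size, and get_node/put_node run during placement and eviction.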
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a7bab87a548..af789dc869b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 		return ret;
 
 	ret = be->func->bind(be, bo_mem);
-	if (ret) {
-		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
+	if (unlikely(ret != 0))
 		return ret;
-	}
 
 	ttm->state = tt_bound;
 
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 9b5b4d9dd62..3e038a394c5 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
 		first_pfn + 1;
 
-	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
+	vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
+	if (NULL == vsg->pages)
 		return -ENOMEM;
-	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
 	down_read(&current->mm->mmap_sem);
 	ret = get_user_pages(current, current->mm,
 			     (unsigned long)xfer->mem_addr,
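
vzalloc() exists precisely for the vmalloc()-then-memset() pattern removed here: it returns zeroed, virtually contiguous memory or NULL. A minimal sketch, assuming a page-pointer array like the one above:

#include <linux/mm_types.h>
#include <linux/vmalloc.h>

static struct page **alloc_page_array(unsigned long num_pages)
{
	/* Zeroed on success; the caller releases it with vfree(). */
	return vzalloc(sizeof(struct page *) * num_pages);
}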
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 51d9f9f1d7f..76954e3528c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 
 	fence_rep.error = ret;
 	fence_rep.fence_seq = (uint64_t) sequence;
+	fence_rep.pad64 = 0;
 
 	user_fence_rep = (struct drm_vmw_fence_rep __user *)
 	    (unsigned long)arg->fence_rep;
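
Zeroing fence_rep.pad64 matters because the struct lives on the kernel stack and is later copied to user space; any member left uninitialized would leak stack bytes. A common alternative, sketched here with a hypothetical reply struct rather than the vmwgfx one, is to clear the whole thing up front:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct mydrv_rep {			/* hypothetical reply layout */
	int32_t error;
	uint32_t pad64;
	uint64_t fence_seq;
};

static int mydrv_copy_rep(void __user *dst, int err, uint64_t seq)
{
	struct mydrv_rep rep;

	memset(&rep, 0, sizeof(rep));	/* clears pad64 and any implicit padding */
	rep.error = err;
	rep.fence_seq = seq;
	return copy_to_user(dst, &rep, sizeof(rep)) ? -EFAULT : 0;
}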
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87c6e6156d7..cceeb42789b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
 				       &vmw_vram_ne_placement,
 				       false, &vmw_dmabuf_bo_free);
 	vmw_overlay_resume_all(dev_priv);
+	if (unlikely(ret != 0))
+		vfbs->buffer = NULL;
 
 	return ret;
 }
@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(&vfb->base);
 
+	if (unlikely(vfbs->buffer == NULL))
+		return 0;
+
 	bo = &vfbs->buffer->base;
 	ttm_bo_unref(&bo);
 	vfbs->buffer = NULL;
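
The pin/unpin pair above now agrees on ownership: a failed pin resets vfbs->buffer to NULL, and unpin treats a NULL buffer as already released instead of dereferencing it. The shape of the guard, reduced to a hypothetical, self-contained pair:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct mydrv_fb {			/* hypothetical framebuffer wrapper */
	void *buffer;
};

static int mydrv_pin(struct mydrv_fb *fb)
{
	fb->buffer = kzalloc(64, GFP_KERNEL);	/* stand-in for the real pin */
	if (unlikely(fb->buffer == NULL))
		return -ENOMEM;	/* fb->buffer stays NULL: unpin is a no-op */
	return 0;
}

static int mydrv_unpin(struct mydrv_fb *fb)
{
	if (unlikely(fb->buffer == NULL))
		return 0;		/* pin failed or never ran */
	kfree(fb->buffer);
	fb->buffer = NULL;
	return 0;
}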
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a01c47ddb5b..29113c9b26a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 		return -EINVAL;
 	}
 
-	dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv));
+	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
 
 	if (!dev_priv->ldu_priv)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index df2036ed18d..f1a52f9e729 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
 		return -ENOSYS;
 	}
 
-	overlay = kmalloc(GFP_KERNEL, sizeof(*overlay));
+	overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
 	if (!overlay)
 		return -ENOMEM;
 
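
Both vmwgfx fixes above correct the same slip: kmalloc() takes (size, gfp_flags) in that order, so kmalloc(GFP_KERNEL, sizeof(...)) compiled fine but allocated the numeric value of GFP_KERNEL bytes while using the structure size as allocation flags. For reference, with a hypothetical struct:

#include <linux/slab.h>

struct mydrv_priv {			/* hypothetical */
	int dummy;
};

static struct mydrv_priv *mydrv_alloc_priv(void)
{
	/* Size first, GFP flags second. */
	return kmalloc(sizeof(struct mydrv_priv), GFP_KERNEL);
}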
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 36e129f0023..5408b1b7996 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -862,7 +862,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 				       &vmw_vram_sys_placement, true,
 				       &vmw_user_dmabuf_destroy);
 	if (unlikely(ret != 0))
-		return ret;
+		goto out_no_dmabuf;
 
 	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
 	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
@@ -870,19 +870,21 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 				   false,
 				   ttm_buffer_type,
 				   &vmw_user_dmabuf_release, NULL);
-	if (unlikely(ret != 0)) {
-		ttm_bo_unref(&tmp);
-	} else {
+	if (unlikely(ret != 0))
+		goto out_no_base_object;
+	else {
 		rep->handle = vmw_user_bo->base.hash.key;
 		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
 		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
 		rep->cur_gmr_offset = 0;
 	}
-	ttm_bo_unref(&tmp);
 
+out_no_base_object:
+	ttm_bo_unref(&tmp);
+out_no_dmabuf:
 	ttm_read_unlock(&vmaster->lock);
 
-	return 0;
+	return ret;
 }
 
 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
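
The alloc ioctl now follows the kernel's usual goto-unwind convention: each failure jumps to a label that undoes only the work already done, the read lock is dropped on every path, and the function returns ret rather than unconditionally returning 0 (the old early return also leaked the error code). A stripped-down sketch of the shape; all mydrv_* names are hypothetical:

#include <linux/compiler.h>

struct mydrv_dev;			/* hypothetical */
extern int mydrv_create_buffer(struct mydrv_dev *dev);
extern int mydrv_init_base_object(struct mydrv_dev *dev);
extern void mydrv_fill_reply(struct mydrv_dev *dev);
extern void mydrv_unref_tmp(struct mydrv_dev *dev);
extern void mydrv_unlock(struct mydrv_dev *dev);

static int mydrv_alloc_ioctl(struct mydrv_dev *dev)
{
	int ret;

	ret = mydrv_create_buffer(dev);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;	/* nothing acquired yet */

	ret = mydrv_init_base_object(dev);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	mydrv_fill_reply(dev);
	/* Success falls through the labels below on purpose. */
out_no_base_object:
	mydrv_unref_tmp(dev);		/* balances the extra reference */
out_no_dmabuf:
	mydrv_unlock(dev);
	return ret;			/* 0 on success, first error otherwise */
}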
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 742c423567c..0e1edd7311f 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -3,6 +3,9 @@ config STUB_POULSBO
 	depends on PCI
 	# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
 	help
 	  Choose this option if you have a system that has Intel GMA500