author	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-06-01 04:49:16 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-06-01 04:52:54 -0400
commit	e269f90f3d3f7c70cf661c660bf445597261f54a (patch)
tree	f09685517e9efa7d675dad8fd13694157ce50521 /drivers/gpu/drm/i915
parent	112abd291db7d47974f166e742104d761bc76977 (diff)
parent	63bc620b45af8c743ac291c8725933278c712692 (diff)
Merge remote-tracking branch 'airlied/drm-prime-vmap' into drm-intel-next-queued
We need the latest dma-buf code from Dave Airlie so that we can pimp the backing storage handling code in drm/i915 with Chris Wilson's unbound tracking and stolen mem backed gem object code.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile            |    3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c      |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c          |   10
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |   14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          |   53
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c   |  232
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c      |   15
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          |   37
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     |   58
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c          |   43
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c         |   21
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c        |    8
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c       |    1
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c          |   64
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c        |   18
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo_regs.h   |    5
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c          |   53
17 files changed, 528 insertions, 109 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0ca7f7646ab5..2e9268da58d8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -38,7 +38,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  dvo_ch7017.o \
 	  dvo_ivch.o \
 	  dvo_tfp410.o \
-	  dvo_sil164.o
+	  dvo_sil164.o \
+	  i915_gem_dmabuf.o
 
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index eb2b3c25b9e1..5363e9c66c27 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2032,6 +2032,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
 				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d3e194853061..238a52165833 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1012,7 +1012,7 @@ static const struct dev_pm_ops i915_pm_ops = {
 	.restore = i915_pm_resume,
 };
 
-static struct vm_operations_struct i915_gem_vm_ops = {
+static const struct vm_operations_struct i915_gem_vm_ops = {
 	.fault = i915_gem_fault,
 	.open = drm_gem_vm_open,
 	.close = drm_gem_vm_close,
@@ -1039,7 +1039,7 @@ static struct drm_driver driver = {
 	 */
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
 	.load = i915_driver_load,
 	.unload = i915_driver_unload,
 	.open = i915_driver_open,
@@ -1062,6 +1062,12 @@ static struct drm_driver driver = {
 	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = i915_gem_prime_export,
+	.gem_prime_import = i915_gem_prime_import,
+
 	.dumb_create = i915_gem_dumb_create,
 	.dumb_map_offset = i915_gem_mmap_gtt,
 	.dumb_destroy = i915_gem_dumb_destroy,
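
(For context, not part of this patch: with DRIVER_PRIME set and the prime hooks above wired up, userspace can turn a GEM handle into a shareable dma-buf file descriptor through the generic PRIME ioctl. A minimal sketch; 'drm_fd' and 'handle' are assumed to already exist, and error handling is trimmed.)

#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Sketch: export a GEM handle as a dma-buf fd via PRIME.
 * 'drm_fd' is an open DRM device node, 'handle' a valid GEM handle. */
static int gem_handle_to_dmabuf_fd(int drm_fd, unsigned int handle)
{
	struct drm_prime_handle args = {
		.handle = handle,
		.flags = DRM_CLOEXEC,	/* close-on-exec on the new fd */
		.fd = -1,
	};

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;	/* lands in drm_gem_prime_handle_to_fd() */

	return args.fd;	/* shareable with other devices/processes */
}
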
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 470c73219e6b..ccabadd2b6c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -944,6 +944,11 @@ struct drm_i915_gem_object {
 	struct scatterlist *sg_list;
 	int num_sg;
 
+	/* prime dma-buf support */
+	struct sg_table *sg_table;
+	void *dma_buf_vmapping;
+	int vmapping_count;
+
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
@@ -1251,6 +1256,8 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+				  gfp_t gfpmask);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
@@ -1349,6 +1356,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
 
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+					     struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+				      struct drm_gem_object *gem_obj, int flags);
+
+
 /* i915_gem_gtt.c */
 int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1c08e0900eff..a20ac438b8ef 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
+#include <linux/dma-buf.h>
 
 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
@@ -538,6 +539,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	trace_i915_gem_object_pread(obj, args->offset, args->size);
 
 	ret = i915_gem_shmem_pread(dev, obj, args, file);
@@ -880,6 +889,14 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
 	ret = -EFAULT;
@@ -1021,6 +1038,14 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;
 
+	/* prime objects have no backing filp to GEM mmap
+	 * pages from.
+	 */
+	if (!obj->filp) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -EINVAL;
+	}
+
 	addr = vm_mmap(obj->filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       args->offset);
@@ -1302,8 +1327,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
-
-static int
+int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 			      gfp_t gfpmask)
 {
@@ -1312,6 +1336,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 	struct inode *inode;
 	struct page *page;
 
+	if (obj->pages || obj->sg_table)
+		return 0;
+
 	/* Get the list of pages out of our struct file.  They'll be pinned
 	 * at this point until we release them.
 	 */
@@ -1353,6 +1380,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	int page_count = obj->base.size / PAGE_SIZE;
 	int i;
 
+	if (!obj->pages)
+		return;
+
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -2164,10 +2194,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	if (obj->gtt_space == NULL)
 		return 0;
 
-	if (obj->pin_count != 0) {
-		DRM_ERROR("Attempting to unbind pinned buffer\n");
-		return -EINVAL;
-	}
+	if (obj->pin_count)
+		return -EBUSY;
 
 	ret = i915_gem_object_finish_gpu(obj);
 	if (ret)
@@ -3394,6 +3422,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
+	u32 mask;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL)
@@ -3404,8 +3433,15 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 		return NULL;
 	}
 
+	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+		/* 965gm cannot relocate objects above 4GiB. */
+		mask &= ~__GFP_HIGHMEM;
+		mask |= __GFP_DMA32;
+	}
+
 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+	mapping_set_gfp_mask(mapping, mask);
 
 	i915_gem_info_add_obj(dev_priv, size);
 
@@ -3458,6 +3494,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg_table);
+
 	if (obj->phys_obj)
 		i915_gem_detach_phys_object(dev, obj);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 000000000000..aa308e1337db
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2012 Red Hat Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "i915_drv.h"
+#include <linux/dma-buf.h>
+
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+					     enum dma_data_direction dir)
+{
+	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int npages = obj->base.size / PAGE_SIZE;
+	struct sg_table *sg = NULL;
+	int ret;
+	int nents;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (!obj->pages) {
+		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+		if (ret)
+			goto out;
+	}
+
+	/* link the pages into an SG then map the sg */
+	sg = drm_prime_pages_to_sg(obj->pages, npages);
+	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+out:
+	mutex_unlock(&dev->struct_mutex);
+	return sg;
+}
+
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				   struct sg_table *sg, enum dma_data_direction dir)
+{
+	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	sg_free_table(sg);
+	kfree(sg);
+}
+
+static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+
+	if (obj->base.export_dma_buf == dma_buf) {
+		/* drop the reference on the export fd holds */
+		obj->base.export_dma_buf = NULL;
+		drm_gem_object_unreference_unlocked(&obj->base);
+	}
+}
+
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (obj->dma_buf_vmapping) {
+		obj->vmapping_count++;
+		goto out_unlock;
+	}
+
+	if (!obj->pages) {
+		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ERR_PTR(ret);
+		}
+	}
+
+	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+	if (!obj->dma_buf_vmapping) {
+		DRM_ERROR("failed to vmap object\n");
+		goto out_unlock;
+	}
+
+	obj->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return;
+
+	--obj->vmapping_count;
+	if (obj->vmapping_count == 0) {
+		vunmap(obj->dma_buf_vmapping);
+		obj->dma_buf_vmapping = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
+static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static const struct dma_buf_ops i915_dmabuf_ops = {
+	.map_dma_buf = i915_gem_map_dma_buf,
+	.unmap_dma_buf = i915_gem_unmap_dma_buf,
+	.release = i915_gem_dmabuf_release,
+	.kmap = i915_gem_dmabuf_kmap,
+	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
+	.kunmap = i915_gem_dmabuf_kunmap,
+	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.mmap = i915_gem_dmabuf_mmap,
+	.vmap = i915_gem_dmabuf_vmap,
+	.vunmap = i915_gem_dmabuf_vunmap,
+};
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+				      struct drm_gem_object *gem_obj, int flags)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+	return dma_buf_export(obj, &i915_dmabuf_ops,
+			      obj->base.size, 0600);
+}
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+					     struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg;
+	struct drm_i915_gem_object *obj;
+	int npages;
+	int size;
+	int ret;
+
+	/* is this one of own objects? */
+	if (dma_buf->ops == &i915_dmabuf_ops) {
+		obj = dma_buf->priv;
+		/* is it from our device? */
+		if (obj->base.dev == dev) {
+			drm_gem_object_reference(&obj->base);
+			return &obj->base;
+		}
+	}
+
+	/* need to attach */
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto fail_detach;
+	}
+
+	size = dma_buf->size;
+	npages = size / PAGE_SIZE;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (obj == NULL) {
+		ret = -ENOMEM;
+		goto fail_unmap;
+	}
+
+	ret = drm_gem_private_object_init(dev, &obj->base, size);
+	if (ret) {
+		kfree(obj);
+		goto fail_unmap;
+	}
+
+	obj->sg_table = sg;
+	obj->base.import_attach = attach;
+
+	return &obj->base;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	return ERR_PTR(ret);
+}
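
(Aside, not part of the diff: the vmap/vunmap pair above is refcounted through vmapping_count, so nested kernel users share a single mapping. A hypothetical in-kernel consumer would pair the dma-buf core wrappers roughly like this; the function name is illustrative.)

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/string.h>

/* Hypothetical consumer: get a CPU mapping of a shared dma-buf,
 * touch it, and drop the mapping again. */
static int consumer_cpu_touch(struct dma_buf *buf)
{
	void *vaddr = dma_buf_vmap(buf);	/* routes to i915_gem_dmabuf_vmap() */

	if (IS_ERR_OR_NULL(vaddr))
		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

	memset(vaddr, 0, buf->size);		/* ...CPU access... */

	dma_buf_vunmap(buf, vaddr);		/* unmaps once vmapping_count hits 0 */
	return 0;
}
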
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 29d573c27b35..9fd25a435536 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -267,7 +267,13 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 		BUG();
 	}
 
-	if (dev_priv->mm.gtt->needs_dmar) {
+	if (obj->sg_table) {
+		i915_ppgtt_insert_sg_entries(ppgtt,
+					     obj->sg_table->sgl,
+					     obj->sg_table->nents,
+					     obj->gtt_space->start >> PAGE_SHIFT,
+					     pte_flags);
+	} else if (dev_priv->mm.gtt->needs_dmar) {
 		BUG_ON(!obj->sg_list);
 
 		i915_ppgtt_insert_sg_entries(ppgtt,
@@ -371,7 +377,12 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
 
-	if (dev_priv->mm.gtt->needs_dmar) {
+	if (obj->sg_table) {
+		intel_gtt_insert_sg_entries(obj->sg_table->sgl,
+					    obj->sg_table->nents,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    agp_type);
+	} else if (dev_priv->mm.gtt->needs_dmar) {
 		BUG_ON(!obj->sg_list);
 
 		intel_gtt_insert_sg_entries(obj->sg_list,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6553dcc2ca79..0e876646d769 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    rps_work);
-	u8 new_delay = dev_priv->cur_delay;
 	u32 pm_iir, pm_imr;
+	u8 new_delay;
 
 	spin_lock_irq(&dev_priv->rps_lock);
 	pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	I915_WRITE(GEN6_PMIMR, 0);
 	spin_unlock_irq(&dev_priv->rps_lock);
 
-	if (!pm_iir)
+	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 		return;
 
 	mutex_lock(&dev_priv->dev->struct_mutex);
-	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		if (dev_priv->cur_delay != dev_priv->max_delay)
-			new_delay = dev_priv->cur_delay + 1;
-		if (new_delay > dev_priv->max_delay)
-			new_delay = dev_priv->max_delay;
-	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
-		gen6_gt_force_wake_get(dev_priv);
-		if (dev_priv->cur_delay != dev_priv->min_delay)
-			new_delay = dev_priv->cur_delay - 1;
-		if (new_delay < dev_priv->min_delay) {
-			new_delay = dev_priv->min_delay;
-			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
-				   ((new_delay << 16) & 0x3f0000));
-		} else {
-			/* Make sure we continue to get down interrupts
-			 * until we hit the minimum frequency */
-			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
-		}
-		gen6_gt_force_wake_put(dev_priv);
-	}
+
+	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+		new_delay = dev_priv->cur_delay + 1;
+	else
+		new_delay = dev_priv->cur_delay - 1;
 
 	gen6_set_rps(dev_priv->dev, new_delay);
-	dev_priv->cur_delay = new_delay;
 
-	/*
-	 * rps_lock not held here because clearing is non-destructive. There is
-	 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
-	 * by holding struct_mutex for the duration of the write.
-	 */
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1d801724c1db..9f5148acf73c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -910,9 +910,10 @@ static void assert_pll(struct drm_i915_private *dev_priv,
 
 /* For ILK+ */
 static void assert_pch_pll(struct drm_i915_private *dev_priv,
-			   struct intel_crtc *intel_crtc, bool state)
+			   struct intel_pch_pll *pll,
+			   struct intel_crtc *crtc,
+			   bool state)
 {
-	int reg;
 	u32 val;
 	bool cur_state;
 
@@ -921,30 +922,37 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	if (!intel_crtc->pch_pll) {
-		WARN(1, "asserting PCH PLL enabled with no PLL\n");
+	if (WARN (!pll,
+		  "asserting PCH PLL %s with no PLL\n", state_string(state)))
 		return;
-	}
 
-	if (HAS_PCH_CPT(dev_priv->dev)) {
+	val = I915_READ(pll->pll_reg);
+	cur_state = !!(val & DPLL_VCO_ENABLE);
+	WARN(cur_state != state,
+	     "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+	     pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+	/* Make sure the selected PLL is correctly attached to the transcoder */
+	if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
 		u32 pch_dpll;
 
 		pch_dpll = I915_READ(PCH_DPLL_SEL);
-
-		/* Make sure the selected PLL is enabled to the transcoder */
-		WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
-		     "transcoder %d PLL not enabled\n", intel_crtc->pipe);
+		cur_state = pll->pll_reg == _PCH_DPLL_B;
+		if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+			  "PLL[%d] not attached to this transcoder %d: %08x\n",
+			  cur_state, crtc->pipe, pch_dpll)) {
+			cur_state = !!(val >> (4*crtc->pipe + 3));
+			WARN(cur_state != state,
+			     "PLL[%d] not %s on this transcoder %d: %08x\n",
+			     pll->pll_reg == _PCH_DPLL_B,
+			     state_string(state),
+			     crtc->pipe,
+			     val);
+		}
 	}
-
-	reg = intel_crtc->pch_pll->pll_reg;
-	val = I915_READ(reg);
-	cur_state = !!(val & DPLL_VCO_ENABLE);
-	WARN(cur_state != state,
-	     "PCH PLL state assertion failure (expected %s, current %s)\n",
-	     state_string(state), state_string(cur_state));
 }
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
 			  enum pipe pipe, bool state)
@@ -1424,7 +1432,7 @@ static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
 	assert_pch_refclk_enabled(dev_priv);
 
 	if (pll->active++ && pll->on) {
-		assert_pch_pll_enabled(dev_priv, intel_crtc);
+		assert_pch_pll_enabled(dev_priv, pll, NULL);
 		return;
 	}
 
@@ -1460,12 +1468,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
 		      intel_crtc->base.base.id);
 
 	if (WARN_ON(pll->active == 0)) {
-		assert_pch_pll_disabled(dev_priv, intel_crtc);
+		assert_pch_pll_disabled(dev_priv, pll, NULL);
 		return;
 	}
 
 	if (--pll->active) {
-		assert_pch_pll_enabled(dev_priv, intel_crtc);
+		assert_pch_pll_enabled(dev_priv, pll, NULL);
 		return;
 	}
 
@@ -1495,7 +1503,9 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
 	BUG_ON(dev_priv->info->gen < 5);
 
 	/* Make sure PCH DPLL is enabled */
-	assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
+	assert_pch_pll_enabled(dev_priv,
+			       to_intel_crtc(crtc)->pch_pll,
+			       to_intel_crtc(crtc));
 
 	/* FDI must be feeding us bits for PCH ports */
 	assert_fdi_tx_enabled(dev_priv, pipe);
@@ -6918,7 +6928,7 @@ void intel_modeset_init(struct drm_device *dev)
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.prefer_shadow = 1;
 
-	dev->mode_config.funcs = (void *)&intel_mode_funcs;
+	dev->mode_config.funcs = &intel_mode_funcs;
 
 	intel_init_quirks(dev);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9b2effcc90e5..c71e7890e6f6 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -266,6 +266,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
 	if (mode->clock < 10000)
 		return MODE_CLOCK_LOW;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return MODE_H_ILLEGAL;
+
 	return MODE_OK;
 }
 
@@ -702,6 +705,9 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		mode->clock = intel_dp->panel_fixed_mode->clock;
 	}
 
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return false;
+
 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
 		      "max bw %02x pixel clock %iKHz\n",
 		      max_lane_count, bws[max_clock], mode->clock);
@@ -1154,11 +1160,10 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 
 	DRM_DEBUG_KMS("Turn eDP power off\n");
 
-	WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
-	ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
+	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
 	pp = ironlake_get_pp_control(dev_priv);
-	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
 
@@ -1266,18 +1271,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+
+	/* Make sure the panel is off before trying to change the mode. But also
+	 * ensure that we have vdd while we switch off the panel. */
+	ironlake_edp_panel_vdd_on(intel_dp);
 	ironlake_edp_backlight_off(intel_dp);
 	ironlake_edp_panel_off(intel_dp);
 
-	/* Wake up the sink first */
-	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	intel_dp_link_down(intel_dp);
 	ironlake_edp_panel_vdd_off(intel_dp, false);
-
-	/* Make sure the panel is off before trying to
-	 * change the mode
-	 */
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1309,10 +1312,11 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
 	if (mode != DRM_MODE_DPMS_ON) {
+		/* Switching the panel off requires vdd. */
+		ironlake_edp_panel_vdd_on(intel_dp);
 		ironlake_edp_backlight_off(intel_dp);
 		ironlake_edp_panel_off(intel_dp);
 
-		ironlake_edp_panel_vdd_on(intel_dp);
 		intel_dp_sink_dpms(intel_dp, mode);
 		intel_dp_link_down(intel_dp);
 		ironlake_edp_panel_vdd_off(intel_dp, false);
@@ -1961,6 +1965,23 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	return false;
 }
 
+static void
+intel_dp_probe_oui(struct intel_dp *intel_dp)
+{
+	u8 buf[3];
+
+	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+		return;
+
+	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+
+	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+}
+
 static bool
 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
@@ -2142,6 +2163,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 	if (status != connector_status_connected)
 		return status;
 
+	intel_dp_probe_oui(intel_dp);
+
 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
 		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
 	} else {
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 4a9707dd0f9c..1991a4408cf9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -396,11 +396,22 @@ clear_err:
 	 * Wait for bus to IDLE before clearing NAK.
 	 * If we clear the NAK while bus is still active, then it will stay
 	 * active and the next transaction may fail.
+	 *
+	 * If no ACK is received during the address phase of a transaction, the
+	 * adapter must report -ENXIO. It is not clear what to return if no ACK
+	 * is received at other times. But we have to be careful to not return
+	 * spurious -ENXIO because that will prevent i2c and drm edid functions
+	 * from retrying. So return -ENXIO only when gmbus properly quiescents -
+	 * timing out seems to happen when there _is_ a ddc chip present, but
+	 * it's slow responding and only answers on the 2nd retry.
 	 */
+	ret = -ENXIO;
 	if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
-		     10))
+		     10)) {
 		DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
 			      adapter->name);
+		ret = -ETIMEDOUT;
+	}
 
 	/* Toggle the Software Clear Interrupt bit. This has the effect
 	 * of resetting the GMBUS controller and so clearing the
@@ -414,14 +425,6 @@ clear_err:
 			 adapter->name, msgs[i].addr,
 			 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
-	/*
-	 * If no ACK is received during the address phase of a transaction,
-	 * the adapter must report -ENXIO.
-	 * It is not clear what to return if no ACK is received at other times.
-	 * So, we always return -ENXIO in all NAK cases, to ensure we send
-	 * it at least during the one case that is specified.
-	 */
-	ret = -ENXIO;
 	goto out;
 
 timeout:
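
(Illustration, not in the patch: the -ENXIO vs -ETIMEDOUT split matters to callers that retry DDC/EDID transfers. A hypothetical caller of this adapter might treat only -ENXIO as "no device present"; the function name is made up for the sketch.)

#include <linux/i2c.h>
#include <linux/delay.h>

/* Hypothetical caller: retry a transfer on -ETIMEDOUT (slow DDC chip),
 * but treat -ENXIO (clean NAK, nothing on the bus) as a hard failure. */
static int ddc_xfer_with_retry(struct i2c_adapter *adap,
			       struct i2c_msg *msgs, int num)
{
	int attempt, ret = -ENXIO;

	for (attempt = 0; attempt < 3; attempt++) {
		ret = i2c_transfer(adap, msgs, num);
		if (ret >= 0 || ret == -ENXIO)
			break;	/* success, or definitely no device */
		msleep(10);	/* -ETIMEDOUT etc.: give the chip time */
	}
	return ret;
}
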
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9dee82350def..08eb04c787e8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -747,6 +747,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
 	},
 	{
 		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Hewlett-Packard HP t5740e Thin Client",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
 		.ident = "Hewlett-Packard t5745",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 2b2e011e9055..2a1625d84a69 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -368,6 +368,7 @@ int intel_panel_setup_backlight(struct drm_device *dev)
 	else
 		return -ENODEV;
 
+	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = intel_panel_get_max_backlight(dev);
 	dev_priv->backlight =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8e79ff67ec98..d0ce2a5b1d3f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2270,10 +2270,33 @@ void ironlake_disable_drps(struct drm_device *dev)
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 swreq;
+	u32 limits;
 
-	swreq = (val & 0x3ff) << 25;
-	I915_WRITE(GEN6_RPNSWREQ, swreq);
+	limits = 0;
+	if (val >= dev_priv->max_delay)
+		val = dev_priv->max_delay;
+	else
+		limits |= dev_priv->max_delay << 24;
+
+	if (val <= dev_priv->min_delay)
+		val = dev_priv->min_delay;
+	else
+		limits |= dev_priv->min_delay << 16;
+
+	if (val == dev_priv->cur_delay)
+		return;
+
+	I915_WRITE(GEN6_RPNSWREQ,
+		   GEN6_FREQUENCY(val) |
+		   GEN6_OFFSET(0) |
+		   GEN6_AGGRESSIVE_TURBO);
+
+	/* Make sure we continue to get interrupts
+	 * until we hit the minimum or maximum frequencies.
+	 */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+	dev_priv->cur_delay = val;
 }
 
 void gen6_disable_rps(struct drm_device *dev)
@@ -2327,11 +2350,10 @@ int intel_enable_rc6(const struct drm_device *dev)
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
 	struct intel_ring_buffer *ring;
-	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+	u32 rp_state_cap;
+	u32 gt_perf_status;
 	u32 pcu_mbox, rc6_mask = 0;
 	u32 gtfifodbg;
-	int cur_freq, min_freq, max_freq;
 	int rc6_mode;
 	int i;
 
@@ -2352,6 +2374,14 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
 	gen6_gt_force_wake_get(dev_priv);
 
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+	/* In units of 100MHz */
+	dev_priv->max_delay = rp_state_cap & 0xff;
+	dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->cur_delay = 0;
+
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
@@ -2399,8 +2429,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   18 << 24 |
-		   6 << 16);
+		   dev_priv->max_delay << 24 |
+		   dev_priv->min_delay << 16);
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
 	I915_WRITE(GEN6_RP_UP_EI, 100000);
@@ -2408,7 +2438,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
-		   GEN6_RP_MEDIA_HW_MODE |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
 		   GEN6_RP_MEDIA_IS_GFX |
 		   GEN6_RP_ENABLE |
 		   GEN6_RP_UP_BUSY_AVG |
@@ -2426,10 +2456,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 
-	min_freq = (rp_state_cap & 0xff0000) >> 16;
-	max_freq = rp_state_cap & 0xff;
-	cur_freq = (gt_perf_status & 0xff00) >> 8;
-
 	/* Check for overclock support */
 	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 		     500))
@@ -2440,14 +2466,11 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 	if (pcu_mbox & (1<<31)) { /* OC supported */
-		max_freq = pcu_mbox & 0xff;
+		dev_priv->max_delay = pcu_mbox & 0xff;
 		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 	}
 
-	/* In units of 100MHz */
-	dev_priv->max_delay = max_freq;
-	dev_priv->min_delay = min_freq;
-	dev_priv->cur_delay = cur_freq;
+	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER,
@@ -3580,8 +3603,9 @@ static void gen6_sanitize_pm(struct drm_device *dev)
 	limits |= (dev_priv->min_delay & 0x3f) << 16;
 
 	if (old != limits) {
-		DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
-			  limits, old);
+		/* Note that the known failure case is to read back 0. */
+		DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
+				 "expected %08x, was %08x\n", limits, old);
 		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
 	}
 
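
(Worked example, illustrative values only: the GEN6_RP_INTERRUPT_LIMITS word that gen6_set_rps() now maintains packs the up-limit into bits 31:24 and the down-limit into bits 23:16, matching the code above.)

/* With max_delay = 18 (0x12) and min_delay = 6, and a request strictly
 * between the two, gen6_set_rps() programs
 *
 *	limits = (18 << 24) | (6 << 16) = 0x12060000
 *
 * which matches the value gen6_enable_rps() now seeds from rp_state_cap;
 * when the request is already clamped at one end, that end's field is
 * left zero so the corresponding interrupts keep firing.
 */
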
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ca3c6e128594..2f5106a488c5 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -780,10 +780,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
 		((v_sync_len & 0x30) >> 4);
 
 	dtd->part2.dtd_flags = 0x18;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
 	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-		dtd->part2.dtd_flags |= 0x2;
+		dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
 	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-		dtd->part2.dtd_flags |= 0x4;
+		dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
 	dtd->part2.sdvo_flags = 0;
 	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -817,9 +819,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
 	mode->clock = dtd->part1.clock * 10;
 
 	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
-	if (dtd->part2.dtd_flags & 0x2)
+	if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
 		mode->flags |= DRM_MODE_FLAG_PHSYNC;
-	if (dtd->part2.dtd_flags & 0x4)
+	if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
 		mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
 
@@ -1246,8 +1250,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
 
 static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
 {
+	struct drm_device *dev = intel_sdvo->base.base.dev;
 	u8 response[2];
 
+	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+	 * on the line. */
+	if (IS_I945G(dev) || IS_I945GM(dev))
+		return false;
+
 	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
 				    &response, 2) && response[0];
 }
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 6b7b22f4d63e..9d030142ee43 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
 	u16 output_flags;
 } __attribute__((packed));
 
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
+
 /** This matches the EDID DTD structure, more or less */
 struct intel_sdvo_dtd {
 	struct {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 3346612d2953..a233a51fd7e6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -674,6 +674,54 @@ static const struct tv_mode tv_modes[] = {
 		.filter_table = filter_table,
 	},
 	{
+		.name = "480p",
+		.clock = 107520,
+		.refresh = 59940,
+		.oversample = TV_OVERSAMPLE_4X,
+		.component_only = 1,
+
+		.hsync_end = 64, .hblank_end = 122,
+		.hblank_start = 842, .htotal = 857,
+
+		.progressive = true, .trilevel_sync = false,
+
+		.vsync_start_f1 = 12, .vsync_start_f2 = 12,
+		.vsync_len = 12,
+
+		.veq_ena = false,
+
+		.vi_end_f1 = 44, .vi_end_f2 = 44,
+		.nbr_end = 479,
+
+		.burst_ena = false,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name = "576p",
+		.clock = 107520,
+		.refresh = 50000,
+		.oversample = TV_OVERSAMPLE_4X,
+		.component_only = 1,
+
+		.hsync_end = 64, .hblank_end = 139,
+		.hblank_start = 859, .htotal = 863,
+
+		.progressive = true, .trilevel_sync = false,
+
+		.vsync_start_f1 = 10, .vsync_start_f2 = 10,
+		.vsync_len = 10,
+
+		.veq_ena = false,
+
+		.vi_end_f1 = 48, .vi_end_f2 = 48,
+		.nbr_end = 575,
+
+		.burst_ena = false,
+
+		.filter_table = filter_table,
+	},
+	{
 		.name = "720p@60Hz",
 		.clock = 148800,
 		.refresh = 60000,
@@ -1194,6 +1242,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 
 	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
 	I915_WRITE(TV_CTL, save_tv_ctl);
+	POSTING_READ(TV_CTL);
+
+	/* For unknown reasons the hw barfs if we don't do this vblank wait. */
+	intel_wait_for_vblank(intel_tv->base.base.dev,
+			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
 	/* Restore interrupt config */
 	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {