Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig                    |   1
-rw-r--r--  drivers/gpu/drm/Makefile                   |   2
-rw-r--r--  drivers/gpu/drm/drm_drv.c                  |   4
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c            |   8
-rw-r--r--  drivers/gpu/drm/drm_fops.c                 |   7
-rw-r--r--  drivers/gpu/drm/drm_gem.c                  |   9
-rw-r--r--  drivers/gpu/drm/drm_prime.c                | 304
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c        |  14
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            |  21
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c            |  13
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  23
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            |  37
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c        |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h            |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c          |  23
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       |  37
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c          |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c    |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c        |   3
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig            |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c     |   5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c  |  10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h      |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c    |   2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c              |  15
-rw-r--r--  drivers/gpu/drm/radeon/atom.h              |   1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c     |   3
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c              |   2
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h              |   1
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c              |  14
31 files changed, 525 insertions, 61 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index cc1148837e24..e354bc0b052a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@ menuconfig DRM
 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
 	select I2C
 	select I2C_ALGOBIT
+	select DMA_SHARED_BUFFER
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a858532806ae..c20da5bda355 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
-		drm_trace_points.o drm_global.o
+		drm_trace_points.o drm_global.o drm_prime.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 0b65fbc8a630..6116e3b75393 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -136,6 +136,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 7740dd26f007..a0d6e894d97c 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -559,9 +559,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 		return -EINVAL;
 
 	/* Need to resize the fb object !!! */
-	if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
+	if (var->bits_per_pixel > fb->bits_per_pixel ||
+	    var->xres > fb->width || var->yres > fb->height ||
+	    var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
 		DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
-			  "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
+			  "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
+			  var->xres, var->yres, var->bits_per_pixel,
+			  var->xres_virtual, var->yres_virtual,
 			  fb->width, fb->height, fb->bits_per_pixel);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7348a3dab250..cdfbf27b2b3c 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -271,6 +271,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_open(dev, priv);
 
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_prime_init_file_private(&priv->prime);
+
 	if (dev->driver->open) {
 		ret = dev->driver->open(dev, priv);
 		if (ret < 0)
@@ -571,6 +574,10 @@ int drm_release(struct inode *inode, struct file *filp)
 
 	if (dev->driver->postclose)
 		dev->driver->postclose(dev, file_priv);
+
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_prime_destroy_file_private(&file_priv->prime);
+
 	kfree(file_priv);
 
 	/* ========================================================
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 0ef358e53245..83114b5e3cee 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -35,6 +35,7 @@
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 #include "drmP.h"
 
 /** @file drm_gem.c
@@ -232,6 +233,10 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
+	if (obj->import_attach)
+		drm_prime_remove_imported_buf_handle(&filp->prime,
+				obj->import_attach->dmabuf);
+
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, filp);
 	drm_gem_object_handle_unreference_unlocked(obj);
@@ -527,6 +532,10 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 	struct drm_gem_object *obj = ptr;
 	struct drm_device *dev = obj->dev;
 
+	if (obj->import_attach)
+		drm_prime_remove_imported_buf_handle(&file_priv->prime,
+				obj->import_attach->dmabuf);
+
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, file_priv);
 
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
new file mode 100644
index 000000000000..1bdf2b54eaf6
--- /dev/null
+++ b/drivers/gpu/drm/drm_prime.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright © 2012 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *      Dave Airlie <airlied@redhat.com>
+ *      Rob Clark <rob.clark@linaro.org>
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/dma-buf.h>
+#include "drmP.h"
+
+/*
+ * DMA-BUF/GEM Object references and lifetime overview:
+ *
+ * On the export the dma_buf holds a reference to the exporting GEM
+ * object. It takes this reference in handle_to_fd_ioctl, when it
+ * first calls .prime_export and stores the exporting GEM object in
+ * the dma_buf priv. This reference is released when the dma_buf
+ * object goes away in the driver .release function.
+ *
+ * On the import the importing GEM object holds a reference to the
+ * dma_buf (which in turn holds a ref to the exporting GEM object).
+ * It takes that reference in the fd_to_handle ioctl.
+ * It calls dma_buf_get, creates an attachment to it and stores the
+ * attachment in the GEM object. When this attachment is destroyed
+ * when the imported object is destroyed, we remove the attachment
+ * and drop the reference to the dma_buf.
+ *
+ * Thus the chain of references always flows in one direction
+ * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
+ *
+ * Self-importing: if userspace is using PRIME as a replacement for flink
+ * then it will get a fd->handle request for a GEM object that it created.
+ * Drivers should detect this situation and return back the gem object
+ * from the dma-buf private.
+ */
+
+struct drm_prime_member {
+	struct list_head entry;
+	struct dma_buf *dma_buf;
+	uint32_t handle;
+};
+
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+		int *prime_fd)
+{
+	struct drm_gem_object *obj;
+	void *buf;
+
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj)
+		return -ENOENT;
+
+	mutex_lock(&file_priv->prime.lock);
+	/* re-export the original imported object */
+	if (obj->import_attach) {
+		get_dma_buf(obj->import_attach->dmabuf);
+		*prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
+		drm_gem_object_unreference_unlocked(obj);
+		mutex_unlock(&file_priv->prime.lock);
+		return 0;
+	}
+
+	if (obj->export_dma_buf) {
+		get_dma_buf(obj->export_dma_buf);
+		*prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
+		drm_gem_object_unreference_unlocked(obj);
+	} else {
+		buf = dev->driver->gem_prime_export(dev, obj, flags);
+		if (IS_ERR(buf)) {
+			/* normally the created dma-buf takes ownership of the ref,
+			 * but if that fails then drop the ref
+			 */
+			drm_gem_object_unreference_unlocked(obj);
+			mutex_unlock(&file_priv->prime.lock);
+			return PTR_ERR(buf);
+		}
+		obj->export_dma_buf = buf;
+		*prime_fd = dma_buf_fd(buf, flags);
+	}
+	mutex_unlock(&file_priv->prime.lock);
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
+{
+	struct dma_buf *dma_buf;
+	struct drm_gem_object *obj;
+	int ret;
+
+	dma_buf = dma_buf_get(prime_fd);
+	if (IS_ERR(dma_buf))
+		return PTR_ERR(dma_buf);
+
+	mutex_lock(&file_priv->prime.lock);
+
+	ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
+			dma_buf, handle);
+	if (!ret) {
+		ret = 0;
+		goto out_put;
+	}
+
+	/* never seen this one, need to import */
+	obj = dev->driver->gem_prime_import(dev, dma_buf);
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto out_put;
+	}
+
+	ret = drm_gem_handle_create(file_priv, obj, handle);
+	drm_gem_object_unreference_unlocked(obj);
+	if (ret)
+		goto out_put;
+
+	ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+			dma_buf, *handle);
+	if (ret)
+		goto fail;
+
+	mutex_unlock(&file_priv->prime.lock);
+	return 0;
+
+fail:
+	/* hmm, if driver attached, we are relying on the free-object path
+	 * to detach.. which seems ok..
+	 */
+	drm_gem_object_handle_unreference_unlocked(obj);
+out_put:
+	dma_buf_put(dma_buf);
+	mutex_unlock(&file_priv->prime.lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
+
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_prime_handle *args = data;
+	uint32_t flags;
+
+	if (!drm_core_check_feature(dev, DRIVER_PRIME))
+		return -EINVAL;
+
+	if (!dev->driver->prime_handle_to_fd)
+		return -ENOSYS;
+
+	/* check flags are valid */
+	if (args->flags & ~DRM_CLOEXEC)
+		return -EINVAL;
+
+	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
+	flags = args->flags & DRM_CLOEXEC;
+
+	return dev->driver->prime_handle_to_fd(dev, file_priv,
+			args->handle, flags, &args->fd);
+}
+
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_prime_handle *args = data;
+
+	if (!drm_core_check_feature(dev, DRIVER_PRIME))
+		return -EINVAL;
+
+	if (!dev->driver->prime_fd_to_handle)
+		return -ENOSYS;
+
+	return dev->driver->prime_fd_to_handle(dev, file_priv,
+			args->fd, &args->handle);
+}
+
+/*
+ * drm_prime_pages_to_sg
+ *
+ * this helper creates an sg table object from a set of pages
+ * the driver is responsible for mapping the pages into the
+ * importers address space
+ */
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
+{
+	struct sg_table *sg = NULL;
+	struct scatterlist *iter;
+	int i;
+	int ret;
+
+	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sg)
+		goto out;
+
+	ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	for_each_sg(sg->sgl, iter, nr_pages, i)
+		sg_set_page(iter, pages[i], PAGE_SIZE, 0);
+
+	return sg;
+out:
+	kfree(sg);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_prime_pages_to_sg);
+
+/* helper function to cleanup a GEM/prime object */
+void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
+{
+	struct dma_buf_attachment *attach;
+	struct dma_buf *dma_buf;
+	attach = obj->import_attach;
+	if (sg)
+		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+	dma_buf = attach->dmabuf;
+	dma_buf_detach(attach->dmabuf, attach);
+	/* remove the reference */
+	dma_buf_put(dma_buf);
+}
+EXPORT_SYMBOL(drm_prime_gem_destroy);
+
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+	INIT_LIST_HEAD(&prime_fpriv->head);
+	mutex_init(&prime_fpriv->lock);
+}
+EXPORT_SYMBOL(drm_prime_init_file_private);
+
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+	struct drm_prime_member *member, *safe;
+	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+		list_del(&member->entry);
+		kfree(member);
+	}
+}
+EXPORT_SYMBOL(drm_prime_destroy_file_private);
+
+int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+{
+	struct drm_prime_member *member;
+
+	member = kmalloc(sizeof(*member), GFP_KERNEL);
+	if (!member)
+		return -ENOMEM;
+
+	member->dma_buf = dma_buf;
+	member->handle = handle;
+	list_add(&member->entry, &prime_fpriv->head);
+	return 0;
+}
+EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
+
+int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+{
+	struct drm_prime_member *member;
+
+	list_for_each_entry(member, &prime_fpriv->head, entry) {
+		if (member->dma_buf == dma_buf) {
+			*handle = member->handle;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
+
+void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+{
+	struct drm_prime_member *member, *safe;
+
+	mutex_lock(&prime_fpriv->lock);
+	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+		if (member->dma_buf == dma_buf) {
+			list_del(&member->entry);
+			kfree(member);
+		}
+	}
+	mutex_unlock(&prime_fpriv->lock);
+}
+EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
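Editorial illustration, not part of the patch: a minimal userspace sketch of how the two PRIME ioctls wired up above might be exercised. The helper name prime_roundtrip is hypothetical; the sketch assumes a <drm/drm.h> that already carries struct drm_prime_handle, DRM_CLOEXEC and the DRM_IOCTL_PRIME_* definitions introduced alongside this series.

/* Hypothetical sketch: export a GEM handle as a dma-buf fd, then import it back. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int prime_roundtrip(int drm_fd, unsigned int gem_handle)
{
	struct drm_prime_handle req = {
		.handle = gem_handle,
		.flags  = DRM_CLOEXEC,	/* the only flag the ioctl accepts */
	};

	/* GEM handle -> dma-buf file descriptor */
	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req) < 0) {
		perror("PRIME_HANDLE_TO_FD");
		return -1;
	}
	printf("exported handle %u as dma-buf fd %d\n", gem_handle, req.fd);

	/* dma-buf file descriptor -> GEM handle (possibly the same object) */
	if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req) < 0) {
		perror("PRIME_FD_TO_HANDLE");
		return -1;
	}
	printf("imported fd %d as handle %u\n", req.fd, req.handle);
	return 0;
}

As the lifetime comment in drm_prime.c notes, a driver that sees its own dma-buf come back through fd_to_handle is expected to hand back the original GEM object rather than create a second import.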
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index fdb7ccefffbd..b505b70dba05 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1502,14 +1502,6 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int
-i915_debugfs_common_open(struct inode *inode,
-			 struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t
 i915_wedged_read(struct file *filp,
 		 char __user *ubuf,
@@ -1560,7 +1552,7 @@ i915_wedged_write(struct file *filp,
 
 static const struct file_operations i915_wedged_fops = {
 	.owner = THIS_MODULE,
-	.open = i915_debugfs_common_open,
+	.open = simple_open,
 	.read = i915_wedged_read,
 	.write = i915_wedged_write,
 	.llseek = default_llseek,
@@ -1622,7 +1614,7 @@ i915_max_freq_write(struct file *filp,
 
 static const struct file_operations i915_max_freq_fops = {
 	.owner = THIS_MODULE,
-	.open = i915_debugfs_common_open,
+	.open = simple_open,
 	.read = i915_max_freq_read,
 	.write = i915_max_freq_write,
 	.llseek = default_llseek,
@@ -1693,7 +1685,7 @@ i915_cache_sharing_write(struct file *filp,
 
 static const struct file_operations i915_cache_sharing_fops = {
 	.owner = THIS_MODULE,
-	.open = i915_debugfs_common_open,
+	.open = simple_open,
 	.read = i915_cache_sharing_read,
 	.write = i915_cache_sharing_write,
 	.llseek = default_llseek,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9341eb8ce93b..785f67f963ef 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1183,6 +1183,21 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
 	return can_switch;
 }
 
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+	if (i915_enable_ppgtt >= 0)
+		return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable ppgtt on SNB if VT-d is on. */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
 static int i915_load_gem_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1197,7 +1212,7 @@ static int i915_load_gem_init(struct drm_device *dev)
 	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
 
 	mutex_lock(&dev->struct_mutex);
-	if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) {
+	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
 		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
 		 * aperture accordingly when using aliasing ppgtt. */
 		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
@@ -1207,8 +1222,10 @@ static int i915_load_gem_init(struct drm_device *dev)
 		i915_gem_do_init(dev, 0, mappable_size, gtt_size);
 
 		ret = i915_gem_init_aliasing_ppgtt(dev);
-		if (ret)
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
 			return ret;
+		}
 	} else {
 		/* Let GEM Manage all of the aperture.
 		 *
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1a7559b59997..dfa55e7478fb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -66,7 +66,11 @@ MODULE_PARM_DESC(semaphores,
 int i915_enable_rc6 __read_mostly = -1;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 MODULE_PARM_DESC(i915_enable_rc6,
-	"Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
+	"Enable power-saving render C-state 6. "
+	"Different stages can be selected via bitmask values "
+	"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
+	"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
+	"default: -1 (use per-chip default)");
 
 int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -103,8 +107,8 @@ MODULE_PARM_DESC(enable_hangcheck,
 		"WARNING: Disabling this can cause system wide hangs. "
 		"(default: true)");
 
-bool i915_enable_ppgtt __read_mostly = 1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600);
+int i915_enable_ppgtt __read_mostly = -1;
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
 MODULE_PARM_DESC(i915_enable_ppgtt,
 		"Enable PPGTT (default: true)");
 
@@ -292,6 +296,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
 	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
 	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
 	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
+	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
 	{0, 0, 0}
 };
 
@@ -533,7 +538,9 @@ static int i915_drm_thaw(struct drm_device *dev)
 	drm_irq_install(dev);
 
 	/* Resume the modeset for every activated CRTC */
+	mutex_lock(&dev->mode_config.mutex);
 	drm_helper_resume_force_mode(dev);
+	mutex_unlock(&dev->mode_config.mutex);
 
 	if (IS_IRONLAKE_M(dev))
 		ironlake_enable_rc6(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c0f19f572004..5fabc6c31fec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1053,6 +1053,27 @@ struct drm_i915_file_private {
 
 #include "i915_trace.h"
 
+/**
+ * RC6 is a special power stage which allows the GPU to enter an very
+ * low-voltage mode when idle, using down to 0V while at this stage. This
+ * stage is entered automatically when the GPU is idle when RC6 support is
+ * enabled, and as soon as new workload arises GPU wakes up automatically as well.
+ *
+ * There are different RC6 modes available in Intel GPU, which differentiate
+ * among each other with the latency required to enter and leave RC6 and
+ * voltage consumed by the GPU in different states.
+ *
+ * The combination of the following flags define which states GPU is allowed
+ * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is deepest RC6. Their support by hardware varies according to the
+ * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
+ * which brings the most power savings; deeper states save more power, but
+ * require higher latency to switch to and wake up.
+ */
+#define INTEL_RC6_ENABLE	(1<<0)
+#define INTEL_RC6p_ENABLE	(1<<1)
+#define INTEL_RC6pp_ENABLE	(1<<2)
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
@@ -1065,7 +1086,7 @@ extern int i915_vbt_sdvo_panel_type __read_mostly;
 extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
-extern bool i915_enable_ppgtt __read_mostly;
+extern int i915_enable_ppgtt __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1f441f5c2405..4c65c639f772 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1472,16 +1472,19 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
 	obj->last_rendering_seqno = seqno;
-	if (obj->fenced_gpu_access) {
-		struct drm_i915_fence_reg *reg;
-
-		BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
 
+	if (obj->fenced_gpu_access) {
 		obj->last_fenced_seqno = seqno;
 		obj->last_fenced_ring = ring;
 
-		reg = &dev_priv->fence_regs[obj->fence_reg];
-		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+		/* Bump MRU to take account of the delayed flush */
+		if (obj->fence_reg != I915_FENCE_REG_NONE) {
+			struct drm_i915_fence_reg *reg;
+
+			reg = &dev_priv->fence_regs[obj->fence_reg];
+			list_move_tail(&reg->lru_list,
+				       &dev_priv->mm.fence_list);
+		}
 	}
 }
 
@@ -3754,12 +3757,32 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t pd_offset;
 	struct intel_ring_buffer *ring;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	uint32_t __iomem *pd_addr;
+	uint32_t pd_entry;
 	int i;
 
 	if (!dev_priv->mm.aliasing_ppgtt)
 		return;
 
-	pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
+
+	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		dma_addr_t pt_addr;
+
+		if (dev_priv->mm.gtt->needs_dmar)
+			pt_addr = ppgtt->pt_dma_addr[i];
+		else
+			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+		pd_entry |= GEN6_PDE_VALID;
+
+		writel(pd_entry, pd_addr + i);
+	}
+	readl(pd_addr);
+
+	pd_offset = ppgtt->pd_offset;
 	pd_offset /= 64; /* in cachelines, */
 	pd_offset <<= 16;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 81687af00893..f51a696486cb 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -498,8 +498,8 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 				if (ret)
 					goto err_unpin;
 			}
+			obj->pending_fenced_gpu_access = true;
 		}
-		obj->pending_fenced_gpu_access = need_fence;
 	}
 
 	entry->offset = obj->gtt_offset;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2eacd78bb93b..a135c61f4119 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -65,9 +65,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_ppgtt *ppgtt;
-	uint32_t pd_entry;
 	unsigned first_pd_entry_in_global_pt;
-	uint32_t __iomem *pd_addr;
 	int i;
 	int ret = -ENOMEM;
 
@@ -100,7 +98,6 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 		goto err_pt_alloc;
 	}
 
-	pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt;
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
 		dma_addr_t pt_addr;
 		if (dev_priv->mm.gtt->needs_dmar) {
@@ -117,13 +114,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 			ppgtt->pt_dma_addr[i] = pt_addr;
 		} else
 			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
-		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
-		pd_entry |= GEN6_PDE_VALID;
-
-		writel(pd_entry, pd_addr + i);
 	}
-	readl(pd_addr);
 
 	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3886cf051bac..2abf4eb94039 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2385,6 +2385,7 @@
 #define   PIPECONF_DISABLE		0
 #define   PIPECONF_DOUBLE_WIDE		(1<<30)
 #define   I965_PIPECONF_ACTIVE		(1<<30)
+#define   PIPECONF_FRAME_START_DELAY_MASK	(3<<27)
 #define   PIPECONF_SINGLE_WIDE		0
 #define   PIPECONF_PIPE_UNLOCKED	0
 #define   PIPECONF_PIPE_LOCKED		(1<<25)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 8168d8f8a634..b48fc2a8410c 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -24,6 +24,7 @@
  *	Eric Anholt <eric@anholt.net>
  *
  */
+#include <linux/dmi.h>
 #include <drm/drm_dp_helper.h>
 #include "drmP.h"
 #include "drm.h"
@@ -621,6 +622,26 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 		dev_priv->edp.bpp = 18;
 }
 
+static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+	DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+		      "VBIOS ROM for %s\n",
+		      id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+	{
+		.callback = intel_no_opregion_vbt_callback,
+		.ident = "ThinkCentre A57",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+		},
+	},
+	{ }
+};
+
 /**
  * intel_parse_bios - find VBT and initialize settings from the BIOS
  * @dev: DRM device
@@ -641,7 +662,7 @@ intel_parse_bios(struct drm_device *dev)
 	init_vbt_defaults(dev_priv);
 
 	/* XXX Should this validation be moved to intel_opregion.c? */
-	if (dev_priv->opregion.vbt) {
+	if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
 		struct vbt_header *vbt = dev_priv->opregion.vbt;
 		if (memcmp(vbt->signature, "$VBT", 4) == 0) {
 			DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d514719f65e2..91b35fd1db8c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5539,7 +5539,8 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
 			DRM_DEBUG_KMS("Using SSC on panel\n");
 			temp |= DREF_SSC1_ENABLE;
-		}
+		} else
+			temp &= ~DREF_SSC1_ENABLE;
 
 		/* Get SSC going before enabling the outputs */
 		I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -7580,6 +7581,12 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg, val;
 
+	/* Clear any frame start delays used for debugging left by the BIOS */
+	for_each_pipe(pipe) {
+		reg = PIPECONF(pipe);
+		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+	}
+
 	if (HAS_PCH_SPLIT(dev))
 		return;
 
@@ -8215,7 +8222,7 @@ void intel_init_emon(struct drm_device *dev)
 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
-static bool intel_enable_rc6(struct drm_device *dev)
+static int intel_enable_rc6(struct drm_device *dev)
 {
 	/*
 	 * Respect the kernel parameter if it is set
@@ -8233,11 +8240,11 @@ static bool intel_enable_rc6(struct drm_device *dev)
 	 * Disable rc6 on Sandybridge
 	 */
 	if (INTEL_INFO(dev)->gen == 6) {
-		DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
-		return 0;
+		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+		return INTEL_RC6_ENABLE;
 	}
-	DRM_DEBUG_DRIVER("RC6 enabled\n");
-	return 1;
+	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
 
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
@@ -8247,6 +8254,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	u32 pcu_mbox, rc6_mask = 0;
 	u32 gtfifodbg;
 	int cur_freq, min_freq, max_freq;
+	int rc6_mode;
 	int i;
 
 	/* Here begins a magic sequence of register writes to enable
@@ -8284,9 +8292,20 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
-	if (intel_enable_rc6(dev_priv->dev))
-		rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
-			((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
+	rc6_mode = intel_enable_rc6(dev_priv->dev);
+	if (rc6_mode & INTEL_RC6_ENABLE)
+		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+	if (rc6_mode & INTEL_RC6p_ENABLE)
+		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+	if (rc6_mode & INTEL_RC6pp_ENABLE)
+		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+
+	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+			(rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
+			(rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
+			(rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
 
 	I915_WRITE(GEN6_RC_CONTROL,
 		   rc6_mask |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c5c0973af8a1..95db2e988227 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -755,6 +755,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
 		},
 	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "MSI Wind Box DC500",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+			DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+		},
+	},
 
 	{ }	/* terminating entry */
 };
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fc66af6a9448..e25581a9f60f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -626,7 +626,7 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring)
 	/* Workaround to force correct ordering between irq and seqno writes on
 	 * ivb (and maybe also on snb) by reading from a CS register (like
 	 * ACTHD) before reading the status page. */
-	if (IS_GEN7(dev))
+	if (IS_GEN6(dev) || IS_GEN7(dev))
 		intel_ring_get_active_head(ring);
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7aa0450399a1..a464771a7240 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -411,6 +411,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
 	old_obj = intel_plane->obj;
 
+	src_w = src_w >> 16;
+	src_h = src_h >> 16;
+
 	/* Pipe must be running... */
 	if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
 		return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ca1639918f57..97a81260485a 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -13,6 +13,7 @@ config DRM_NOUVEAU
 	select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
 	select ACPI_WMI if ACPI
 	select MXM_WMI if ACPI
+	select POWER_SUPPLY
 	help
 	  Choose this option for open-source nVidia support.
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 637afe71de56..80963d05b54a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -177,14 +177,15 @@ bios_shadow_pci(struct nvbios *bios)
 
 	if (!pci_enable_rom(pdev)) {
 		void __iomem *rom = pci_map_rom(pdev, &length);
-		if (rom) {
+		if (rom && length) {
 			bios->data = kmalloc(length, GFP_KERNEL);
 			if (bios->data) {
 				memcpy_fromio(bios->data, rom, length);
 				bios->length = length;
 			}
-			pci_unmap_rom(pdev, rom);
 		}
+		if (rom)
+			pci_unmap_rom(pdev, rom);
 
 		pci_disable_rom(pdev);
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 44e6416d4a33..846afb0bfef4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -436,11 +436,11 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
 	}
 
 	if (dev_priv->card_type < NV_C0) {
-		init->subchan[0].handle = NvSw;
-		init->subchan[0].grclass = NV_SW;
-		init->nr_subchan = 1;
-	} else {
-		init->nr_subchan = 0;
-	}
+		init->subchan[0].handle = 0x00000000;
+		init->subchan[0].grclass = 0x0000;
+		init->subchan[1].handle = NvSw;
+		init->subchan[1].grclass = NV_SW;
+		init->nr_subchan = 2;
 	}
 
 	/* Named memory object area */
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index bcf0fd9e313e..23d4edf992b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -48,8 +48,8 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
 
 /* Hardcoded object assignments to subchannels (subchannel id). */
 enum {
-	NvSubSw		= 0,
-	NvSubM2MF	= 1,
+	NvSubM2MF	= 0,
+	NvSubSw		= 1,
 	NvSub2D		= 2,
 	NvSubCtxSurf2D	= 2,
 	NvSubGdiRect	= 3,
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a4886b36d0fa..c2a8511e855a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -642,7 +642,7 @@ nouveau_card_channel_init(struct drm_device *dev)
 		OUT_RING  (chan, chan->vram_handle);
 		OUT_RING  (chan, chan->gart_handle);
 	} else
-	if (dev_priv->card_type <= NV_C0) {
+	if (dev_priv->card_type <= NV_D0) {
 		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
 		if (ret)
 			goto error;
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d1bd239cd9e9..5ce9bf51a8de 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1306,8 +1306,11 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
 
 int atom_asic_init(struct atom_context *ctx)
 {
+	struct radeon_device *rdev = ctx->card->dev->dev_private;
 	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
 	uint32_t ps[16];
+	int ret;
+
 	memset(ps, 0, 64);
 
 	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
@@ -1317,7 +1320,17 @@ int atom_asic_init(struct atom_context *ctx)
 
 	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
 		return 1;
-	return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+	ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+	if (ret)
+		return ret;
+
+	memset(ps, 0, 64);
+
+	if (rdev->family < CHIP_R600) {
+		if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
+			atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+	}
+	return ret;
 }
 
 void atom_destroy(struct atom_context *ctx)
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 93cfe2086ba0..25fea631dad2 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -44,6 +44,7 @@
 #define ATOM_CMD_SETSCLK	0x0A
 #define ATOM_CMD_SETMCLK	0x0B
 #define ATOM_CMD_SETPCLK	0x0C
+#define ATOM_CMD_SPDFANCNTL	0x39
 
 #define ATOM_DATA_FWI_PTR	0xC
 #define ATOM_DATA_IIO_PTR	0x32
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6f70158d34e4..df6a4dbd93f8 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -241,7 +241,8 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 			domain_start = bo->rdev->mc.vram_start;
 		else
 			domain_start = bo->rdev->mc.gtt_start;
-		WARN_ON_ONCE((*gpu_addr - domain_start) > max_offset);
+		WARN_ON_ONCE(max_offset <
+			     (radeon_bo_gpu_offset(bo) - domain_start));
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 5340c5f3987b..53673907a6a0 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -47,7 +47,7 @@ static struct vm_operations_struct udl_gem_vm_ops = {
 static const struct file_operations udl_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
-	.mmap = drm_gem_mmap,
+	.mmap = udl_drm_gem_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
 	.unlocked_ioctl	= drm_ioctl,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 1612954a5bc4..96820d03a303 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -121,6 +121,7 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 
 int udl_gem_vmap(struct udl_gem_object *obj);
 void udl_gem_vunmap(struct udl_gem_object *obj);
+int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 852642dc1187..92f19ef329b0 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -71,6 +71,20 @@ int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
 	return drm_gem_handle_delete(file, handle);
 }
 
+int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+
+	return ret;
+}
+
 int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);