author    | John W. Linville <linville@tuxdriver.com> | 2012-04-12 13:49:28 -0400
committer | John W. Linville <linville@tuxdriver.com> | 2012-04-12 13:49:28 -0400
commit    | 8065248069097dddf9945acfb2081025e9618c16 (patch)
tree      | eddf3fb0372ba0f65c01382d386942ea8d18932d /drivers/gpu/drm
parent    | e66a8ddff72e85605f2212a0ebc666c7e9116641 (diff)
parent    | b4838d12e1f3cb48c2489a0b08733b5dbf848297 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless
Diffstat (limited to 'drivers/gpu/drm')
46 files changed, 962 insertions, 198 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index cc1148837e24..e354bc0b052a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@ menuconfig DRM | |||
9 | depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU | 9 | depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU |
10 | select I2C | 10 | select I2C |
11 | select I2C_ALGOBIT | 11 | select I2C_ALGOBIT |
12 | select DMA_SHARED_BUFFER | ||
12 | help | 13 | help |
13 | Kernel-level support for the Direct Rendering Infrastructure (DRI) | 14 | Kernel-level support for the Direct Rendering Infrastructure (DRI) |
14 | introduced in XFree86 4.0. If you say Y here, you need to select | 15 | introduced in XFree86 4.0. If you say Y here, you need to select |
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a858532806ae..c20da5bda355 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ | |||
12 | drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ | 12 | drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ |
13 | drm_crtc.o drm_modes.o drm_edid.o \ | 13 | drm_crtc.o drm_modes.o drm_edid.o \ |
14 | drm_info.o drm_debugfs.o drm_encoder_slave.o \ | 14 | drm_info.o drm_debugfs.o drm_encoder_slave.o \ |
15 | drm_trace_points.o drm_global.o | 15 | drm_trace_points.o drm_global.o drm_prime.o |
16 | 16 | ||
17 | drm-$(CONFIG_COMPAT) += drm_ioc32.o | 17 | drm-$(CONFIG_COMPAT) += drm_ioc32.o |
18 | 18 | ||
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 0b65fbc8a630..6116e3b75393 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -136,6 +136,10 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
136 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), | 136 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), |
137 | 137 | ||
138 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 138 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
139 | |||
140 | DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED), | ||
141 | DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED), | ||
142 | |||
139 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 143 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
140 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 144 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
141 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 145 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
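The two PRIME ioctls registered above are the user-visible entry points for dma-buf sharing. As a rough illustration (not part of this commit), a user-space sketch of driving them through libdrm could look like the following; the helper names are made up, and it assumes a header set that already carries struct drm_prime_handle, DRM_CLOEXEC and the DRM_IOCTL_PRIME_* definitions added by this series.

/* Hypothetical user-space sketch: export a GEM handle as a dma-buf fd on one
 * DRM fd and turn it back into a GEM handle on another. Build against libdrm
 * (-ldrm); error handling is kept to the minimum. */
#include <stdint.h>
#include <xf86drm.h>   /* drmIoctl() */
#include <drm.h>       /* struct drm_prime_handle, DRM_IOCTL_PRIME_*, DRM_CLOEXEC */

static int gem_handle_to_prime_fd(int drm_fd, uint32_t handle)
{
	struct drm_prime_handle args = {
		.handle = handle,
		.flags  = DRM_CLOEXEC,	/* the only flag the ioctl accepts */
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;
	return args.fd;			/* dma-buf file descriptor */
}

static int prime_fd_to_gem_handle(int drm_fd, int prime_fd, uint32_t *handle)
{
	struct drm_prime_handle args = { .fd = prime_fd };

	if (drmIoctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args))
		return -1;
	*handle = args.handle;		/* importer-side GEM handle */
	return 0;
}

A sharing setup would typically run the first helper on the exporting device's fd, pass the returned descriptor to another process over a Unix-domain socket, and run the second helper there against the importing device's fd.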
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 7740dd26f007..a0d6e894d97c 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -559,9 +559,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, | |||
559 | return -EINVAL; | 559 | return -EINVAL; |
560 | 560 | ||
561 | /* Need to resize the fb object !!! */ | 561 | /* Need to resize the fb object !!! */ |
562 | if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) { | 562 | if (var->bits_per_pixel > fb->bits_per_pixel || |
563 | var->xres > fb->width || var->yres > fb->height || | ||
564 | var->xres_virtual > fb->width || var->yres_virtual > fb->height) { | ||
563 | DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb " | 565 | DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb " |
564 | "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel, | 566 | "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", |
567 | var->xres, var->yres, var->bits_per_pixel, | ||
568 | var->xres_virtual, var->yres_virtual, | ||
565 | fb->width, fb->height, fb->bits_per_pixel); | 569 | fb->width, fb->height, fb->bits_per_pixel); |
566 | return -EINVAL; | 570 | return -EINVAL; |
567 | } | 571 | } |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7348a3dab250..cdfbf27b2b3c 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -271,6 +271,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
271 | if (dev->driver->driver_features & DRIVER_GEM) | 271 | if (dev->driver->driver_features & DRIVER_GEM) |
272 | drm_gem_open(dev, priv); | 272 | drm_gem_open(dev, priv); |
273 | 273 | ||
274 | if (drm_core_check_feature(dev, DRIVER_PRIME)) | ||
275 | drm_prime_init_file_private(&priv->prime); | ||
276 | |||
274 | if (dev->driver->open) { | 277 | if (dev->driver->open) { |
275 | ret = dev->driver->open(dev, priv); | 278 | ret = dev->driver->open(dev, priv); |
276 | if (ret < 0) | 279 | if (ret < 0) |
@@ -571,6 +574,10 @@ int drm_release(struct inode *inode, struct file *filp) | |||
571 | 574 | ||
572 | if (dev->driver->postclose) | 575 | if (dev->driver->postclose) |
573 | dev->driver->postclose(dev, file_priv); | 576 | dev->driver->postclose(dev, file_priv); |
577 | |||
578 | if (drm_core_check_feature(dev, DRIVER_PRIME)) | ||
579 | drm_prime_destroy_file_private(&file_priv->prime); | ||
580 | |||
574 | kfree(file_priv); | 581 | kfree(file_priv); |
575 | 582 | ||
576 | /* ======================================================== | 583 | /* ======================================================== |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 0ef358e53245..83114b5e3cee 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/mman.h> | 35 | #include <linux/mman.h> |
36 | #include <linux/pagemap.h> | 36 | #include <linux/pagemap.h> |
37 | #include <linux/shmem_fs.h> | 37 | #include <linux/shmem_fs.h> |
38 | #include <linux/dma-buf.h> | ||
38 | #include "drmP.h" | 39 | #include "drmP.h" |
39 | 40 | ||
40 | /** @file drm_gem.c | 41 | /** @file drm_gem.c |
@@ -232,6 +233,10 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle) | |||
232 | idr_remove(&filp->object_idr, handle); | 233 | idr_remove(&filp->object_idr, handle); |
233 | spin_unlock(&filp->table_lock); | 234 | spin_unlock(&filp->table_lock); |
234 | 235 | ||
236 | if (obj->import_attach) | ||
237 | drm_prime_remove_imported_buf_handle(&filp->prime, | ||
238 | obj->import_attach->dmabuf); | ||
239 | |||
235 | if (dev->driver->gem_close_object) | 240 | if (dev->driver->gem_close_object) |
236 | dev->driver->gem_close_object(obj, filp); | 241 | dev->driver->gem_close_object(obj, filp); |
237 | drm_gem_object_handle_unreference_unlocked(obj); | 242 | drm_gem_object_handle_unreference_unlocked(obj); |
@@ -527,6 +532,10 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) | |||
527 | struct drm_gem_object *obj = ptr; | 532 | struct drm_gem_object *obj = ptr; |
528 | struct drm_device *dev = obj->dev; | 533 | struct drm_device *dev = obj->dev; |
529 | 534 | ||
535 | if (obj->import_attach) | ||
536 | drm_prime_remove_imported_buf_handle(&file_priv->prime, | ||
537 | obj->import_attach->dmabuf); | ||
538 | |||
530 | if (dev->driver->gem_close_object) | 539 | if (dev->driver->gem_close_object) |
531 | dev->driver->gem_close_object(obj, file_priv); | 540 | dev->driver->gem_close_object(obj, file_priv); |
532 | 541 | ||
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
new file mode 100644
index 000000000000..1bdf2b54eaf6
--- /dev/null
+++ b/drivers/gpu/drm/drm_prime.c
@@ -0,0 +1,304 @@ | |||
1 | /* | ||
2 | * Copyright © 2012 Red Hat | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Dave Airlie <airlied@redhat.com> | ||
25 | * Rob Clark <rob.clark@linaro.org> | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include <linux/export.h> | ||
30 | #include <linux/dma-buf.h> | ||
31 | #include "drmP.h" | ||
32 | |||
33 | /* | ||
34 | * DMA-BUF/GEM Object references and lifetime overview: | ||
35 | * | ||
36 | * On the export the dma_buf holds a reference to the exporting GEM | ||
37 | * object. It takes this reference in handle_to_fd_ioctl, when it | ||
38 | * first calls .prime_export and stores the exporting GEM object in | ||
39 | * the dma_buf priv. This reference is released when the dma_buf | ||
40 | * object goes away in the driver .release function. | ||
41 | * | ||
42 | * On the import the importing GEM object holds a reference to the | ||
43 | * dma_buf (which in turn holds a ref to the exporting GEM object). | ||
44 | * It takes that reference in the fd_to_handle ioctl. | ||
45 | * It calls dma_buf_get, creates an attachment to it and stores the | ||
46 | * attachment in the GEM object. When this attachment is destroyed | ||
47 | * when the imported object is destroyed, we remove the attachment | ||
48 | * and drop the reference to the dma_buf. | ||
49 | * | ||
50 | * Thus the chain of references always flows in one direction | ||
51 | * (avoiding loops): importing_gem -> dmabuf -> exporting_gem | ||
52 | * | ||
53 | * Self-importing: if userspace is using PRIME as a replacement for flink | ||
54 | * then it will get a fd->handle request for a GEM object that it created. | ||
55 | * Drivers should detect this situation and return back the gem object | ||
56 | * from the dma-buf private. | ||
57 | */ | ||
58 | |||
59 | struct drm_prime_member { | ||
60 | struct list_head entry; | ||
61 | struct dma_buf *dma_buf; | ||
62 | uint32_t handle; | ||
63 | }; | ||
64 | |||
65 | int drm_gem_prime_handle_to_fd(struct drm_device *dev, | ||
66 | struct drm_file *file_priv, uint32_t handle, uint32_t flags, | ||
67 | int *prime_fd) | ||
68 | { | ||
69 | struct drm_gem_object *obj; | ||
70 | void *buf; | ||
71 | |||
72 | obj = drm_gem_object_lookup(dev, file_priv, handle); | ||
73 | if (!obj) | ||
74 | return -ENOENT; | ||
75 | |||
76 | mutex_lock(&file_priv->prime.lock); | ||
77 | /* re-export the original imported object */ | ||
78 | if (obj->import_attach) { | ||
79 | get_dma_buf(obj->import_attach->dmabuf); | ||
80 | *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags); | ||
81 | drm_gem_object_unreference_unlocked(obj); | ||
82 | mutex_unlock(&file_priv->prime.lock); | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | if (obj->export_dma_buf) { | ||
87 | get_dma_buf(obj->export_dma_buf); | ||
88 | *prime_fd = dma_buf_fd(obj->export_dma_buf, flags); | ||
89 | drm_gem_object_unreference_unlocked(obj); | ||
90 | } else { | ||
91 | buf = dev->driver->gem_prime_export(dev, obj, flags); | ||
92 | if (IS_ERR(buf)) { | ||
93 | /* normally the created dma-buf takes ownership of the ref, | ||
94 | * but if that fails then drop the ref | ||
95 | */ | ||
96 | drm_gem_object_unreference_unlocked(obj); | ||
97 | mutex_unlock(&file_priv->prime.lock); | ||
98 | return PTR_ERR(buf); | ||
99 | } | ||
100 | obj->export_dma_buf = buf; | ||
101 | *prime_fd = dma_buf_fd(buf, flags); | ||
102 | } | ||
103 | mutex_unlock(&file_priv->prime.lock); | ||
104 | return 0; | ||
105 | } | ||
106 | EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); | ||
107 | |||
108 | int drm_gem_prime_fd_to_handle(struct drm_device *dev, | ||
109 | struct drm_file *file_priv, int prime_fd, uint32_t *handle) | ||
110 | { | ||
111 | struct dma_buf *dma_buf; | ||
112 | struct drm_gem_object *obj; | ||
113 | int ret; | ||
114 | |||
115 | dma_buf = dma_buf_get(prime_fd); | ||
116 | if (IS_ERR(dma_buf)) | ||
117 | return PTR_ERR(dma_buf); | ||
118 | |||
119 | mutex_lock(&file_priv->prime.lock); | ||
120 | |||
121 | ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime, | ||
122 | dma_buf, handle); | ||
123 | if (!ret) { | ||
124 | ret = 0; | ||
125 | goto out_put; | ||
126 | } | ||
127 | |||
128 | /* never seen this one, need to import */ | ||
129 | obj = dev->driver->gem_prime_import(dev, dma_buf); | ||
130 | if (IS_ERR(obj)) { | ||
131 | ret = PTR_ERR(obj); | ||
132 | goto out_put; | ||
133 | } | ||
134 | |||
135 | ret = drm_gem_handle_create(file_priv, obj, handle); | ||
136 | drm_gem_object_unreference_unlocked(obj); | ||
137 | if (ret) | ||
138 | goto out_put; | ||
139 | |||
140 | ret = drm_prime_add_imported_buf_handle(&file_priv->prime, | ||
141 | dma_buf, *handle); | ||
142 | if (ret) | ||
143 | goto fail; | ||
144 | |||
145 | mutex_unlock(&file_priv->prime.lock); | ||
146 | return 0; | ||
147 | |||
148 | fail: | ||
149 | /* hmm, if driver attached, we are relying on the free-object path | ||
150 | * to detach.. which seems ok.. | ||
151 | */ | ||
152 | drm_gem_object_handle_unreference_unlocked(obj); | ||
153 | out_put: | ||
154 | dma_buf_put(dma_buf); | ||
155 | mutex_unlock(&file_priv->prime.lock); | ||
156 | return ret; | ||
157 | } | ||
158 | EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); | ||
159 | |||
160 | int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, | ||
161 | struct drm_file *file_priv) | ||
162 | { | ||
163 | struct drm_prime_handle *args = data; | ||
164 | uint32_t flags; | ||
165 | |||
166 | if (!drm_core_check_feature(dev, DRIVER_PRIME)) | ||
167 | return -EINVAL; | ||
168 | |||
169 | if (!dev->driver->prime_handle_to_fd) | ||
170 | return -ENOSYS; | ||
171 | |||
172 | /* check flags are valid */ | ||
173 | if (args->flags & ~DRM_CLOEXEC) | ||
174 | return -EINVAL; | ||
175 | |||
176 | /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */ | ||
177 | flags = args->flags & DRM_CLOEXEC; | ||
178 | |||
179 | return dev->driver->prime_handle_to_fd(dev, file_priv, | ||
180 | args->handle, flags, &args->fd); | ||
181 | } | ||
182 | |||
183 | int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, | ||
184 | struct drm_file *file_priv) | ||
185 | { | ||
186 | struct drm_prime_handle *args = data; | ||
187 | |||
188 | if (!drm_core_check_feature(dev, DRIVER_PRIME)) | ||
189 | return -EINVAL; | ||
190 | |||
191 | if (!dev->driver->prime_fd_to_handle) | ||
192 | return -ENOSYS; | ||
193 | |||
194 | return dev->driver->prime_fd_to_handle(dev, file_priv, | ||
195 | args->fd, &args->handle); | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * drm_prime_pages_to_sg | ||
200 | * | ||
201 | * this helper creates an sg table object from a set of pages | ||
202 | * the driver is responsible for mapping the pages into the | ||
203 | * importers address space | ||
204 | */ | ||
205 | struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages) | ||
206 | { | ||
207 | struct sg_table *sg = NULL; | ||
208 | struct scatterlist *iter; | ||
209 | int i; | ||
210 | int ret; | ||
211 | |||
212 | sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); | ||
213 | if (!sg) | ||
214 | goto out; | ||
215 | |||
216 | ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL); | ||
217 | if (ret) | ||
218 | goto out; | ||
219 | |||
220 | for_each_sg(sg->sgl, iter, nr_pages, i) | ||
221 | sg_set_page(iter, pages[i], PAGE_SIZE, 0); | ||
222 | |||
223 | return sg; | ||
224 | out: | ||
225 | kfree(sg); | ||
226 | return NULL; | ||
227 | } | ||
228 | EXPORT_SYMBOL(drm_prime_pages_to_sg); | ||
229 | |||
230 | /* helper function to cleanup a GEM/prime object */ | ||
231 | void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) | ||
232 | { | ||
233 | struct dma_buf_attachment *attach; | ||
234 | struct dma_buf *dma_buf; | ||
235 | attach = obj->import_attach; | ||
236 | if (sg) | ||
237 | dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); | ||
238 | dma_buf = attach->dmabuf; | ||
239 | dma_buf_detach(attach->dmabuf, attach); | ||
240 | /* remove the reference */ | ||
241 | dma_buf_put(dma_buf); | ||
242 | } | ||
243 | EXPORT_SYMBOL(drm_prime_gem_destroy); | ||
244 | |||
245 | void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) | ||
246 | { | ||
247 | INIT_LIST_HEAD(&prime_fpriv->head); | ||
248 | mutex_init(&prime_fpriv->lock); | ||
249 | } | ||
250 | EXPORT_SYMBOL(drm_prime_init_file_private); | ||
251 | |||
252 | void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) | ||
253 | { | ||
254 | struct drm_prime_member *member, *safe; | ||
255 | list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { | ||
256 | list_del(&member->entry); | ||
257 | kfree(member); | ||
258 | } | ||
259 | } | ||
260 | EXPORT_SYMBOL(drm_prime_destroy_file_private); | ||
261 | |||
262 | int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) | ||
263 | { | ||
264 | struct drm_prime_member *member; | ||
265 | |||
266 | member = kmalloc(sizeof(*member), GFP_KERNEL); | ||
267 | if (!member) | ||
268 | return -ENOMEM; | ||
269 | |||
270 | member->dma_buf = dma_buf; | ||
271 | member->handle = handle; | ||
272 | list_add(&member->entry, &prime_fpriv->head); | ||
273 | return 0; | ||
274 | } | ||
275 | EXPORT_SYMBOL(drm_prime_add_imported_buf_handle); | ||
276 | |||
277 | int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) | ||
278 | { | ||
279 | struct drm_prime_member *member; | ||
280 | |||
281 | list_for_each_entry(member, &prime_fpriv->head, entry) { | ||
282 | if (member->dma_buf == dma_buf) { | ||
283 | *handle = member->handle; | ||
284 | return 0; | ||
285 | } | ||
286 | } | ||
287 | return -ENOENT; | ||
288 | } | ||
289 | EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle); | ||
290 | |||
291 | void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) | ||
292 | { | ||
293 | struct drm_prime_member *member, *safe; | ||
294 | |||
295 | mutex_lock(&prime_fpriv->lock); | ||
296 | list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { | ||
297 | if (member->dma_buf == dma_buf) { | ||
298 | list_del(&member->entry); | ||
299 | kfree(member); | ||
300 | } | ||
301 | } | ||
302 | mutex_unlock(&prime_fpriv->lock); | ||
303 | } | ||
304 | EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle); | ||
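Two driver-facing pieces of the new file above fit together: the reference-lifetime comment at the top and the drm_prime_pages_to_sg() helper. As a loose, hypothetical sketch (not code from this commit, and not a complete compilable unit), an exporting driver's dma_buf_ops.map_dma_buf callback could combine them roughly as below; my_bo and its pages/num_pages fields are invented driver-private names.

/* Hypothetical exporter-side fragment: build an sg_table for the object's
 * backing pages and map it for the importing device, which is exactly the
 * job the drm_prime_pages_to_sg() comment leaves to the driver. */
static struct sg_table *my_gem_map_dma_buf(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct my_bo *bo = attach->dmabuf->priv;  /* stored by the exporter at export time */
	struct sg_table *sgt;

	sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	/* Map the table into the importer's DMA address space. */
	if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
		sg_free_table(sgt);
		kfree(sgt);
		return ERR_PTR(-ENOMEM);
	}

	return sgt;
}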
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 4c2cb4a8ad98..5675d93b4205 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -244,7 +244,6 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector, | |||
244 | uint64_t value) | 244 | uint64_t value) |
245 | { | 245 | { |
246 | struct drm_encoder *encoder = connector->encoder; | 246 | struct drm_encoder *encoder = connector->encoder; |
247 | struct backlight_device *psb_bd; | ||
248 | 247 | ||
249 | if (!strcmp(property->name, "scaling mode") && encoder) { | 248 | if (!strcmp(property->name, "scaling mode") && encoder) { |
250 | struct psb_intel_crtc *psb_crtc = | 249 | struct psb_intel_crtc *psb_crtc = |
@@ -301,11 +300,15 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector, | |||
301 | value)) | 300 | value)) |
302 | goto set_prop_error; | 301 | goto set_prop_error; |
303 | else { | 302 | else { |
303 | #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE | ||
304 | struct backlight_device *psb_bd; | ||
305 | |||
304 | psb_bd = mdfld_get_backlight_device(); | 306 | psb_bd = mdfld_get_backlight_device(); |
305 | if (psb_bd) { | 307 | if (psb_bd) { |
306 | psb_bd->props.brightness = value; | 308 | psb_bd->props.brightness = value; |
307 | mdfld_set_brightness(psb_bd); | 309 | mdfld_set_brightness(psb_bd); |
308 | } | 310 | } |
311 | #endif | ||
309 | } | 312 | } |
310 | } | 313 | } |
311 | set_prop_done: | 314 | set_prop_done: |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index fdb7ccefffbd..b505b70dba05 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1502,14 +1502,6 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) | |||
1502 | return 0; | 1502 | return 0; |
1503 | } | 1503 | } |
1504 | 1504 | ||
1505 | static int | ||
1506 | i915_debugfs_common_open(struct inode *inode, | ||
1507 | struct file *filp) | ||
1508 | { | ||
1509 | filp->private_data = inode->i_private; | ||
1510 | return 0; | ||
1511 | } | ||
1512 | |||
1513 | static ssize_t | 1505 | static ssize_t |
1514 | i915_wedged_read(struct file *filp, | 1506 | i915_wedged_read(struct file *filp, |
1515 | char __user *ubuf, | 1507 | char __user *ubuf, |
@@ -1560,7 +1552,7 @@ i915_wedged_write(struct file *filp, | |||
1560 | 1552 | ||
1561 | static const struct file_operations i915_wedged_fops = { | 1553 | static const struct file_operations i915_wedged_fops = { |
1562 | .owner = THIS_MODULE, | 1554 | .owner = THIS_MODULE, |
1563 | .open = i915_debugfs_common_open, | 1555 | .open = simple_open, |
1564 | .read = i915_wedged_read, | 1556 | .read = i915_wedged_read, |
1565 | .write = i915_wedged_write, | 1557 | .write = i915_wedged_write, |
1566 | .llseek = default_llseek, | 1558 | .llseek = default_llseek, |
@@ -1622,7 +1614,7 @@ i915_max_freq_write(struct file *filp, | |||
1622 | 1614 | ||
1623 | static const struct file_operations i915_max_freq_fops = { | 1615 | static const struct file_operations i915_max_freq_fops = { |
1624 | .owner = THIS_MODULE, | 1616 | .owner = THIS_MODULE, |
1625 | .open = i915_debugfs_common_open, | 1617 | .open = simple_open, |
1626 | .read = i915_max_freq_read, | 1618 | .read = i915_max_freq_read, |
1627 | .write = i915_max_freq_write, | 1619 | .write = i915_max_freq_write, |
1628 | .llseek = default_llseek, | 1620 | .llseek = default_llseek, |
@@ -1693,7 +1685,7 @@ i915_cache_sharing_write(struct file *filp, | |||
1693 | 1685 | ||
1694 | static const struct file_operations i915_cache_sharing_fops = { | 1686 | static const struct file_operations i915_cache_sharing_fops = { |
1695 | .owner = THIS_MODULE, | 1687 | .owner = THIS_MODULE, |
1696 | .open = i915_debugfs_common_open, | 1688 | .open = simple_open, |
1697 | .read = i915_cache_sharing_read, | 1689 | .read = i915_cache_sharing_read, |
1698 | .write = i915_cache_sharing_write, | 1690 | .write = i915_cache_sharing_write, |
1699 | .llseek = default_llseek, | 1691 | .llseek = default_llseek, |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9341eb8ce93b..785f67f963ef 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1183,6 +1183,21 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) | |||
1183 | return can_switch; | 1183 | return can_switch; |
1184 | } | 1184 | } |
1185 | 1185 | ||
1186 | static bool | ||
1187 | intel_enable_ppgtt(struct drm_device *dev) | ||
1188 | { | ||
1189 | if (i915_enable_ppgtt >= 0) | ||
1190 | return i915_enable_ppgtt; | ||
1191 | |||
1192 | #ifdef CONFIG_INTEL_IOMMU | ||
1193 | /* Disable ppgtt on SNB if VT-d is on. */ | ||
1194 | if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) | ||
1195 | return false; | ||
1196 | #endif | ||
1197 | |||
1198 | return true; | ||
1199 | } | ||
1200 | |||
1186 | static int i915_load_gem_init(struct drm_device *dev) | 1201 | static int i915_load_gem_init(struct drm_device *dev) |
1187 | { | 1202 | { |
1188 | struct drm_i915_private *dev_priv = dev->dev_private; | 1203 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1197,7 +1212,7 @@ static int i915_load_gem_init(struct drm_device *dev) | |||
1197 | drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); | 1212 | drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); |
1198 | 1213 | ||
1199 | mutex_lock(&dev->struct_mutex); | 1214 | mutex_lock(&dev->struct_mutex); |
1200 | if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) { | 1215 | if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { |
1201 | /* PPGTT pdes are stolen from global gtt ptes, so shrink the | 1216 | /* PPGTT pdes are stolen from global gtt ptes, so shrink the |
1202 | * aperture accordingly when using aliasing ppgtt. */ | 1217 | * aperture accordingly when using aliasing ppgtt. */ |
1203 | gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; | 1218 | gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; |
@@ -1207,8 +1222,10 @@ static int i915_load_gem_init(struct drm_device *dev) | |||
1207 | i915_gem_do_init(dev, 0, mappable_size, gtt_size); | 1222 | i915_gem_do_init(dev, 0, mappable_size, gtt_size); |
1208 | 1223 | ||
1209 | ret = i915_gem_init_aliasing_ppgtt(dev); | 1224 | ret = i915_gem_init_aliasing_ppgtt(dev); |
1210 | if (ret) | 1225 | if (ret) { |
1226 | mutex_unlock(&dev->struct_mutex); | ||
1211 | return ret; | 1227 | return ret; |
1228 | } | ||
1212 | } else { | 1229 | } else { |
1213 | /* Let GEM Manage all of the aperture. | 1230 | /* Let GEM Manage all of the aperture. |
1214 | * | 1231 | * |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0694e170a338..dfa55e7478fb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -66,7 +66,11 @@ MODULE_PARM_DESC(semaphores, | |||
66 | int i915_enable_rc6 __read_mostly = -1; | 66 | int i915_enable_rc6 __read_mostly = -1; |
67 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | 67 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); |
68 | MODULE_PARM_DESC(i915_enable_rc6, | 68 | MODULE_PARM_DESC(i915_enable_rc6, |
69 | "Enable power-saving render C-state 6 (default: -1 (use per-chip default)"); | 69 | "Enable power-saving render C-state 6. " |
70 | "Different stages can be selected via bitmask values " | ||
71 | "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " | ||
72 | "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " | ||
73 | "default: -1 (use per-chip default)"); | ||
70 | 74 | ||
71 | int i915_enable_fbc __read_mostly = -1; | 75 | int i915_enable_fbc __read_mostly = -1; |
72 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); | 76 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); |
@@ -103,8 +107,8 @@ MODULE_PARM_DESC(enable_hangcheck, | |||
103 | "WARNING: Disabling this can cause system wide hangs. " | 107 | "WARNING: Disabling this can cause system wide hangs. " |
104 | "(default: true)"); | 108 | "(default: true)"); |
105 | 109 | ||
106 | bool i915_enable_ppgtt __read_mostly = 1; | 110 | int i915_enable_ppgtt __read_mostly = -1; |
107 | module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600); | 111 | module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); |
108 | MODULE_PARM_DESC(i915_enable_ppgtt, | 112 | MODULE_PARM_DESC(i915_enable_ppgtt, |
109 | "Enable PPGTT (default: true)"); | 113 | "Enable PPGTT (default: true)"); |
110 | 114 | ||
@@ -292,6 +296,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */ | |||
292 | INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ | 296 | INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ |
293 | INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ | 297 | INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ |
294 | INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ | 298 | INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ |
299 | INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ | ||
295 | {0, 0, 0} | 300 | {0, 0, 0} |
296 | }; | 301 | }; |
297 | 302 | ||
@@ -467,6 +472,10 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
467 | /* Modeset on resume, not lid events */ | 472 | /* Modeset on resume, not lid events */ |
468 | dev_priv->modeset_on_lid = 0; | 473 | dev_priv->modeset_on_lid = 0; |
469 | 474 | ||
475 | console_lock(); | ||
476 | intel_fbdev_set_suspend(dev, 1); | ||
477 | console_unlock(); | ||
478 | |||
470 | return 0; | 479 | return 0; |
471 | } | 480 | } |
472 | 481 | ||
@@ -529,7 +538,9 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
529 | drm_irq_install(dev); | 538 | drm_irq_install(dev); |
530 | 539 | ||
531 | /* Resume the modeset for every activated CRTC */ | 540 | /* Resume the modeset for every activated CRTC */ |
541 | mutex_lock(&dev->mode_config.mutex); | ||
532 | drm_helper_resume_force_mode(dev); | 542 | drm_helper_resume_force_mode(dev); |
543 | mutex_unlock(&dev->mode_config.mutex); | ||
533 | 544 | ||
534 | if (IS_IRONLAKE_M(dev)) | 545 | if (IS_IRONLAKE_M(dev)) |
535 | ironlake_enable_rc6(dev); | 546 | ironlake_enable_rc6(dev); |
@@ -539,6 +550,9 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
539 | 550 | ||
540 | dev_priv->modeset_on_lid = 0; | 551 | dev_priv->modeset_on_lid = 0; |
541 | 552 | ||
553 | console_lock(); | ||
554 | intel_fbdev_set_suspend(dev, 0); | ||
555 | console_unlock(); | ||
542 | return error; | 556 | return error; |
543 | } | 557 | } |
544 | 558 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c0f19f572004..5fabc6c31fec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1053,6 +1053,27 @@ struct drm_i915_file_private { | |||
1053 | 1053 | ||
1054 | #include "i915_trace.h" | 1054 | #include "i915_trace.h" |
1055 | 1055 | ||
1056 | /** | ||
1057 | * RC6 is a special power stage which allows the GPU to enter an very | ||
1058 | * low-voltage mode when idle, using down to 0V while at this stage. This | ||
1059 | * stage is entered automatically when the GPU is idle when RC6 support is | ||
1060 | * enabled, and as soon as new workload arises GPU wakes up automatically as well. | ||
1061 | * | ||
1062 | * There are different RC6 modes available in Intel GPU, which differentiate | ||
1063 | * among each other with the latency required to enter and leave RC6 and | ||
1064 | * voltage consumed by the GPU in different states. | ||
1065 | * | ||
1066 | * The combination of the following flags define which states GPU is allowed | ||
1067 | * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and | ||
1068 | * RC6pp is deepest RC6. Their support by hardware varies according to the | ||
1069 | * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one | ||
1070 | * which brings the most power savings; deeper states save more power, but | ||
1071 | * require higher latency to switch to and wake up. | ||
1072 | */ | ||
1073 | #define INTEL_RC6_ENABLE (1<<0) | ||
1074 | #define INTEL_RC6p_ENABLE (1<<1) | ||
1075 | #define INTEL_RC6pp_ENABLE (1<<2) | ||
1076 | |||
1056 | extern struct drm_ioctl_desc i915_ioctls[]; | 1077 | extern struct drm_ioctl_desc i915_ioctls[]; |
1057 | extern int i915_max_ioctl; | 1078 | extern int i915_max_ioctl; |
1058 | extern unsigned int i915_fbpercrtc __always_unused; | 1079 | extern unsigned int i915_fbpercrtc __always_unused; |
@@ -1065,7 +1086,7 @@ extern int i915_vbt_sdvo_panel_type __read_mostly; | |||
1065 | extern int i915_enable_rc6 __read_mostly; | 1086 | extern int i915_enable_rc6 __read_mostly; |
1066 | extern int i915_enable_fbc __read_mostly; | 1087 | extern int i915_enable_fbc __read_mostly; |
1067 | extern bool i915_enable_hangcheck __read_mostly; | 1088 | extern bool i915_enable_hangcheck __read_mostly; |
1068 | extern bool i915_enable_ppgtt __read_mostly; | 1089 | extern int i915_enable_ppgtt __read_mostly; |
1069 | 1090 | ||
1070 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); | 1091 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
1071 | extern int i915_resume(struct drm_device *dev); | 1092 | extern int i915_resume(struct drm_device *dev); |
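The RC6 comment block added to i915_drv.h above describes i915_enable_rc6 as a bitmask rather than a boolean, so a boot parameter of i915.i915_enable_rc6=3 requests RC6 plus deep RC6, and 7 requests all three states. A tiny stand-alone sketch (an illustration, not kernel code) of how such a mask decomposes, using the same flag values as the new defines:

/* Illustration only: decode an i915_enable_rc6-style bitmask. The flag values
 * mirror INTEL_RC6_ENABLE, INTEL_RC6p_ENABLE and INTEL_RC6pp_ENABLE from the
 * hunk above. */
#include <stdio.h>

#define RC6_ENABLE	(1 << 0)	/* plain RC6 */
#define RC6p_ENABLE	(1 << 1)	/* deep RC6 */
#define RC6pp_ENABLE	(1 << 2)	/* deepest RC6 */

static void describe(int mode)
{
	printf("%d => RC6 %s, RC6p %s, RC6pp %s\n", mode,
	       (mode & RC6_ENABLE)   ? "on" : "off",
	       (mode & RC6p_ENABLE)  ? "on" : "off",
	       (mode & RC6pp_ENABLE) ? "on" : "off");
}

int main(void)
{
	describe(3);	/* rc6 + deep rc6, the example from the parameter help text */
	describe(7);	/* all three states allowed */
	return 0;
}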
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1f441f5c2405..4c65c639f772 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1472,16 +1472,19 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | |||
1472 | list_move_tail(&obj->ring_list, &ring->active_list); | 1472 | list_move_tail(&obj->ring_list, &ring->active_list); |
1473 | 1473 | ||
1474 | obj->last_rendering_seqno = seqno; | 1474 | obj->last_rendering_seqno = seqno; |
1475 | if (obj->fenced_gpu_access) { | ||
1476 | struct drm_i915_fence_reg *reg; | ||
1477 | |||
1478 | BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE); | ||
1479 | 1475 | ||
1476 | if (obj->fenced_gpu_access) { | ||
1480 | obj->last_fenced_seqno = seqno; | 1477 | obj->last_fenced_seqno = seqno; |
1481 | obj->last_fenced_ring = ring; | 1478 | obj->last_fenced_ring = ring; |
1482 | 1479 | ||
1483 | reg = &dev_priv->fence_regs[obj->fence_reg]; | 1480 | /* Bump MRU to take account of the delayed flush */ |
1484 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); | 1481 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
1482 | struct drm_i915_fence_reg *reg; | ||
1483 | |||
1484 | reg = &dev_priv->fence_regs[obj->fence_reg]; | ||
1485 | list_move_tail(®->lru_list, | ||
1486 | &dev_priv->mm.fence_list); | ||
1487 | } | ||
1485 | } | 1488 | } |
1486 | } | 1489 | } |
1487 | 1490 | ||
@@ -3754,12 +3757,32 @@ void i915_gem_init_ppgtt(struct drm_device *dev) | |||
3754 | drm_i915_private_t *dev_priv = dev->dev_private; | 3757 | drm_i915_private_t *dev_priv = dev->dev_private; |
3755 | uint32_t pd_offset; | 3758 | uint32_t pd_offset; |
3756 | struct intel_ring_buffer *ring; | 3759 | struct intel_ring_buffer *ring; |
3760 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | ||
3761 | uint32_t __iomem *pd_addr; | ||
3762 | uint32_t pd_entry; | ||
3757 | int i; | 3763 | int i; |
3758 | 3764 | ||
3759 | if (!dev_priv->mm.aliasing_ppgtt) | 3765 | if (!dev_priv->mm.aliasing_ppgtt) |
3760 | return; | 3766 | return; |
3761 | 3767 | ||
3762 | pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset; | 3768 | |
3769 | pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); | ||
3770 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | ||
3771 | dma_addr_t pt_addr; | ||
3772 | |||
3773 | if (dev_priv->mm.gtt->needs_dmar) | ||
3774 | pt_addr = ppgtt->pt_dma_addr[i]; | ||
3775 | else | ||
3776 | pt_addr = page_to_phys(ppgtt->pt_pages[i]); | ||
3777 | |||
3778 | pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); | ||
3779 | pd_entry |= GEN6_PDE_VALID; | ||
3780 | |||
3781 | writel(pd_entry, pd_addr + i); | ||
3782 | } | ||
3783 | readl(pd_addr); | ||
3784 | |||
3785 | pd_offset = ppgtt->pd_offset; | ||
3763 | pd_offset /= 64; /* in cachelines, */ | 3786 | pd_offset /= 64; /* in cachelines, */ |
3764 | pd_offset <<= 16; | 3787 | pd_offset <<= 16; |
3765 | 3788 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 81687af00893..f51a696486cb 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -498,8 +498,8 @@ pin_and_fence_object(struct drm_i915_gem_object *obj, | |||
498 | if (ret) | 498 | if (ret) |
499 | goto err_unpin; | 499 | goto err_unpin; |
500 | } | 500 | } |
501 | obj->pending_fenced_gpu_access = true; | ||
501 | } | 502 | } |
502 | obj->pending_fenced_gpu_access = need_fence; | ||
503 | } | 503 | } |
504 | 504 | ||
505 | entry->offset = obj->gtt_offset; | 505 | entry->offset = obj->gtt_offset; |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2eacd78bb93b..a135c61f4119 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -65,9 +65,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | |||
65 | { | 65 | { |
66 | struct drm_i915_private *dev_priv = dev->dev_private; | 66 | struct drm_i915_private *dev_priv = dev->dev_private; |
67 | struct i915_hw_ppgtt *ppgtt; | 67 | struct i915_hw_ppgtt *ppgtt; |
68 | uint32_t pd_entry; | ||
69 | unsigned first_pd_entry_in_global_pt; | 68 | unsigned first_pd_entry_in_global_pt; |
70 | uint32_t __iomem *pd_addr; | ||
71 | int i; | 69 | int i; |
72 | int ret = -ENOMEM; | 70 | int ret = -ENOMEM; |
73 | 71 | ||
@@ -100,7 +98,6 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | |||
100 | goto err_pt_alloc; | 98 | goto err_pt_alloc; |
101 | } | 99 | } |
102 | 100 | ||
103 | pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt; | ||
104 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | 101 | for (i = 0; i < ppgtt->num_pd_entries; i++) { |
105 | dma_addr_t pt_addr; | 102 | dma_addr_t pt_addr; |
106 | if (dev_priv->mm.gtt->needs_dmar) { | 103 | if (dev_priv->mm.gtt->needs_dmar) { |
@@ -117,13 +114,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | |||
117 | ppgtt->pt_dma_addr[i] = pt_addr; | 114 | ppgtt->pt_dma_addr[i] = pt_addr; |
118 | } else | 115 | } else |
119 | pt_addr = page_to_phys(ppgtt->pt_pages[i]); | 116 | pt_addr = page_to_phys(ppgtt->pt_pages[i]); |
120 | |||
121 | pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); | ||
122 | pd_entry |= GEN6_PDE_VALID; | ||
123 | |||
124 | writel(pd_entry, pd_addr + i); | ||
125 | } | 117 | } |
126 | readl(pd_addr); | ||
127 | 118 | ||
128 | ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma; | 119 | ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma; |
129 | 120 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3886cf051bac..2abf4eb94039 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2385,6 +2385,7 @@ | |||
2385 | #define PIPECONF_DISABLE 0 | 2385 | #define PIPECONF_DISABLE 0 |
2386 | #define PIPECONF_DOUBLE_WIDE (1<<30) | 2386 | #define PIPECONF_DOUBLE_WIDE (1<<30) |
2387 | #define I965_PIPECONF_ACTIVE (1<<30) | 2387 | #define I965_PIPECONF_ACTIVE (1<<30) |
2388 | #define PIPECONF_FRAME_START_DELAY_MASK (3<<27) | ||
2388 | #define PIPECONF_SINGLE_WIDE 0 | 2389 | #define PIPECONF_SINGLE_WIDE 0 |
2389 | #define PIPECONF_PIPE_UNLOCKED 0 | 2390 | #define PIPECONF_PIPE_UNLOCKED 0 |
2390 | #define PIPECONF_PIPE_LOCKED (1<<25) | 2391 | #define PIPECONF_PIPE_LOCKED (1<<25) |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 8168d8f8a634..b48fc2a8410c 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -24,6 +24,7 @@ | |||
24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | #include <linux/dmi.h> | ||
27 | #include <drm/drm_dp_helper.h> | 28 | #include <drm/drm_dp_helper.h> |
28 | #include "drmP.h" | 29 | #include "drmP.h" |
29 | #include "drm.h" | 30 | #include "drm.h" |
@@ -621,6 +622,26 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) | |||
621 | dev_priv->edp.bpp = 18; | 622 | dev_priv->edp.bpp = 18; |
622 | } | 623 | } |
623 | 624 | ||
625 | static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) | ||
626 | { | ||
627 | DRM_DEBUG_KMS("Falling back to manually reading VBT from " | ||
628 | "VBIOS ROM for %s\n", | ||
629 | id->ident); | ||
630 | return 1; | ||
631 | } | ||
632 | |||
633 | static const struct dmi_system_id intel_no_opregion_vbt[] = { | ||
634 | { | ||
635 | .callback = intel_no_opregion_vbt_callback, | ||
636 | .ident = "ThinkCentre A57", | ||
637 | .matches = { | ||
638 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
639 | DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), | ||
640 | }, | ||
641 | }, | ||
642 | { } | ||
643 | }; | ||
644 | |||
624 | /** | 645 | /** |
625 | * intel_parse_bios - find VBT and initialize settings from the BIOS | 646 | * intel_parse_bios - find VBT and initialize settings from the BIOS |
626 | * @dev: DRM device | 647 | * @dev: DRM device |
@@ -641,7 +662,7 @@ intel_parse_bios(struct drm_device *dev) | |||
641 | init_vbt_defaults(dev_priv); | 662 | init_vbt_defaults(dev_priv); |
642 | 663 | ||
643 | /* XXX Should this validation be moved to intel_opregion.c? */ | 664 | /* XXX Should this validation be moved to intel_opregion.c? */ |
644 | if (dev_priv->opregion.vbt) { | 665 | if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) { |
645 | struct vbt_header *vbt = dev_priv->opregion.vbt; | 666 | struct vbt_header *vbt = dev_priv->opregion.vbt; |
646 | if (memcmp(vbt->signature, "$VBT", 4) == 0) { | 667 | if (memcmp(vbt->signature, "$VBT", 4) == 0) { |
647 | DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n", | 668 | DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n", |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d514719f65e2..91b35fd1db8c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5539,7 +5539,8 @@ void ironlake_init_pch_refclk(struct drm_device *dev) | |||
5539 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { | 5539 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { |
5540 | DRM_DEBUG_KMS("Using SSC on panel\n"); | 5540 | DRM_DEBUG_KMS("Using SSC on panel\n"); |
5541 | temp |= DREF_SSC1_ENABLE; | 5541 | temp |= DREF_SSC1_ENABLE; |
5542 | } | 5542 | } else |
5543 | temp &= ~DREF_SSC1_ENABLE; | ||
5543 | 5544 | ||
5544 | /* Get SSC going before enabling the outputs */ | 5545 | /* Get SSC going before enabling the outputs */ |
5545 | I915_WRITE(PCH_DREF_CONTROL, temp); | 5546 | I915_WRITE(PCH_DREF_CONTROL, temp); |
@@ -7580,6 +7581,12 @@ static void intel_sanitize_modesetting(struct drm_device *dev, | |||
7580 | struct drm_i915_private *dev_priv = dev->dev_private; | 7581 | struct drm_i915_private *dev_priv = dev->dev_private; |
7581 | u32 reg, val; | 7582 | u32 reg, val; |
7582 | 7583 | ||
7584 | /* Clear any frame start delays used for debugging left by the BIOS */ | ||
7585 | for_each_pipe(pipe) { | ||
7586 | reg = PIPECONF(pipe); | ||
7587 | I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); | ||
7588 | } | ||
7589 | |||
7583 | if (HAS_PCH_SPLIT(dev)) | 7590 | if (HAS_PCH_SPLIT(dev)) |
7584 | return; | 7591 | return; |
7585 | 7592 | ||
@@ -8215,7 +8222,7 @@ void intel_init_emon(struct drm_device *dev) | |||
8215 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); | 8222 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); |
8216 | } | 8223 | } |
8217 | 8224 | ||
8218 | static bool intel_enable_rc6(struct drm_device *dev) | 8225 | static int intel_enable_rc6(struct drm_device *dev) |
8219 | { | 8226 | { |
8220 | /* | 8227 | /* |
8221 | * Respect the kernel parameter if it is set | 8228 | * Respect the kernel parameter if it is set |
@@ -8233,11 +8240,11 @@ static bool intel_enable_rc6(struct drm_device *dev) | |||
8233 | * Disable rc6 on Sandybridge | 8240 | * Disable rc6 on Sandybridge |
8234 | */ | 8241 | */ |
8235 | if (INTEL_INFO(dev)->gen == 6) { | 8242 | if (INTEL_INFO(dev)->gen == 6) { |
8236 | DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n"); | 8243 | DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); |
8237 | return 0; | 8244 | return INTEL_RC6_ENABLE; |
8238 | } | 8245 | } |
8239 | DRM_DEBUG_DRIVER("RC6 enabled\n"); | 8246 | DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); |
8240 | return 1; | 8247 | return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); |
8241 | } | 8248 | } |
8242 | 8249 | ||
8243 | void gen6_enable_rps(struct drm_i915_private *dev_priv) | 8250 | void gen6_enable_rps(struct drm_i915_private *dev_priv) |
@@ -8247,6 +8254,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
8247 | u32 pcu_mbox, rc6_mask = 0; | 8254 | u32 pcu_mbox, rc6_mask = 0; |
8248 | u32 gtfifodbg; | 8255 | u32 gtfifodbg; |
8249 | int cur_freq, min_freq, max_freq; | 8256 | int cur_freq, min_freq, max_freq; |
8257 | int rc6_mode; | ||
8250 | int i; | 8258 | int i; |
8251 | 8259 | ||
8252 | /* Here begins a magic sequence of register writes to enable | 8260 | /* Here begins a magic sequence of register writes to enable |
@@ -8284,9 +8292,20 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
8284 | I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); | 8292 | I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); |
8285 | I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ | 8293 | I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ |
8286 | 8294 | ||
8287 | if (intel_enable_rc6(dev_priv->dev)) | 8295 | rc6_mode = intel_enable_rc6(dev_priv->dev); |
8288 | rc6_mask = GEN6_RC_CTL_RC6_ENABLE | | 8296 | if (rc6_mode & INTEL_RC6_ENABLE) |
8289 | ((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0); | 8297 | rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; |
8298 | |||
8299 | if (rc6_mode & INTEL_RC6p_ENABLE) | ||
8300 | rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; | ||
8301 | |||
8302 | if (rc6_mode & INTEL_RC6pp_ENABLE) | ||
8303 | rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; | ||
8304 | |||
8305 | DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", | ||
8306 | (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off", | ||
8307 | (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off", | ||
8308 | (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off"); | ||
8290 | 8309 | ||
8291 | I915_WRITE(GEN6_RC_CONTROL, | 8310 | I915_WRITE(GEN6_RC_CONTROL, |
8292 | rc6_mask | | 8311 | rc6_mask | |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9cec6c3937fa..5a14149b3794 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -382,7 +382,7 @@ extern int intel_framebuffer_init(struct drm_device *dev, | |||
382 | struct drm_i915_gem_object *obj); | 382 | struct drm_i915_gem_object *obj); |
383 | extern int intel_fbdev_init(struct drm_device *dev); | 383 | extern int intel_fbdev_init(struct drm_device *dev); |
384 | extern void intel_fbdev_fini(struct drm_device *dev); | 384 | extern void intel_fbdev_fini(struct drm_device *dev); |
385 | 385 | extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); | |
386 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); | 386 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); |
387 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); | 387 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); |
388 | extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); | 388 | extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 2d8766978388..19ecd78b8a2c 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -254,6 +254,16 @@ void intel_fbdev_fini(struct drm_device *dev) | |||
254 | kfree(dev_priv->fbdev); | 254 | kfree(dev_priv->fbdev); |
255 | dev_priv->fbdev = NULL; | 255 | dev_priv->fbdev = NULL; |
256 | } | 256 | } |
257 | |||
258 | void intel_fbdev_set_suspend(struct drm_device *dev, int state) | ||
259 | { | ||
260 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
261 | if (!dev_priv->fbdev) | ||
262 | return; | ||
263 | |||
264 | fb_set_suspend(dev_priv->fbdev->helper.fbdev, state); | ||
265 | } | ||
266 | |||
257 | MODULE_LICENSE("GPL and additional rights"); | 267 | MODULE_LICENSE("GPL and additional rights"); |
258 | 268 | ||
259 | void intel_fb_output_poll_changed(struct drm_device *dev) | 269 | void intel_fb_output_poll_changed(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c5c0973af8a1..95db2e988227 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -755,6 +755,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
755 | DMI_MATCH(DMI_BOARD_NAME, "hp st5747"), | 755 | DMI_MATCH(DMI_BOARD_NAME, "hp st5747"), |
756 | }, | 756 | }, |
757 | }, | 757 | }, |
758 | { | ||
759 | .callback = intel_no_lvds_dmi_callback, | ||
760 | .ident = "MSI Wind Box DC500", | ||
761 | .matches = { | ||
762 | DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), | ||
763 | DMI_MATCH(DMI_BOARD_NAME, "MS-7469"), | ||
764 | }, | ||
765 | }, | ||
758 | 766 | ||
759 | { } /* terminating entry */ | 767 | { } /* terminating entry */ |
760 | }; | 768 | }; |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fc66af6a9448..e25581a9f60f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -626,7 +626,7 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring) | |||
626 | /* Workaround to force correct ordering between irq and seqno writes on | 626 | /* Workaround to force correct ordering between irq and seqno writes on |
627 | * ivb (and maybe also on snb) by reading from a CS register (like | 627 | * ivb (and maybe also on snb) by reading from a CS register (like |
628 | * ACTHD) before reading the status page. */ | 628 | * ACTHD) before reading the status page. */ |
629 | if (IS_GEN7(dev)) | 629 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
630 | intel_ring_get_active_head(ring); | 630 | intel_ring_get_active_head(ring); |
631 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 631 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
632 | } | 632 | } |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7aa0450399a1..a464771a7240 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -411,6 +411,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
411 | 411 | ||
412 | old_obj = intel_plane->obj; | 412 | old_obj = intel_plane->obj; |
413 | 413 | ||
414 | src_w = src_w >> 16; | ||
415 | src_h = src_h >> 16; | ||
416 | |||
414 | /* Pipe must be running... */ | 417 | /* Pipe must be running... */ |
415 | if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE)) | 418 | if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE)) |
416 | return -EINVAL; | 419 | return -EINVAL; |
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ca1639918f57..97a81260485a 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -13,6 +13,7 @@ config DRM_NOUVEAU | |||
13 | select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT | 13 | select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT |
14 | select ACPI_WMI if ACPI | 14 | select ACPI_WMI if ACPI |
15 | select MXM_WMI if ACPI | 15 | select MXM_WMI if ACPI |
16 | select POWER_SUPPLY | ||
16 | help | 17 | help |
17 | Choose this option for open-source nVidia support. | 18 | Choose this option for open-source nVidia support. |
18 | 19 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 637afe71de56..80963d05b54a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -177,14 +177,15 @@ bios_shadow_pci(struct nvbios *bios) | |||
177 | 177 | ||
178 | if (!pci_enable_rom(pdev)) { | 178 | if (!pci_enable_rom(pdev)) { |
179 | void __iomem *rom = pci_map_rom(pdev, &length); | 179 | void __iomem *rom = pci_map_rom(pdev, &length); |
180 | if (rom) { | 180 | if (rom && length) { |
181 | bios->data = kmalloc(length, GFP_KERNEL); | 181 | bios->data = kmalloc(length, GFP_KERNEL); |
182 | if (bios->data) { | 182 | if (bios->data) { |
183 | memcpy_fromio(bios->data, rom, length); | 183 | memcpy_fromio(bios->data, rom, length); |
184 | bios->length = length; | 184 | bios->length = length; |
185 | } | 185 | } |
186 | pci_unmap_rom(pdev, rom); | ||
187 | } | 186 | } |
187 | if (rom) | ||
188 | pci_unmap_rom(pdev, rom); | ||
188 | 189 | ||
189 | pci_disable_rom(pdev); | 190 | pci_disable_rom(pdev); |
190 | } | 191 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 44e6416d4a33..846afb0bfef4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -436,11 +436,11 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, | |||
436 | } | 436 | } |
437 | 437 | ||
438 | if (dev_priv->card_type < NV_C0) { | 438 | if (dev_priv->card_type < NV_C0) { |
439 | init->subchan[0].handle = NvSw; | 439 | init->subchan[0].handle = 0x00000000; |
440 | init->subchan[0].grclass = NV_SW; | 440 | init->subchan[0].grclass = 0x0000; |
441 | init->nr_subchan = 1; | 441 | init->subchan[1].handle = NvSw; |
442 | } else { | 442 | init->subchan[1].grclass = NV_SW; |
443 | init->nr_subchan = 0; | 443 | init->nr_subchan = 2; |
444 | } | 444 | } |
445 | 445 | ||
446 | /* Named memory object area */ | 446 | /* Named memory object area */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8f510fd956b0..fa860358add1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -654,10 +654,13 @@ nouveau_connector_detect_depth(struct drm_connector *connector) | |||
654 | if (nv_connector->edid && connector->display_info.bpc) | 654 | if (nv_connector->edid && connector->display_info.bpc) |
655 | return; | 655 | return; |
656 | 656 | ||
657 | /* if not, we're out of options unless we're LVDS, default to 6bpc */ | 657 | /* if not, we're out of options unless we're LVDS, default to 8bpc */ |
658 | connector->display_info.bpc = 6; | 658 | if (nv_encoder->dcb->type != OUTPUT_LVDS) { |
659 | if (nv_encoder->dcb->type != OUTPUT_LVDS) | 659 | connector->display_info.bpc = 8; |
660 | return; | 660 | return; |
661 | } | ||
662 | |||
663 | connector->display_info.bpc = 6; | ||
661 | 664 | ||
662 | /* LVDS: panel straps */ | 665 | /* LVDS: panel straps */ |
663 | if (bios->fp_no_ddc) { | 666 | if (bios->fp_no_ddc) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index bcf0fd9e313e..23d4edf992b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -48,8 +48,8 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *, | |||
48 | 48 | ||
49 | /* Hardcoded object assignments to subchannels (subchannel id). */ | 49 | /* Hardcoded object assignments to subchannels (subchannel id). */ |
50 | enum { | 50 | enum { |
51 | NvSubSw = 0, | 51 | NvSubM2MF = 0, |
52 | NvSubM2MF = 1, | 52 | NvSubSw = 1, |
53 | NvSub2D = 2, | 53 | NvSub2D = 2, |
54 | NvSubCtxSurf2D = 2, | 54 | NvSubCtxSurf2D = 2, |
55 | NvSubGdiRect = 3, | 55 | NvSubGdiRect = 3, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 8f4f914d9eab..e2be95af2e52 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -315,8 +315,8 @@ nouveau_i2c_init(struct drm_device *dev) | |||
315 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 315 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
316 | struct nvbios *bios = &dev_priv->vbios; | 316 | struct nvbios *bios = &dev_priv->vbios; |
317 | struct nouveau_i2c_chan *port; | 317 | struct nouveau_i2c_chan *port; |
318 | u8 version = 0x00, entries, recordlen; | ||
318 | u8 *i2c, *entry, legacy[2][4] = {}; | 319 | u8 *i2c, *entry, legacy[2][4] = {}; |
319 | u8 version, entries, recordlen; | ||
320 | int ret, i; | 320 | int ret, i; |
321 | 321 | ||
322 | INIT_LIST_HEAD(&dev_priv->i2c_ports); | 322 | INIT_LIST_HEAD(&dev_priv->i2c_ports); |
@@ -346,12 +346,12 @@ nouveau_i2c_init(struct drm_device *dev) | |||
346 | if (i2c[7]) legacy[1][1] = i2c[7]; | 346 | if (i2c[7]) legacy[1][1] = i2c[7]; |
347 | } | 347 | } |
348 | 348 | ||
349 | if (i2c && version >= 0x30) { | 349 | if (version >= 0x30) { |
350 | entry = i2c[1] + i2c; | 350 | entry = i2c[1] + i2c; |
351 | entries = i2c[2]; | 351 | entries = i2c[2]; |
352 | recordlen = i2c[3]; | 352 | recordlen = i2c[3]; |
353 | } else | 353 | } else |
354 | if (i2c) { | 354 | if (version) { |
355 | entry = i2c; | 355 | entry = i2c; |
356 | entries = 16; | 356 | entries = 16; |
357 | recordlen = 4; | 357 | recordlen = 4; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index a3ae91fa8141..c2a8511e855a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -642,7 +642,7 @@ nouveau_card_channel_init(struct drm_device *dev) | |||
642 | OUT_RING (chan, chan->vram_handle); | 642 | OUT_RING (chan, chan->vram_handle); |
643 | OUT_RING (chan, chan->gart_handle); | 643 | OUT_RING (chan, chan->gart_handle); |
644 | } else | 644 | } else |
645 | if (dev_priv->card_type <= NV_C0) { | 645 | if (dev_priv->card_type <= NV_D0) { |
646 | ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039); | 646 | ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039); |
647 | if (ret) | 647 | if (ret) |
648 | goto error; | 648 | goto error; |
@@ -852,7 +852,7 @@ nouveau_card_init(struct drm_device *dev) | |||
852 | if (ret) | 852 | if (ret) |
853 | goto out_pm; | 853 | goto out_pm; |
854 | 854 | ||
855 | if (!dev_priv->noaccel) { | 855 | if (dev_priv->eng[NVOBJ_ENGINE_GR]) { |
856 | ret = nouveau_card_channel_init(dev); | 856 | ret = nouveau_card_channel_init(dev); |
857 | if (ret) | 857 | if (ret) |
858 | goto out_fence; | 858 | goto out_fence; |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index d1bd239cd9e9..5ce9bf51a8de 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -1306,8 +1306,11 @@ struct atom_context *atom_parse(struct card_info *card, void *bios) | |||
1306 | 1306 | ||
1307 | int atom_asic_init(struct atom_context *ctx) | 1307 | int atom_asic_init(struct atom_context *ctx) |
1308 | { | 1308 | { |
1309 | struct radeon_device *rdev = ctx->card->dev->dev_private; | ||
1309 | int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); | 1310 | int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); |
1310 | uint32_t ps[16]; | 1311 | uint32_t ps[16]; |
1312 | int ret; | ||
1313 | |||
1311 | memset(ps, 0, 64); | 1314 | memset(ps, 0, 64); |
1312 | 1315 | ||
1313 | ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR)); | 1316 | ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR)); |
@@ -1317,7 +1320,17 @@ int atom_asic_init(struct atom_context *ctx) | |||
1317 | 1320 | ||
1318 | if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) | 1321 | if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) |
1319 | return 1; | 1322 | return 1; |
1320 | return atom_execute_table(ctx, ATOM_CMD_INIT, ps); | 1323 | ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps); |
1324 | if (ret) | ||
1325 | return ret; | ||
1326 | |||
1327 | memset(ps, 0, 64); | ||
1328 | |||
1329 | if (rdev->family < CHIP_R600) { | ||
1330 | if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL)) | ||
1331 | atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps); | ||
1332 | } | ||
1333 | return ret; | ||
1321 | } | 1334 | } |
1322 | 1335 | ||
1323 | void atom_destroy(struct atom_context *ctx) | 1336 | void atom_destroy(struct atom_context *ctx) |
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index 93cfe2086ba0..25fea631dad2 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h | |||
@@ -44,6 +44,7 @@ | |||
44 | #define ATOM_CMD_SETSCLK 0x0A | 44 | #define ATOM_CMD_SETSCLK 0x0A |
45 | #define ATOM_CMD_SETMCLK 0x0B | 45 | #define ATOM_CMD_SETMCLK 0x0B |
46 | #define ATOM_CMD_SETPCLK 0x0C | 46 | #define ATOM_CMD_SETPCLK 0x0C |
47 | #define ATOM_CMD_SPDFANCNTL 0x39 | ||
47 | 48 | ||
48 | #define ATOM_DATA_FWI_PTR 0xC | 49 | #define ATOM_DATA_FWI_PTR 0xC |
49 | #define ATOM_DATA_IIO_PTR 0x32 | 50 | #define ATOM_DATA_IIO_PTR 0x32 |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 083b3eada001..b5ff1f7b6f7e 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -588,8 +588,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
588 | if (encoder->crtc == crtc) { | 588 | if (encoder->crtc == crtc) { |
589 | radeon_encoder = to_radeon_encoder(encoder); | 589 | radeon_encoder = to_radeon_encoder(encoder); |
590 | connector = radeon_get_connector_for_encoder(encoder); | 590 | connector = radeon_get_connector_for_encoder(encoder); |
591 | if (connector && connector->display_info.bpc) | 591 | /* if (connector && connector->display_info.bpc) |
592 | bpc = connector->display_info.bpc; | 592 | bpc = connector->display_info.bpc; */ |
593 | encoder_mode = atombios_get_encoder_mode(encoder); | 593 | encoder_mode = atombios_get_encoder_mode(encoder); |
594 | is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); | 594 | is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); |
595 | if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || | 595 | if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || |
@@ -965,7 +965,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
965 | struct radeon_connector_atom_dig *dig_connector = | 965 | struct radeon_connector_atom_dig *dig_connector = |
966 | radeon_connector->con_priv; | 966 | radeon_connector->con_priv; |
967 | int dp_clock; | 967 | int dp_clock; |
968 | bpc = connector->display_info.bpc; | 968 | |
969 | /* if (connector->display_info.bpc) | ||
970 | bpc = connector->display_info.bpc; */ | ||
969 | 971 | ||
970 | switch (encoder_mode) { | 972 | switch (encoder_mode) { |
971 | case ATOM_ENCODER_MODE_DP_MST: | 973 | case ATOM_ENCODER_MODE_DP_MST: |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 6c62be226804..c57d85664e77 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -405,10 +405,13 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], | |||
405 | /* get bpc from the EDID */ | 405 | /* get bpc from the EDID */ |
406 | static int convert_bpc_to_bpp(int bpc) | 406 | static int convert_bpc_to_bpp(int bpc) |
407 | { | 407 | { |
408 | #if 0 | ||
408 | if (bpc == 0) | 409 | if (bpc == 0) |
409 | return 24; | 410 | return 24; |
410 | else | 411 | else |
411 | return bpc * 3; | 412 | return bpc * 3; |
413 | #endif | ||
414 | return 24; | ||
412 | } | 415 | } |
413 | 416 | ||
414 | /* get the max pix clock supported by the link rate and lane num */ | 417 | /* get the max pix clock supported by the link rate and lane num */ |
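With its body under #if 0, convert_bpc_to_bpp() now reports 24 bpp no matter what the EDID says, so DP link validation always budgets for a full 8-bit-per-component stream. A small worked example of what that means for the usual (link_rate * lanes * 8) / bpp pixel-clock bound; that formula is the common DisplayPort budgeting rule and is assumed here, not quoted from this file:

/* Worked example: maximum pixel clock a DP link can carry when the driver
 * always budgets 24 bpp. Formula assumed for illustration only. */
#include <stdio.h>

static int max_pix_clock_khz(int link_rate_khz, int lanes, int bpp)
{
    return (link_rate_khz * lanes * 8) / bpp;
}

int main(void)
{
    /* 2.7 GHz (HBR) link rate, 4 lanes */
    int at_24bpp = max_pix_clock_khz(270000, 4, 24);
    /* what a 6 bpc panel could have been allowed before this change */
    int at_18bpp = max_pix_clock_khz(270000, 4, 18);

    printf("HBR x4 @ 24 bpp: %d kHz max pixel clock\n", at_24bpp); /* 360000 */
    printf("HBR x4 @ 18 bpp: %d kHz max pixel clock\n", at_18bpp); /* 480000 */
    return 0;
}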
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 468b874336f9..e607c4d7dd98 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -541,7 +541,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo | |||
541 | dp_clock = dig_connector->dp_clock; | 541 | dp_clock = dig_connector->dp_clock; |
542 | dp_lane_count = dig_connector->dp_lane_count; | 542 | dp_lane_count = dig_connector->dp_lane_count; |
543 | hpd_id = radeon_connector->hpd.hpd; | 543 | hpd_id = radeon_connector->hpd.hpd; |
544 | bpc = connector->display_info.bpc; | 544 | /* bpc = connector->display_info.bpc; */ |
545 | } | 545 | } |
546 | 546 | ||
547 | /* no dig encoder assigned */ | 547 | /* no dig encoder assigned */ |
@@ -1159,7 +1159,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, | |||
1159 | dp_lane_count = dig_connector->dp_lane_count; | 1159 | dp_lane_count = dig_connector->dp_lane_count; |
1160 | connector_object_id = | 1160 | connector_object_id = |
1161 | (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | 1161 | (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; |
1162 | bpc = connector->display_info.bpc; | 1162 | /* bpc = connector->display_info.bpc; */ |
1163 | } | 1163 | } |
1164 | 1164 | ||
1165 | memset(&args, 0, sizeof(args)); | 1165 | memset(&args, 0, sizeof(args)); |
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index a58b37a2e65a..70089d32b80f 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -80,6 +80,9 @@ struct evergreen_cs_track { | |||
80 | bool cb_dirty; | 80 | bool cb_dirty; |
81 | bool db_dirty; | 81 | bool db_dirty; |
82 | bool streamout_dirty; | 82 | bool streamout_dirty; |
83 | u32 htile_offset; | ||
84 | u32 htile_surface; | ||
85 | struct radeon_bo *htile_bo; | ||
83 | }; | 86 | }; |
84 | 87 | ||
85 | static u32 evergreen_cs_get_aray_mode(u32 tiling_flags) | 88 | static u32 evergreen_cs_get_aray_mode(u32 tiling_flags) |
@@ -144,6 +147,9 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track) | |||
144 | track->db_s_read_bo = NULL; | 147 | track->db_s_read_bo = NULL; |
145 | track->db_s_write_bo = NULL; | 148 | track->db_s_write_bo = NULL; |
146 | track->db_dirty = true; | 149 | track->db_dirty = true; |
150 | track->htile_bo = NULL; | ||
151 | track->htile_offset = 0xFFFFFFFF; | ||
152 | track->htile_surface = 0; | ||
147 | 153 | ||
148 | for (i = 0; i < 4; i++) { | 154 | for (i = 0; i < 4; i++) { |
149 | track->vgt_strmout_size[i] = 0; | 155 | track->vgt_strmout_size[i] = 0; |
@@ -444,6 +450,62 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i | |||
444 | return 0; | 450 | return 0; |
445 | } | 451 | } |
446 | 452 | ||
453 | static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p, | ||
454 | unsigned nbx, unsigned nby) | ||
455 | { | ||
456 | struct evergreen_cs_track *track = p->track; | ||
457 | unsigned long size; | ||
458 | |||
459 | if (track->htile_bo == NULL) { | ||
460 | dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", | ||
461 | __func__, __LINE__, track->db_z_info); | ||
462 | return -EINVAL; | ||
463 | } | ||
464 | |||
465 | if (G_028ABC_LINEAR(track->htile_surface)) { | ||
466 | /* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */ | ||
467 | nbx = round_up(nbx, 16 * 8); | ||
468 | /* height is npipes htiles aligned == npipes * 8 pixel aligned */ | ||
469 | nby = round_up(nby, track->npipes * 8); | ||
470 | } else { | ||
471 | switch (track->npipes) { | ||
472 | case 8: | ||
473 | nbx = round_up(nbx, 64 * 8); | ||
474 | nby = round_up(nby, 64 * 8); | ||
475 | break; | ||
476 | case 4: | ||
477 | nbx = round_up(nbx, 64 * 8); | ||
478 | nby = round_up(nby, 32 * 8); | ||
479 | break; | ||
480 | case 2: | ||
481 | nbx = round_up(nbx, 32 * 8); | ||
482 | nby = round_up(nby, 32 * 8); | ||
483 | break; | ||
484 | case 1: | ||
485 | nbx = round_up(nbx, 32 * 8); | ||
486 | nby = round_up(nby, 16 * 8); | ||
487 | break; | ||
488 | default: | ||
489 | dev_warn(p->dev, "%s:%d invalid num pipes %d\n", | ||
490 | __func__, __LINE__, track->npipes); | ||
491 | return -EINVAL; | ||
492 | } | ||
493 | } | ||
494 | /* compute number of htiles */ | ||
495 | nbx = nbx / 8; | ||
496 | nby = nby / 8; | ||
497 | size = nbx * nby * 4; | ||
498 | size += track->htile_offset; | ||
499 | |||
500 | if (size > radeon_bo_size(track->htile_bo)) { | ||
501 | dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n", | ||
502 | __func__, __LINE__, radeon_bo_size(track->htile_bo), | ||
503 | size, nbx, nby); | ||
504 | return -EINVAL; | ||
505 | } | ||
506 | return 0; | ||
507 | } | ||
508 | |||
447 | static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) | 509 | static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) |
448 | { | 510 | { |
449 | struct evergreen_cs_track *track = p->track; | 511 | struct evergreen_cs_track *track = p->track; |
@@ -530,6 +592,14 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) | |||
530 | return -EINVAL; | 592 | return -EINVAL; |
531 | } | 593 | } |
532 | 594 | ||
595 | /* hyperz */ | ||
596 | if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) { | ||
597 | r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby); | ||
598 | if (r) { | ||
599 | return r; | ||
600 | } | ||
601 | } | ||
602 | |||
533 | return 0; | 603 | return 0; |
534 | } | 604 | } |
535 | 605 | ||
@@ -617,6 +687,14 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p) | |||
617 | return -EINVAL; | 687 | return -EINVAL; |
618 | } | 688 | } |
619 | 689 | ||
690 | /* hyperz */ | ||
691 | if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) { | ||
692 | r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby); | ||
693 | if (r) { | ||
694 | return r; | ||
695 | } | ||
696 | } | ||
697 | |||
620 | return 0; | 698 | return 0; |
621 | } | 699 | } |
622 | 700 | ||
@@ -850,7 +928,7 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) | |||
850 | return r; | 928 | return r; |
851 | } | 929 | } |
852 | /* Check depth buffer */ | 930 | /* Check depth buffer */ |
853 | if (G_028800_Z_WRITE_ENABLE(track->db_depth_control)) { | 931 | if (G_028800_Z_ENABLE(track->db_depth_control)) { |
854 | r = evergreen_cs_track_validate_depth(p); | 932 | r = evergreen_cs_track_validate_depth(p); |
855 | if (r) | 933 | if (r) |
856 | return r; | 934 | return r; |
@@ -1616,6 +1694,23 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1616 | track->cb_color_bo[tmp] = reloc->robj; | 1694 | track->cb_color_bo[tmp] = reloc->robj; |
1617 | track->cb_dirty = true; | 1695 | track->cb_dirty = true; |
1618 | break; | 1696 | break; |
1697 | case DB_HTILE_DATA_BASE: | ||
1698 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
1699 | if (r) { | ||
1700 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
1701 | "0x%04X\n", reg); | ||
1702 | return -EINVAL; | ||
1703 | } | ||
1704 | track->htile_offset = radeon_get_ib_value(p, idx); | ||
1705 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
1706 | track->htile_bo = reloc->robj; | ||
1707 | track->db_dirty = true; | ||
1708 | break; | ||
1709 | case DB_HTILE_SURFACE: | ||
1710 | /* 8x8 only */ | ||
1711 | track->htile_surface = radeon_get_ib_value(p, idx); | ||
1712 | track->db_dirty = true; | ||
1713 | break; | ||
1619 | case CB_IMMED0_BASE: | 1714 | case CB_IMMED0_BASE: |
1620 | case CB_IMMED1_BASE: | 1715 | case CB_IMMED1_BASE: |
1621 | case CB_IMMED2_BASE: | 1716 | case CB_IMMED2_BASE: |
@@ -1628,7 +1723,6 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1628 | case CB_IMMED9_BASE: | 1723 | case CB_IMMED9_BASE: |
1629 | case CB_IMMED10_BASE: | 1724 | case CB_IMMED10_BASE: |
1630 | case CB_IMMED11_BASE: | 1725 | case CB_IMMED11_BASE: |
1631 | case DB_HTILE_DATA_BASE: | ||
1632 | case SQ_PGM_START_FS: | 1726 | case SQ_PGM_START_FS: |
1633 | case SQ_PGM_START_ES: | 1727 | case SQ_PGM_START_ES: |
1634 | case SQ_PGM_START_VS: | 1728 | case SQ_PGM_START_VS: |
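The new evergreen_cs_track_validate_htile() boils down to: align the depth surface dimensions to the HTILE granularity (16 htiles wide and npipes htiles high in the linear case, larger power-of-two alignments per pipe count when tiled), convert pixels to 8x8 htiles, and require offset + nbx * nby * 4 bytes to fit inside the bound buffer. A compact userspace-style sketch of that size computation, with round_up re-declared locally and the radeon_bo/parser plumbing replaced by plain parameters:

/* Standalone sketch of the evergreen HTILE size check added above. */
#include <stdio.h>

#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

/* returns required buffer size in bytes, or 0 on an invalid pipe count */
static unsigned long htile_bytes(unsigned nbx, unsigned nby,
                                 unsigned npipes, int linear,
                                 unsigned long htile_offset)
{
    if (linear) {
        nbx = ROUND_UP(nbx, 16 * 8);          /* 16 htiles = 128 pixels */
        nby = ROUND_UP(nby, npipes * 8);
    } else {
        switch (npipes) {                      /* tiled alignments from the hunk */
        case 8: nbx = ROUND_UP(nbx, 64 * 8); nby = ROUND_UP(nby, 64 * 8); break;
        case 4: nbx = ROUND_UP(nbx, 64 * 8); nby = ROUND_UP(nby, 32 * 8); break;
        case 2: nbx = ROUND_UP(nbx, 32 * 8); nby = ROUND_UP(nby, 32 * 8); break;
        case 1: nbx = ROUND_UP(nbx, 32 * 8); nby = ROUND_UP(nby, 16 * 8); break;
        default: return 0;
        }
    }
    nbx /= 8;                                  /* pixels -> 8x8 htiles */
    nby /= 8;
    return (unsigned long)nbx * nby * 4 + htile_offset;  /* 4 bytes per htile */
}

int main(void)
{
    /* e.g. a 1920x1080 depth buffer on a 4-pipe part, 2D tiled, offset 0 */
    printf("htile bytes needed: %lu\n", htile_bytes(1920, 1080, 4, 0, 0));
    return 0;
}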
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index eb5708c7159d..b4eefc355f16 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -991,6 +991,14 @@ | |||
991 | #define G_028008_SLICE_MAX(x) (((x) >> 13) & 0x7FF) | 991 | #define G_028008_SLICE_MAX(x) (((x) >> 13) & 0x7FF) |
992 | #define C_028008_SLICE_MAX 0xFF001FFF | 992 | #define C_028008_SLICE_MAX 0xFF001FFF |
993 | #define DB_HTILE_DATA_BASE 0x28014 | 993 | #define DB_HTILE_DATA_BASE 0x28014 |
994 | #define DB_HTILE_SURFACE 0x28abc | ||
995 | #define S_028ABC_HTILE_WIDTH(x) (((x) & 0x1) << 0) | ||
996 | #define G_028ABC_HTILE_WIDTH(x) (((x) >> 0) & 0x1) | ||
997 | #define C_028ABC_HTILE_WIDTH 0xFFFFFFFE | ||
998 | #define S_028ABC_HTILE_HEIGHT(x) (((x) & 0x1) << 1) | ||
999 | #define G_028ABC_HTILE_HEIGHT(x) (((x) >> 1) & 0x1) | ||
1000 | #define C_028ABC_HTILE_HEIGHT 0xFFFFFFFD | ||
1001 | #define G_028ABC_LINEAR(x) (((x) >> 2) & 0x1) | ||
994 | #define DB_Z_INFO 0x28040 | 1002 | #define DB_Z_INFO 0x28040 |
995 | # define Z_ARRAY_MODE(x) ((x) << 4) | 1003 | # define Z_ARRAY_MODE(x) ((x) << 4) |
996 | # define DB_TILE_SPLIT(x) (((x) & 0x7) << 8) | 1004 | # define DB_TILE_SPLIT(x) (((x) & 0x7) << 8) |
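The new DB_HTILE_SURFACE defines follow the driver's usual S_xxx (set), G_xxx (get) and C_xxx (clear-mask) naming for register bitfields. A tiny self-contained example of how these macros compose when building or parsing a register value; the HTILE_WIDTH/HTILE_HEIGHT macro bodies are copied from the hunk above, the rest is illustration only:

/* Self-contained demo of the S_/G_/C_ bitfield macro style used above. */
#include <stdio.h>
#include <stdint.h>

#define S_028ABC_HTILE_WIDTH(x)   (((x) & 0x1) << 0)
#define G_028ABC_HTILE_WIDTH(x)   (((x) >> 0) & 0x1)
#define C_028ABC_HTILE_WIDTH      0xFFFFFFFE
#define S_028ABC_HTILE_HEIGHT(x)  (((x) & 0x1) << 1)
#define G_028ABC_HTILE_HEIGHT(x)  (((x) >> 1) & 0x1)

int main(void)
{
    uint32_t reg = 0;

    /* build: 8-wide, 8-high htiles (both bits set) */
    reg |= S_028ABC_HTILE_WIDTH(1) | S_028ABC_HTILE_HEIGHT(1);
    printf("reg = 0x%08x\n", reg);                          /* 0x00000003 */

    /* parse: recover the individual fields */
    printf("width=%u height=%u\n",
           G_028ABC_HTILE_WIDTH(reg), G_028ABC_HTILE_HEIGHT(reg));

    /* clear just the width field, leaving the rest untouched */
    reg &= C_028ABC_HTILE_WIDTH;
    printf("after clear: 0x%08x\n", reg);                   /* 0x00000002 */
    return 0;
}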
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 0ec3f205f9c4..b8e12af304a9 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -78,6 +78,9 @@ struct r600_cs_track { | |||
78 | bool cb_dirty; | 78 | bool cb_dirty; |
79 | bool db_dirty; | 79 | bool db_dirty; |
80 | bool streamout_dirty; | 80 | bool streamout_dirty; |
81 | struct radeon_bo *htile_bo; | ||
82 | u64 htile_offset; | ||
83 | u32 htile_surface; | ||
81 | }; | 84 | }; |
82 | 85 | ||
83 | #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 } | 86 | #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 } |
@@ -321,6 +324,9 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
321 | track->db_depth_size_idx = 0; | 324 | track->db_depth_size_idx = 0; |
322 | track->db_depth_control = 0xFFFFFFFF; | 325 | track->db_depth_control = 0xFFFFFFFF; |
323 | track->db_dirty = true; | 326 | track->db_dirty = true; |
327 | track->htile_bo = NULL; | ||
328 | track->htile_offset = 0xFFFFFFFF; | ||
329 | track->htile_surface = 0; | ||
324 | 330 | ||
325 | for (i = 0; i < 4; i++) { | 331 | for (i = 0; i < 4; i++) { |
326 | track->vgt_strmout_size[i] = 0; | 332 | track->vgt_strmout_size[i] = 0; |
@@ -455,12 +461,256 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
455 | return 0; | 461 | return 0; |
456 | } | 462 | } |
457 | 463 | ||
464 | static int r600_cs_track_validate_db(struct radeon_cs_parser *p) | ||
465 | { | ||
466 | struct r600_cs_track *track = p->track; | ||
467 | u32 nviews, bpe, ntiles, size, slice_tile_max, tmp; | ||
468 | u32 height_align, pitch_align, depth_align; | ||
469 | u32 pitch = 8192; | ||
470 | u32 height = 8192; | ||
471 | u64 base_offset, base_align; | ||
472 | struct array_mode_checker array_check; | ||
473 | int array_mode; | ||
474 | volatile u32 *ib = p->ib->ptr; | ||
475 | |||
476 | |||
477 | if (track->db_bo == NULL) { | ||
478 | dev_warn(p->dev, "z/stencil with no depth buffer\n"); | ||
479 | return -EINVAL; | ||
480 | } | ||
481 | switch (G_028010_FORMAT(track->db_depth_info)) { | ||
482 | case V_028010_DEPTH_16: | ||
483 | bpe = 2; | ||
484 | break; | ||
485 | case V_028010_DEPTH_X8_24: | ||
486 | case V_028010_DEPTH_8_24: | ||
487 | case V_028010_DEPTH_X8_24_FLOAT: | ||
488 | case V_028010_DEPTH_8_24_FLOAT: | ||
489 | case V_028010_DEPTH_32_FLOAT: | ||
490 | bpe = 4; | ||
491 | break; | ||
492 | case V_028010_DEPTH_X24_8_32_FLOAT: | ||
493 | bpe = 8; | ||
494 | break; | ||
495 | default: | ||
496 | dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); | ||
497 | return -EINVAL; | ||
498 | } | ||
499 | if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { | ||
500 | if (!track->db_depth_size_idx) { | ||
501 | dev_warn(p->dev, "z/stencil buffer size not set\n"); | ||
502 | return -EINVAL; | ||
503 | } | ||
504 | tmp = radeon_bo_size(track->db_bo) - track->db_offset; | ||
505 | tmp = (tmp / bpe) >> 6; | ||
506 | if (!tmp) { | ||
507 | dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", | ||
508 | track->db_depth_size, bpe, track->db_offset, | ||
509 | radeon_bo_size(track->db_bo)); | ||
510 | return -EINVAL; | ||
511 | } | ||
512 | ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); | ||
513 | } else { | ||
514 | size = radeon_bo_size(track->db_bo); | ||
515 | /* pitch in pixels */ | ||
516 | pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; | ||
517 | slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | ||
518 | slice_tile_max *= 64; | ||
519 | height = slice_tile_max / pitch; | ||
520 | if (height > 8192) | ||
521 | height = 8192; | ||
522 | base_offset = track->db_bo_mc + track->db_offset; | ||
523 | array_mode = G_028010_ARRAY_MODE(track->db_depth_info); | ||
524 | array_check.array_mode = array_mode; | ||
525 | array_check.group_size = track->group_size; | ||
526 | array_check.nbanks = track->nbanks; | ||
527 | array_check.npipes = track->npipes; | ||
528 | array_check.nsamples = track->nsamples; | ||
529 | array_check.blocksize = bpe; | ||
530 | if (r600_get_array_mode_alignment(&array_check, | ||
531 | &pitch_align, &height_align, &depth_align, &base_align)) { | ||
532 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
533 | G_028010_ARRAY_MODE(track->db_depth_info), | ||
534 | track->db_depth_info); | ||
535 | return -EINVAL; | ||
536 | } | ||
537 | switch (array_mode) { | ||
538 | case V_028010_ARRAY_1D_TILED_THIN1: | ||
539 | /* don't break userspace */ | ||
540 | height &= ~0x7; | ||
541 | break; | ||
542 | case V_028010_ARRAY_2D_TILED_THIN1: | ||
543 | break; | ||
544 | default: | ||
545 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
546 | G_028010_ARRAY_MODE(track->db_depth_info), | ||
547 | track->db_depth_info); | ||
548 | return -EINVAL; | ||
549 | } | ||
550 | |||
551 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
552 | dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", | ||
553 | __func__, __LINE__, pitch, pitch_align, array_mode); | ||
554 | return -EINVAL; | ||
555 | } | ||
556 | if (!IS_ALIGNED(height, height_align)) { | ||
557 | dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", | ||
558 | __func__, __LINE__, height, height_align, array_mode); | ||
559 | return -EINVAL; | ||
560 | } | ||
561 | if (!IS_ALIGNED(base_offset, base_align)) { | ||
562 | dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__, | ||
563 | base_offset, base_align, array_mode); | ||
564 | return -EINVAL; | ||
565 | } | ||
566 | |||
567 | ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | ||
568 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; | ||
569 | tmp = ntiles * bpe * 64 * nviews; | ||
570 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { | ||
571 | dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", | ||
572 | array_mode, | ||
573 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, | ||
574 | radeon_bo_size(track->db_bo)); | ||
575 | return -EINVAL; | ||
576 | } | ||
577 | } | ||
578 | |||
579 | /* hyperz */ | ||
580 | if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) { | ||
581 | unsigned long size; | ||
582 | unsigned nbx, nby; | ||
583 | |||
584 | if (track->htile_bo == NULL) { | ||
585 | dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", | ||
586 | __func__, __LINE__, track->db_depth_info); | ||
587 | return -EINVAL; | ||
588 | } | ||
589 | if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { | ||
590 | dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n", | ||
591 | __func__, __LINE__, track->db_depth_size); | ||
592 | return -EINVAL; | ||
593 | } | ||
594 | |||
595 | nbx = pitch; | ||
596 | nby = height; | ||
597 | if (G_028D24_LINEAR(track->htile_surface)) { | ||
598 | /* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */ | ||
599 | nbx = round_up(nbx, 16 * 8); | ||
600 | /* nby is npipes htiles aligned == npipes * 8 pixel aligned */ | ||
601 | nby = round_up(nby, track->npipes * 8); | ||
602 | } else { | ||
603 | /* HTILE_WIDTH & HTILE_HEIGHT (8 or 4) combine into a 2-bit value */ | ||
604 | tmp = track->htile_surface & 3; | ||
605 | /* alignment is the htile alignment * 8; the htile alignment varies | ||
606 | * with the number of pipes and the HTILE_WIDTH/HTILE_HEIGHT setting | ||
607 | */ | ||
608 | switch (track->npipes) { | ||
609 | case 8: | ||
610 | switch (tmp) { | ||
611 | case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ | ||
612 | nbx = round_up(nbx, 64 * 8); | ||
613 | nby = round_up(nby, 64 * 8); | ||
614 | break; | ||
615 | case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/ | ||
616 | case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/ | ||
617 | nbx = round_up(nbx, 64 * 8); | ||
618 | nby = round_up(nby, 32 * 8); | ||
619 | break; | ||
620 | case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/ | ||
621 | nbx = round_up(nbx, 32 * 8); | ||
622 | nby = round_up(nby, 32 * 8); | ||
623 | break; | ||
624 | default: | ||
625 | return -EINVAL; | ||
626 | } | ||
627 | break; | ||
628 | case 4: | ||
629 | switch (tmp) { | ||
630 | case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ | ||
631 | nbx = round_up(nbx, 64 * 8); | ||
632 | nby = round_up(nby, 32 * 8); | ||
633 | break; | ||
634 | case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/ | ||
635 | case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/ | ||
636 | nbx = round_up(nbx, 32 * 8); | ||
637 | nby = round_up(nby, 32 * 8); | ||
638 | break; | ||
639 | case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/ | ||
640 | nbx = round_up(nbx, 32 * 8); | ||
641 | nby = round_up(nby, 16 * 8); | ||
642 | break; | ||
643 | default: | ||
644 | return -EINVAL; | ||
645 | } | ||
646 | break; | ||
647 | case 2: | ||
648 | switch (tmp) { | ||
649 | case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ | ||
650 | nbx = round_up(nbx, 32 * 8); | ||
651 | nby = round_up(nby, 32 * 8); | ||
652 | break; | ||
653 | case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/ | ||
654 | case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/ | ||
655 | nbx = round_up(nbx, 32 * 8); | ||
656 | nby = round_up(nby, 16 * 8); | ||
657 | break; | ||
658 | case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/ | ||
659 | nbx = round_up(nbx, 16 * 8); | ||
660 | nby = round_up(nby, 16 * 8); | ||
661 | break; | ||
662 | default: | ||
663 | return -EINVAL; | ||
664 | } | ||
665 | break; | ||
666 | case 1: | ||
667 | switch (tmp) { | ||
668 | case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ | ||
669 | nbx = round_up(nbx, 32 * 8); | ||
670 | nby = round_up(nby, 16 * 8); | ||
671 | break; | ||
672 | case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/ | ||
673 | case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/ | ||
674 | nbx = round_up(nbx, 16 * 8); | ||
675 | nby = round_up(nby, 16 * 8); | ||
676 | break; | ||
677 | case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/ | ||
678 | nbx = round_up(nbx, 16 * 8); | ||
679 | nby = round_up(nby, 8 * 8); | ||
680 | break; | ||
681 | default: | ||
682 | return -EINVAL; | ||
683 | } | ||
684 | break; | ||
685 | default: | ||
686 | dev_warn(p->dev, "%s:%d invalid num pipes %d\n", | ||
687 | __func__, __LINE__, track->npipes); | ||
688 | return -EINVAL; | ||
689 | } | ||
690 | } | ||
691 | /* compute number of htiles */ | ||
692 | nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4; | ||
693 | nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4; | ||
694 | size = nbx * nby * 4; | ||
695 | size += track->htile_offset; | ||
696 | |||
697 | if (size > radeon_bo_size(track->htile_bo)) { | ||
698 | dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n", | ||
699 | __func__, __LINE__, radeon_bo_size(track->htile_bo), | ||
700 | size, nbx, nby); | ||
701 | return -EINVAL; | ||
702 | } | ||
703 | } | ||
704 | |||
705 | track->db_dirty = false; | ||
706 | return 0; | ||
707 | } | ||
708 | |||
458 | static int r600_cs_track_check(struct radeon_cs_parser *p) | 709 | static int r600_cs_track_check(struct radeon_cs_parser *p) |
459 | { | 710 | { |
460 | struct r600_cs_track *track = p->track; | 711 | struct r600_cs_track *track = p->track; |
461 | u32 tmp; | 712 | u32 tmp; |
462 | int r, i; | 713 | int r, i; |
463 | volatile u32 *ib = p->ib->ptr; | ||
464 | 714 | ||
465 | /* on legacy kernel we don't perform advanced check */ | 715 | /* on legacy kernel we don't perform advanced check */ |
466 | if (p->rdev == NULL) | 716 | if (p->rdev == NULL) |
@@ -513,124 +763,14 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
513 | track->cb_dirty = false; | 763 | track->cb_dirty = false; |
514 | } | 764 | } |
515 | 765 | ||
516 | if (track->db_dirty) { | 766 | /* Check depth buffer */ |
517 | /* Check depth buffer */ | 767 | if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) || |
518 | if (G_028800_STENCIL_ENABLE(track->db_depth_control) || | 768 | G_028800_Z_ENABLE(track->db_depth_control))) { |
519 | G_028800_Z_ENABLE(track->db_depth_control)) { | 769 | r = r600_cs_track_validate_db(p); |
520 | u32 nviews, bpe, ntiles, size, slice_tile_max; | 770 | if (r) |
521 | u32 height, height_align, pitch, pitch_align, depth_align; | 771 | return r; |
522 | u64 base_offset, base_align; | ||
523 | struct array_mode_checker array_check; | ||
524 | int array_mode; | ||
525 | |||
526 | if (track->db_bo == NULL) { | ||
527 | dev_warn(p->dev, "z/stencil with no depth buffer\n"); | ||
528 | return -EINVAL; | ||
529 | } | ||
530 | if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) { | ||
531 | dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n"); | ||
532 | return -EINVAL; | ||
533 | } | ||
534 | switch (G_028010_FORMAT(track->db_depth_info)) { | ||
535 | case V_028010_DEPTH_16: | ||
536 | bpe = 2; | ||
537 | break; | ||
538 | case V_028010_DEPTH_X8_24: | ||
539 | case V_028010_DEPTH_8_24: | ||
540 | case V_028010_DEPTH_X8_24_FLOAT: | ||
541 | case V_028010_DEPTH_8_24_FLOAT: | ||
542 | case V_028010_DEPTH_32_FLOAT: | ||
543 | bpe = 4; | ||
544 | break; | ||
545 | case V_028010_DEPTH_X24_8_32_FLOAT: | ||
546 | bpe = 8; | ||
547 | break; | ||
548 | default: | ||
549 | dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); | ||
550 | return -EINVAL; | ||
551 | } | ||
552 | if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { | ||
553 | if (!track->db_depth_size_idx) { | ||
554 | dev_warn(p->dev, "z/stencil buffer size not set\n"); | ||
555 | return -EINVAL; | ||
556 | } | ||
557 | tmp = radeon_bo_size(track->db_bo) - track->db_offset; | ||
558 | tmp = (tmp / bpe) >> 6; | ||
559 | if (!tmp) { | ||
560 | dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", | ||
561 | track->db_depth_size, bpe, track->db_offset, | ||
562 | radeon_bo_size(track->db_bo)); | ||
563 | return -EINVAL; | ||
564 | } | ||
565 | ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); | ||
566 | } else { | ||
567 | size = radeon_bo_size(track->db_bo); | ||
568 | /* pitch in pixels */ | ||
569 | pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; | ||
570 | slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | ||
571 | slice_tile_max *= 64; | ||
572 | height = slice_tile_max / pitch; | ||
573 | if (height > 8192) | ||
574 | height = 8192; | ||
575 | base_offset = track->db_bo_mc + track->db_offset; | ||
576 | array_mode = G_028010_ARRAY_MODE(track->db_depth_info); | ||
577 | array_check.array_mode = array_mode; | ||
578 | array_check.group_size = track->group_size; | ||
579 | array_check.nbanks = track->nbanks; | ||
580 | array_check.npipes = track->npipes; | ||
581 | array_check.nsamples = track->nsamples; | ||
582 | array_check.blocksize = bpe; | ||
583 | if (r600_get_array_mode_alignment(&array_check, | ||
584 | &pitch_align, &height_align, &depth_align, &base_align)) { | ||
585 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
586 | G_028010_ARRAY_MODE(track->db_depth_info), | ||
587 | track->db_depth_info); | ||
588 | return -EINVAL; | ||
589 | } | ||
590 | switch (array_mode) { | ||
591 | case V_028010_ARRAY_1D_TILED_THIN1: | ||
592 | /* don't break userspace */ | ||
593 | height &= ~0x7; | ||
594 | break; | ||
595 | case V_028010_ARRAY_2D_TILED_THIN1: | ||
596 | break; | ||
597 | default: | ||
598 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
599 | G_028010_ARRAY_MODE(track->db_depth_info), | ||
600 | track->db_depth_info); | ||
601 | return -EINVAL; | ||
602 | } | ||
603 | |||
604 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
605 | dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", | ||
606 | __func__, __LINE__, pitch, pitch_align, array_mode); | ||
607 | return -EINVAL; | ||
608 | } | ||
609 | if (!IS_ALIGNED(height, height_align)) { | ||
610 | dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", | ||
611 | __func__, __LINE__, height, height_align, array_mode); | ||
612 | return -EINVAL; | ||
613 | } | ||
614 | if (!IS_ALIGNED(base_offset, base_align)) { | ||
615 | dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i, | ||
616 | base_offset, base_align, array_mode); | ||
617 | return -EINVAL; | ||
618 | } | ||
619 | |||
620 | ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | ||
621 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; | ||
622 | tmp = ntiles * bpe * 64 * nviews; | ||
623 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { | ||
624 | dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", | ||
625 | array_mode, | ||
626 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, | ||
627 | radeon_bo_size(track->db_bo)); | ||
628 | return -EINVAL; | ||
629 | } | ||
630 | } | ||
631 | } | ||
632 | track->db_dirty = false; | ||
633 | } | 772 | } |
773 | |||
634 | return 0; | 774 | return 0; |
635 | } | 775 | } |
636 | 776 | ||
@@ -1244,6 +1384,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1244 | track->db_dirty = true; | 1384 | track->db_dirty = true; |
1245 | break; | 1385 | break; |
1246 | case DB_HTILE_DATA_BASE: | 1386 | case DB_HTILE_DATA_BASE: |
1387 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
1388 | if (r) { | ||
1389 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
1390 | "0x%04X\n", reg); | ||
1391 | return -EINVAL; | ||
1392 | } | ||
1393 | track->htile_offset = radeon_get_ib_value(p, idx) << 8; | ||
1394 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
1395 | track->htile_bo = reloc->robj; | ||
1396 | track->db_dirty = true; | ||
1397 | break; | ||
1398 | case DB_HTILE_SURFACE: | ||
1399 | track->htile_surface = radeon_get_ib_value(p, idx); | ||
1400 | track->db_dirty = true; | ||
1401 | break; | ||
1247 | case SQ_PGM_START_FS: | 1402 | case SQ_PGM_START_FS: |
1248 | case SQ_PGM_START_ES: | 1403 | case SQ_PGM_START_ES: |
1249 | case SQ_PGM_START_VS: | 1404 | case SQ_PGM_START_VS: |
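On r600 the HTILE alignment depends on both the pipe count and the 2-bit HTILE_WIDTH/HTILE_HEIGHT encoding, which is why the new r600_cs_track_validate_db() carries the nested switch above. The same data reads more easily as a lookup table; the sketch below is that table read back from the switch (alignments in pixels), with the two mixed 4x8/8x4 encodings sharing an entry:

/* Table form of the tiled-HTILE alignment switch added above.
 * Index: [log2(npipes)][htile_surface & 3], where the 2-bit value packs
 * HTILE_HEIGHT (bit 1) and HTILE_WIDTH (bit 0). */
#include <stdio.h>

struct align { unsigned x, y; };

static const struct align htile_align[4][4] = {
    /* npipes = 1 */ { {128,  64}, {128, 128}, {128, 128}, {256, 128} },
    /* npipes = 2 */ { {128, 128}, {256, 128}, {256, 128}, {256, 256} },
    /* npipes = 4 */ { {256, 128}, {256, 256}, {256, 256}, {512, 256} },
    /* npipes = 8 */ { {256, 256}, {512, 256}, {512, 256}, {512, 512} },
};

int main(void)
{
    unsigned log2_npipes = 2;      /* 4 pipes */
    unsigned wh = 3;               /* HTILE_WIDTH = 8, HTILE_HEIGHT = 8 */
    const struct align *a = &htile_align[log2_npipes][wh];

    printf("align pitch to %u px, height to %u px\n", a->x, a->y);
    return 0;
}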
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 3568a2e345fa..59f9c993cc31 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -195,6 +195,14 @@ | |||
195 | #define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31) | 195 | #define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31) |
196 | #define DB_DEPTH_BASE 0x2800C | 196 | #define DB_DEPTH_BASE 0x2800C |
197 | #define DB_HTILE_DATA_BASE 0x28014 | 197 | #define DB_HTILE_DATA_BASE 0x28014 |
198 | #define DB_HTILE_SURFACE 0x28D24 | ||
199 | #define S_028D24_HTILE_WIDTH(x) (((x) & 0x1) << 0) | ||
200 | #define G_028D24_HTILE_WIDTH(x) (((x) >> 0) & 0x1) | ||
201 | #define C_028D24_HTILE_WIDTH 0xFFFFFFFE | ||
202 | #define S_028D24_HTILE_HEIGHT(x) (((x) & 0x1) << 1) | ||
203 | #define G_028D24_HTILE_HEIGHT(x) (((x) >> 1) & 0x1) | ||
204 | #define C_028D24_HTILE_HEIGHT 0xFFFFFFFD | ||
205 | #define G_028D24_LINEAR(x) (((x) >> 2) & 0x1) | ||
198 | #define DB_WATERMARKS 0x9838 | 206 | #define DB_WATERMARKS 0x9838 |
199 | #define DEPTH_FREE(x) ((x) << 0) | 207 | #define DEPTH_FREE(x) ((x) << 0) |
200 | #define DEPTH_FLUSH(x) ((x) << 5) | 208 | #define DEPTH_FLUSH(x) ((x) << 5) |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 91541e63d582..df6a4dbd93f8 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -233,7 +233,18 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, | |||
233 | bo->pin_count++; | 233 | bo->pin_count++; |
234 | if (gpu_addr) | 234 | if (gpu_addr) |
235 | *gpu_addr = radeon_bo_gpu_offset(bo); | 235 | *gpu_addr = radeon_bo_gpu_offset(bo); |
236 | WARN_ON_ONCE(max_offset != 0); | 236 | |
237 | if (max_offset != 0) { | ||
238 | u64 domain_start; | ||
239 | |||
240 | if (domain == RADEON_GEM_DOMAIN_VRAM) | ||
241 | domain_start = bo->rdev->mc.vram_start; | ||
242 | else | ||
243 | domain_start = bo->rdev->mc.gtt_start; | ||
244 | WARN_ON_ONCE(max_offset < | ||
245 | (radeon_bo_gpu_offset(bo) - domain_start)); | ||
246 | } | ||
247 | |||
237 | return 0; | 248 | return 0; |
238 | } | 249 | } |
239 | radeon_ttm_placement_from_domain(bo, domain); | 250 | radeon_ttm_placement_from_domain(bo, domain); |
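For a buffer that is already pinned, the patch replaces the blanket WARN_ON_ONCE(max_offset != 0) with a real range check: the buffer's offset inside its domain (VRAM or GTT) must not exceed the caller's max_offset. A tiny sketch of that check, with the radeon_bo and mc accessors replaced by plain values:

/* Sketch of the already-pinned max_offset sanity check added above. */
#include <stdio.h>
#include <stdint.h>

static int check_pinned_offset(uint64_t gpu_offset, uint64_t domain_start,
                               uint64_t max_offset)
{
    if (max_offset == 0)
        return 0;                              /* caller imposed no limit */
    if (gpu_offset - domain_start > max_offset) {
        fprintf(stderr, "WARN: pinned bo lies beyond requested max_offset\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    uint64_t vram_start = 0;             /* assumed domain start */
    /* bo pinned 192 MiB into VRAM, caller wanted it under 256 MiB: ok */
    printf("%d\n", check_pinned_offset(192ull << 20, vram_start, 256ull << 20));
    /* same bo, caller wanted it under 128 MiB: warns */
    printf("%d\n", check_pinned_offset(192ull << 20, vram_start, 128ull << 20));
    return 0;
}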
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index aea63c415852..0f656b111c15 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman | |||
@@ -509,7 +509,6 @@ cayman 0x9400 | |||
509 | 0x00028AA8 IA_MULTI_VGT_PARAM | 509 | 0x00028AA8 IA_MULTI_VGT_PARAM |
510 | 0x00028AB4 VGT_REUSE_OFF | 510 | 0x00028AB4 VGT_REUSE_OFF |
511 | 0x00028AB8 VGT_VTX_CNT_EN | 511 | 0x00028AB8 VGT_VTX_CNT_EN |
512 | 0x00028ABC DB_HTILE_SURFACE | ||
513 | 0x00028AC0 DB_SRESULTS_COMPARE_STATE0 | 512 | 0x00028AC0 DB_SRESULTS_COMPARE_STATE0 |
514 | 0x00028AC4 DB_SRESULTS_COMPARE_STATE1 | 513 | 0x00028AC4 DB_SRESULTS_COMPARE_STATE1 |
515 | 0x00028AC8 DB_PRELOAD_CONTROL | 514 | 0x00028AC8 DB_PRELOAD_CONTROL |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index 77c37202376f..b912a37689bf 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
@@ -519,7 +519,6 @@ evergreen 0x9400 | |||
519 | 0x00028AA4 VGT_INSTANCE_STEP_RATE_1 | 519 | 0x00028AA4 VGT_INSTANCE_STEP_RATE_1 |
520 | 0x00028AB4 VGT_REUSE_OFF | 520 | 0x00028AB4 VGT_REUSE_OFF |
521 | 0x00028AB8 VGT_VTX_CNT_EN | 521 | 0x00028AB8 VGT_VTX_CNT_EN |
522 | 0x00028ABC DB_HTILE_SURFACE | ||
523 | 0x00028AC0 DB_SRESULTS_COMPARE_STATE0 | 522 | 0x00028AC0 DB_SRESULTS_COMPARE_STATE0 |
524 | 0x00028AC4 DB_SRESULTS_COMPARE_STATE1 | 523 | 0x00028AC4 DB_SRESULTS_COMPARE_STATE1 |
525 | 0x00028AC8 DB_PRELOAD_CONTROL | 524 | 0x00028AC8 DB_PRELOAD_CONTROL |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index 626c24ea0b56..5e659b034d9a 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 | |||
@@ -713,7 +713,6 @@ r600 0x9400 | |||
713 | 0x0000A710 TD_VS_SAMPLER17_BORDER_RED | 713 | 0x0000A710 TD_VS_SAMPLER17_BORDER_RED |
714 | 0x00009508 TA_CNTL_AUX | 714 | 0x00009508 TA_CNTL_AUX |
715 | 0x0002802C DB_DEPTH_CLEAR | 715 | 0x0002802C DB_DEPTH_CLEAR |
716 | 0x00028D24 DB_HTILE_SURFACE | ||
717 | 0x00028D34 DB_PREFETCH_LIMIT | 716 | 0x00028D34 DB_PREFETCH_LIMIT |
718 | 0x00028D30 DB_PRELOAD_CONTROL | 717 | 0x00028D30 DB_PRELOAD_CONTROL |
719 | 0x00028D0C DB_RENDER_CONTROL | 718 | 0x00028D0C DB_RENDER_CONTROL |
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 5340c5f3987b..53673907a6a0 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c | |||
@@ -47,7 +47,7 @@ static struct vm_operations_struct udl_gem_vm_ops = { | |||
47 | static const struct file_operations udl_driver_fops = { | 47 | static const struct file_operations udl_driver_fops = { |
48 | .owner = THIS_MODULE, | 48 | .owner = THIS_MODULE, |
49 | .open = drm_open, | 49 | .open = drm_open, |
50 | .mmap = drm_gem_mmap, | 50 | .mmap = udl_drm_gem_mmap, |
51 | .poll = drm_poll, | 51 | .poll = drm_poll, |
52 | .read = drm_read, | 52 | .read = drm_read, |
53 | .unlocked_ioctl = drm_ioctl, | 53 | .unlocked_ioctl = drm_ioctl, |
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 1612954a5bc4..96820d03a303 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h | |||
@@ -121,6 +121,7 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, | |||
121 | 121 | ||
122 | int udl_gem_vmap(struct udl_gem_object *obj); | 122 | int udl_gem_vmap(struct udl_gem_object *obj); |
123 | void udl_gem_vunmap(struct udl_gem_object *obj); | 123 | void udl_gem_vunmap(struct udl_gem_object *obj); |
124 | int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); | ||
124 | int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 125 | int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
125 | 126 | ||
126 | int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, | 127 | int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, |
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 852642dc1187..92f19ef329b0 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c | |||
@@ -71,6 +71,20 @@ int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev, | |||
71 | return drm_gem_handle_delete(file, handle); | 71 | return drm_gem_handle_delete(file, handle); |
72 | } | 72 | } |
73 | 73 | ||
74 | int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | ||
75 | { | ||
76 | int ret; | ||
77 | |||
78 | ret = drm_gem_mmap(filp, vma); | ||
79 | if (ret) | ||
80 | return ret; | ||
81 | |||
82 | vma->vm_flags &= ~VM_PFNMAP; | ||
83 | vma->vm_flags |= VM_MIXEDMAP; | ||
84 | |||
85 | return ret; | ||
86 | } | ||
87 | |||
74 | int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 88 | int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
75 | { | 89 | { |
76 | struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data); | 90 | struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data); |
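The udl changes are a worked example of a driver wrapping the generic GEM mmap: drm_gem_mmap() does the object lookup and vm_ops setup, and the thin udl_drm_gem_mmap() wrapper then swaps VM_PFNMAP for VM_MIXEDMAP so the fault handler can insert ordinary pages for the driver's shmem-backed objects. Below is a runnable userspace model of that "reuse the common handler, then post-process its result in the ops table" shape; every name and flag value in it is illustrative, only the structure matches the diff:

/* Userspace model of the wrapping pattern used by udl above. */
#include <stdio.h>

#define FLAG_PFNMAP   (1u << 0)   /* placeholder values, not the kernel VM_* bits */
#define FLAG_MIXEDMAP (1u << 1)

struct fake_vma { unsigned flags; };
struct fake_ops { int (*mmap)(struct fake_vma *vma); };

static int generic_mmap(struct fake_vma *vma)
{
    vma->flags |= FLAG_PFNMAP;    /* the generic path picks PFNMAP */
    return 0;
}

static int driver_mmap(struct fake_vma *vma)
{
    int ret = generic_mmap(vma);  /* reuse the common setup first */
    if (ret)
        return ret;
    vma->flags &= ~FLAG_PFNMAP;   /* then adjust: page-backed objects want MIXEDMAP */
    vma->flags |= FLAG_MIXEDMAP;
    return 0;
}

static const struct fake_ops ops = { .mmap = driver_mmap };

int main(void)
{
    struct fake_vma vma = { 0 };
    ops.mmap(&vma);
    printf("flags = 0x%x (MIXEDMAP set, PFNMAP clear)\n", vma.flags);
    return 0;
}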