author     Daniel Vetter <daniel.vetter@ffwll.ch>  2012-05-08 07:39:59 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2012-05-08 07:39:59 -0400
commit     5e13a0c5ec05d382b488a691dfb8af015b1dea1e (patch)
tree       7a06dfa1f7661f8908193f2437b32452520221d3 /drivers/gpu/drm
parent     b615b57a124a4af7b68196bc2fb8acc236041fa2 (diff)
parent     4f256e8aa3eda15c11c3cec3ec5336e1fc579cbd (diff)
Merge remote-tracking branch 'airlied/drm-core-next' into drm-intel-next-queued
Backmerge of drm-next to resolve a few ugly conflicts and to get a few fixes from 3.4-rc6 (which drm-next has already merged). Note that this merge also restricts the stencil cache lra evict policy workaround to snb (as it should) - I had to frob the code anyway because the CM0_MASK_SHIFT define died in the masked bit cleanups.

We need the backmerge to get Paulo Zanoni's infoframe regression fix for gm45 - further bugfixes from him touch the same area and would needlessly conflict.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 12
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 28
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 6
-rw-r--r--  drivers/gpu/drm/drm_usb.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 34
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 117
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 14
-rw-r--r--  drivers/gpu/drm/gma500/intel_opregion.c | 178
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_device.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.h | 1
-rw-r--r--  drivers/gpu/drm/gma500/mid_bios.c | 295
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail.h | 25
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 11
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/opregion.c | 350
-rw-r--r--  drivers/gpu/drm/gma500/opregion.h | 29
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 15
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_reg.h | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/psb_lid.c | 2
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_gpio.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fb.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 77
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 53
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 187
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 132
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 21
77 files changed, 1312 insertions(+), 970 deletions(-)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 30372f7b2d45..348b367debeb 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1510,8 +1510,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
1510 * \param arg pointer to a drm_buf_map structure. 1510 * \param arg pointer to a drm_buf_map structure.
1511 * \return zero on success or a negative number on failure. 1511 * \return zero on success or a negative number on failure.
1512 * 1512 *
1513 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information 1513 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1514 * about each buffer into user space. For PCI buffers, it calls do_mmap() with 1514 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1515 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls 1515 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
1516 * drm_mmap_dma(). 1516 * drm_mmap_dma().
1517 */ 1517 */
@@ -1553,18 +1553,14 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1553 retcode = -EINVAL; 1553 retcode = -EINVAL;
1554 goto done; 1554 goto done;
1555 } 1555 }
1556 down_write(&current->mm->mmap_sem); 1556 virtual = vm_mmap(file_priv->filp, 0, map->size,
1557 virtual = do_mmap(file_priv->filp, 0, map->size,
1558 PROT_READ | PROT_WRITE, 1557 PROT_READ | PROT_WRITE,
1559 MAP_SHARED, 1558 MAP_SHARED,
1560 token); 1559 token);
1561 up_write(&current->mm->mmap_sem);
1562 } else { 1560 } else {
1563 down_write(&current->mm->mmap_sem); 1561 virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
1564 virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
1565 PROT_READ | PROT_WRITE, 1562 PROT_READ | PROT_WRITE,
1566 MAP_SHARED, 0); 1563 MAP_SHARED, 0);
1567 up_write(&current->mm->mmap_sem);
1568 } 1564 }
1569 if (virtual > -1024UL) { 1565 if (virtual > -1024UL) {
1570 /* Real error */ 1566 /* Real error */
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index a9ca1b80fc28..ee63a123235c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2104,7 +2104,7 @@ int drm_mode_addfb(struct drm_device *dev,
2104 2104
2105 fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); 2105 fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
2106 if (IS_ERR(fb)) { 2106 if (IS_ERR(fb)) {
2107 DRM_ERROR("could not create framebuffer\n"); 2107 DRM_DEBUG_KMS("could not create framebuffer\n");
2108 ret = PTR_ERR(fb); 2108 ret = PTR_ERR(fb);
2109 goto out; 2109 goto out;
2110 } 2110 }
@@ -2193,7 +2193,7 @@ static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
2193 2193
2194 ret = format_check(r); 2194 ret = format_check(r);
2195 if (ret) { 2195 if (ret) {
2196 DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format); 2196 DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
2197 return ret; 2197 return ret;
2198 } 2198 }
2199 2199
@@ -2202,12 +2202,12 @@ static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
2202 num_planes = drm_format_num_planes(r->pixel_format); 2202 num_planes = drm_format_num_planes(r->pixel_format);
2203 2203
2204 if (r->width == 0 || r->width % hsub) { 2204 if (r->width == 0 || r->width % hsub) {
2205 DRM_ERROR("bad framebuffer width %u\n", r->height); 2205 DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
2206 return -EINVAL; 2206 return -EINVAL;
2207 } 2207 }
2208 2208
2209 if (r->height == 0 || r->height % vsub) { 2209 if (r->height == 0 || r->height % vsub) {
2210 DRM_ERROR("bad framebuffer height %u\n", r->height); 2210 DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
2211 return -EINVAL; 2211 return -EINVAL;
2212 } 2212 }
2213 2213
@@ -2215,12 +2215,12 @@ static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
2215 unsigned int width = r->width / (i != 0 ? hsub : 1); 2215 unsigned int width = r->width / (i != 0 ? hsub : 1);
2216 2216
2217 if (!r->handles[i]) { 2217 if (!r->handles[i]) {
2218 DRM_ERROR("no buffer object handle for plane %d\n", i); 2218 DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
2219 return -EINVAL; 2219 return -EINVAL;
2220 } 2220 }
2221 2221
2222 if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) { 2222 if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
2223 DRM_ERROR("bad pitch %u for plane %d\n", r->pitches[i], i); 2223 DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
2224 return -EINVAL; 2224 return -EINVAL;
2225 } 2225 }
2226 } 2226 }
@@ -2257,12 +2257,12 @@ int drm_mode_addfb2(struct drm_device *dev,
2257 return -EINVAL; 2257 return -EINVAL;
2258 2258
2259 if ((config->min_width > r->width) || (r->width > config->max_width)) { 2259 if ((config->min_width > r->width) || (r->width > config->max_width)) {
2260 DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n", 2260 DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
2261 r->width, config->min_width, config->max_width); 2261 r->width, config->min_width, config->max_width);
2262 return -EINVAL; 2262 return -EINVAL;
2263 } 2263 }
2264 if ((config->min_height > r->height) || (r->height > config->max_height)) { 2264 if ((config->min_height > r->height) || (r->height > config->max_height)) {
2265 DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n", 2265 DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
2266 r->height, config->min_height, config->max_height); 2266 r->height, config->min_height, config->max_height);
2267 return -EINVAL; 2267 return -EINVAL;
2268 } 2268 }
@@ -2275,7 +2275,7 @@ int drm_mode_addfb2(struct drm_device *dev,
2275 2275
2276 fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); 2276 fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
2277 if (IS_ERR(fb)) { 2277 if (IS_ERR(fb)) {
2278 DRM_ERROR("could not create framebuffer\n"); 2278 DRM_DEBUG_KMS("could not create framebuffer\n");
2279 ret = PTR_ERR(fb); 2279 ret = PTR_ERR(fb);
2280 goto out; 2280 goto out;
2281 } 2281 }
@@ -3376,10 +3376,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3376 3376
3377 ret = crtc->funcs->page_flip(crtc, fb, e); 3377 ret = crtc->funcs->page_flip(crtc, fb, e);
3378 if (ret) { 3378 if (ret) {
3379 spin_lock_irqsave(&dev->event_lock, flags); 3379 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
3380 file_priv->event_space += sizeof e->event; 3380 spin_lock_irqsave(&dev->event_lock, flags);
3381 spin_unlock_irqrestore(&dev->event_lock, flags); 3381 file_priv->event_space += sizeof e->event;
3382 kfree(e); 3382 spin_unlock_irqrestore(&dev->event_lock, flags);
3383 kfree(e);
3384 }
3383 } 3385 }
3384 3386
3385out: 3387out:
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index cdfbf27b2b3c..123de28f94ef 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -507,12 +507,12 @@ int drm_release(struct inode *inode, struct file *filp)
507 507
508 drm_events_release(file_priv); 508 drm_events_release(file_priv);
509 509
510 if (dev->driver->driver_features & DRIVER_GEM)
511 drm_gem_release(dev, file_priv);
512
513 if (dev->driver->driver_features & DRIVER_MODESET) 510 if (dev->driver->driver_features & DRIVER_MODESET)
514 drm_fb_release(file_priv); 511 drm_fb_release(file_priv);
515 512
513 if (dev->driver->driver_features & DRIVER_GEM)
514 drm_gem_release(dev, file_priv);
515
516 mutex_lock(&dev->ctxlist_mutex); 516 mutex_lock(&dev->ctxlist_mutex);
517 if (!list_empty(&dev->ctxlist)) { 517 if (!list_empty(&dev->ctxlist)) {
518 struct drm_ctx_list *pos, *n; 518 struct drm_ctx_list *pos, *n;
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index c8c83dad2ce1..37c9a523dd1c 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -1,6 +1,6 @@
1#include "drmP.h" 1#include "drmP.h"
2#include <linux/usb.h> 2#include <linux/usb.h>
3#include <linux/export.h> 3#include <linux/module.h>
4 4
5int drm_get_usb_dev(struct usb_interface *interface, 5int drm_get_usb_dev(struct usb_interface *interface,
6 const struct usb_device_id *id, 6 const struct usb_device_id *id,
@@ -114,3 +114,7 @@ void drm_usb_exit(struct drm_driver *driver,
114 usb_deregister(udriver); 114 usb_deregister(udriver);
115} 115}
116EXPORT_SYMBOL(drm_usb_exit); 116EXPORT_SYMBOL(drm_usb_exit);
117
118MODULE_AUTHOR("David Airlie");
119MODULE_DESCRIPTION("USB DRM support");
120MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 26d51979116b..1dffa8359f88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -149,22 +149,12 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
149 unsigned long pfn; 149 unsigned long pfn;
150 150
151 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { 151 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
152 unsigned long usize = buf->size;
153
154 if (!buf->pages) 152 if (!buf->pages)
155 return -EINTR; 153 return -EINTR;
156 154
157 while (usize > 0) { 155 pfn = page_to_pfn(buf->pages[page_offset++]);
158 pfn = page_to_pfn(buf->pages[page_offset++]); 156 } else
159 vm_insert_mixed(vma, f_vaddr, pfn); 157 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
160 f_vaddr += PAGE_SIZE;
161 usize -= PAGE_SIZE;
162 }
163
164 return 0;
165 }
166
167 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
168 158
169 return vm_insert_mixed(vma, f_vaddr, pfn); 159 return vm_insert_mixed(vma, f_vaddr, pfn);
170} 160}
@@ -524,6 +514,8 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
524 if (!buffer->pages) 514 if (!buffer->pages)
525 return -EINVAL; 515 return -EINVAL;
526 516
517 vma->vm_flags |= VM_MIXEDMAP;
518
527 do { 519 do {
528 ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); 520 ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
529 if (ret) { 521 if (ret) {
@@ -581,10 +573,8 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
581 obj->filp->f_op = &exynos_drm_gem_fops; 573 obj->filp->f_op = &exynos_drm_gem_fops;
582 obj->filp->private_data = obj; 574 obj->filp->private_data = obj;
583 575
584 down_write(&current->mm->mmap_sem); 576 addr = vm_mmap(obj->filp, 0, args->size,
585 addr = do_mmap(obj->filp, 0, args->size,
586 PROT_READ | PROT_WRITE, MAP_SHARED, 0); 577 PROT_READ | PROT_WRITE, MAP_SHARED, 0);
587 up_write(&current->mm->mmap_sem);
588 578
589 drm_gem_object_unreference_unlocked(obj); 579 drm_gem_object_unreference_unlocked(obj);
590 580
@@ -712,7 +702,6 @@ int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
712int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 702int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
713{ 703{
714 struct drm_gem_object *obj = vma->vm_private_data; 704 struct drm_gem_object *obj = vma->vm_private_data;
715 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
716 struct drm_device *dev = obj->dev; 705 struct drm_device *dev = obj->dev;
717 unsigned long f_vaddr; 706 unsigned long f_vaddr;
718 pgoff_t page_offset; 707 pgoff_t page_offset;
@@ -724,21 +713,10 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
724 713
725 mutex_lock(&dev->struct_mutex); 714 mutex_lock(&dev->struct_mutex);
726 715
727 /*
728 * allocate all pages as desired size if user wants to allocate
729 * physically non-continuous memory.
730 */
731 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
732 ret = exynos_drm_gem_get_pages(obj);
733 if (ret < 0)
734 goto err;
735 }
736
737 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); 716 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
738 if (ret < 0) 717 if (ret < 0)
739 DRM_ERROR("failed to map pages.\n"); 718 DRM_ERROR("failed to map pages.\n");
740 719
741err:
742 mutex_unlock(&dev->struct_mutex); 720 mutex_unlock(&dev->struct_mutex);
743 721
744 return convert_to_vm_err_msg(ret); 722 return convert_to_vm_err_msg(ret);
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 1583982917ce..dd7d6b57996f 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -1,7 +1,7 @@
1# 1#
2# KMS driver for the GMA500 2# KMS driver for the GMA500
3# 3#
4ccflags-y += -Iinclude/drm 4ccflags-y += -I$(srctree)/include/drm
5 5
6gma500_gfx-y += gem_glue.o \ 6gma500_gfx-y += gem_glue.o \
7 accel_2d.o \ 7 accel_2d.o \
@@ -12,8 +12,8 @@ gma500_gfx-y += gem_glue.o \
12 intel_bios.o \ 12 intel_bios.o \
13 intel_i2c.o \ 13 intel_i2c.o \
14 intel_gmbus.o \ 14 intel_gmbus.o \
15 intel_opregion.o \
16 mmu.o \ 15 mmu.o \
16 opregion.o \
17 power.o \ 17 power.o \
18 psb_drv.o \ 18 psb_drv.o \
19 psb_intel_display.o \ 19 psb_intel_display.o \
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 62f9b735459b..c10f02068d11 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -57,8 +57,7 @@ static int cdv_output_init(struct drm_device *dev)
57 cdv_intel_crt_init(dev, &dev_priv->mode_dev); 57 cdv_intel_crt_init(dev, &dev_priv->mode_dev);
58 cdv_intel_lvds_init(dev, &dev_priv->mode_dev); 58 cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
59 59
60 /* These bits indicate HDMI not SDVO on CDV, but we don't yet support 60 /* These bits indicate HDMI not SDVO on CDV */
61 the HDMI interface */
62 if (REG_READ(SDVOB) & SDVO_DETECTED) 61 if (REG_READ(SDVOB) & SDVO_DETECTED)
63 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB); 62 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
64 if (REG_READ(SDVOC) & SDVO_DETECTED) 63 if (REG_READ(SDVOC) & SDVO_DETECTED)
@@ -69,76 +68,71 @@ static int cdv_output_init(struct drm_device *dev)
69#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 68#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
70 69
71/* 70/*
72 * Poulsbo Backlight Interfaces 71 * Cedartrail Backlght Interfaces
73 */ 72 */
74 73
75#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
76#define BLC_PWM_FREQ_CALC_CONSTANT 32
77#define MHz 1000000
78
79#define PSB_BLC_PWM_PRECISION_FACTOR 10
80#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
81#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
82
83#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
84#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
85
86static int cdv_brightness;
87static struct backlight_device *cdv_backlight_device; 74static struct backlight_device *cdv_backlight_device;
88 75
89static int cdv_get_brightness(struct backlight_device *bd) 76static int cdv_backlight_combination_mode(struct drm_device *dev)
90{ 77{
91 /* return locally cached var instead of HW read (due to DPST etc.) */ 78 return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
92 /* FIXME: ideally return actual value in case firmware fiddled with
93 it */
94 return cdv_brightness;
95} 79}
96 80
97 81static int cdv_get_brightness(struct backlight_device *bd)
98static int cdv_backlight_setup(struct drm_device *dev)
99{ 82{
100 struct drm_psb_private *dev_priv = dev->dev_private; 83 struct drm_device *dev = bl_get_data(bd);
101 unsigned long core_clock; 84 u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
102 /* u32 bl_max_freq; */
103 /* unsigned long value; */
104 u16 bl_max_freq;
105 uint32_t value;
106 uint32_t blc_pwm_precision_factor;
107
108 /* get bl_max_freq and pol from dev_priv*/
109 if (!dev_priv->lvds_bl) {
110 dev_err(dev->dev, "Has no valid LVDS backlight info\n");
111 return -ENOENT;
112 }
113 bl_max_freq = dev_priv->lvds_bl->freq;
114 blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
115 85
116 core_clock = dev_priv->core_freq; 86 if (cdv_backlight_combination_mode(dev)) {
87 u8 lbpc;
117 88
118 value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT; 89 val &= ~1;
119 value *= blc_pwm_precision_factor; 90 pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
120 value /= bl_max_freq; 91 val *= lbpc;
121 value /= blc_pwm_precision_factor; 92 }
93 return val;
94}
122 95
123 if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ || 96static u32 cdv_get_max_backlight(struct drm_device *dev)
124 value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ) 97{
125 return -ERANGE; 98 u32 max = REG_READ(BLC_PWM_CTL);
126 else { 99
127 /* FIXME */ 100 if (max == 0) {
101 DRM_DEBUG_KMS("LVDS Panel PWM value is 0!\n");
102 /* i915 does this, I believe which means that we should not
103 * smash PWM control as firmware will take control of it. */
104 return 1;
128 } 105 }
129 return 0; 106
107 max >>= 16;
108 if (cdv_backlight_combination_mode(dev))
109 max *= 0xff;
110 return max;
130} 111}
131 112
132static int cdv_set_brightness(struct backlight_device *bd) 113static int cdv_set_brightness(struct backlight_device *bd)
133{ 114{
115 struct drm_device *dev = bl_get_data(bd);
134 int level = bd->props.brightness; 116 int level = bd->props.brightness;
117 u32 blc_pwm_ctl;
135 118
136 /* Percentage 1-100% being valid */ 119 /* Percentage 1-100% being valid */
137 if (level < 1) 120 if (level < 1)
138 level = 1; 121 level = 1;
139 122
140 /*cdv_intel_lvds_set_brightness(dev, level); FIXME */ 123 if (cdv_backlight_combination_mode(dev)) {
141 cdv_brightness = level; 124 u32 max = cdv_get_max_backlight(dev);
125 u8 lbpc;
126
127 lbpc = level * 0xfe / max + 1;
128 level /= lbpc;
129
130 pci_write_config_byte(dev->pdev, 0xF4, lbpc);
131 }
132
133 blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
134 REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
135 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
142 return 0; 136 return 0;
143} 137}
144 138
@@ -150,7 +144,6 @@ static const struct backlight_ops cdv_ops = {
150static int cdv_backlight_init(struct drm_device *dev) 144static int cdv_backlight_init(struct drm_device *dev)
151{ 145{
152 struct drm_psb_private *dev_priv = dev->dev_private; 146 struct drm_psb_private *dev_priv = dev->dev_private;
153 int ret;
154 struct backlight_properties props; 147 struct backlight_properties props;
155 148
156 memset(&props, 0, sizeof(struct backlight_properties)); 149 memset(&props, 0, sizeof(struct backlight_properties));
@@ -162,14 +155,9 @@ static int cdv_backlight_init(struct drm_device *dev)
162 if (IS_ERR(cdv_backlight_device)) 155 if (IS_ERR(cdv_backlight_device))
163 return PTR_ERR(cdv_backlight_device); 156 return PTR_ERR(cdv_backlight_device);
164 157
165 ret = cdv_backlight_setup(dev); 158 cdv_backlight_device->props.brightness =
166 if (ret < 0) { 159 cdv_get_brightness(cdv_backlight_device);
167 backlight_device_unregister(cdv_backlight_device); 160 cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev);
168 cdv_backlight_device = NULL;
169 return ret;
170 }
171 cdv_backlight_device->props.brightness = 100;
172 cdv_backlight_device->props.max_brightness = 100;
173 backlight_update_status(cdv_backlight_device); 161 backlight_update_status(cdv_backlight_device);
174 dev_priv->backlight_device = cdv_backlight_device; 162 dev_priv->backlight_device = cdv_backlight_device;
175 return 0; 163 return 0;
@@ -244,11 +232,12 @@ static void cdv_init_pm(struct drm_device *dev)
244static void cdv_errata(struct drm_device *dev) 232static void cdv_errata(struct drm_device *dev)
245{ 233{
246 /* Disable bonus launch. 234 /* Disable bonus launch.
247 * CPU and GPU competes for memory and display misses updates and flickers. 235 * CPU and GPU competes for memory and display misses updates and
248 * Worst with dual core, dual displays. 236 * flickers. Worst with dual core, dual displays.
249 * 237 *
250 * Fixes were done to Win 7 gfx driver to disable a feature called Bonus 238 * Fixes were done to Win 7 gfx driver to disable a feature called
251 * Launch to work around the issue, by degrading performance. 239 * Bonus Launch to work around the issue, by degrading
240 * performance.
252 */ 241 */
253 CDV_MSG_WRITE32(3, 0x30, 0x08027108); 242 CDV_MSG_WRITE32(3, 0x30, 0x08027108);
254} 243}
@@ -501,7 +490,7 @@ static int cdv_chip_setup(struct drm_device *dev)
501 struct drm_psb_private *dev_priv = dev->dev_private; 490 struct drm_psb_private *dev_priv = dev->dev_private;
502 INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func); 491 INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
503 cdv_get_core_freq(dev); 492 cdv_get_core_freq(dev);
504 gma_intel_opregion_init(dev); 493 psb_intel_opregion_init(dev);
505 psb_intel_init_bios(dev); 494 psb_intel_init_bios(dev);
506 cdv_hotplug_enable(dev, false); 495 cdv_hotplug_enable(dev, false);
507 return 0; 496 return 0;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 44a8353d92bf..ff5b58eb878c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -556,7 +556,7 @@ static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
556 drm_encoder_cleanup(encoder); 556 drm_encoder_cleanup(encoder);
557} 557}
558 558
559const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = { 559static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
560 .destroy = cdv_intel_lvds_enc_destroy, 560 .destroy = cdv_intel_lvds_enc_destroy,
561}; 561};
562 562
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index c9fe4bdeb681..f47f883ff9ef 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -408,6 +408,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
408 return -ENOMEM; 408 return -ENOMEM;
409 } 409 }
410 410
411 memset(dev_priv->vram_addr + backing->offset, 0, size);
412
411 mutex_lock(&dev->struct_mutex); 413 mutex_lock(&dev->struct_mutex);
412 414
413 info = framebuffer_alloc(0, device); 415 info = framebuffer_alloc(0, device);
@@ -453,8 +455,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
453 info->fix.ypanstep = 0; 455 info->fix.ypanstep = 0;
454 456
455 /* Accessed stolen memory directly */ 457 /* Accessed stolen memory directly */
456 info->screen_base = (char *)dev_priv->vram_addr + 458 info->screen_base = dev_priv->vram_addr + backing->offset;
457 backing->offset;
458 info->screen_size = size; 459 info->screen_size = size;
459 460
460 if (dev_priv->gtt.stolen_size) { 461 if (dev_priv->gtt.stolen_size) {
@@ -571,7 +572,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
571 return new_fb; 572 return new_fb;
572} 573}
573 574
574struct drm_fb_helper_funcs psb_fb_helper_funcs = { 575static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
575 .gamma_set = psbfb_gamma_set, 576 .gamma_set = psbfb_gamma_set,
576 .gamma_get = psbfb_gamma_get, 577 .gamma_get = psbfb_gamma_get,
577 .fb_probe = psbfb_probe, 578 .fb_probe = psbfb_probe,
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 54e5c9e1e6fa..4cd33df5f93c 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -61,7 +61,7 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
61 * Given a gtt_range object return the GTT offset of the page table 61 * Given a gtt_range object return the GTT offset of the page table
62 * entries for this gtt_range 62 * entries for this gtt_range
63 */ 63 */
64static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r) 64static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
65{ 65{
66 struct drm_psb_private *dev_priv = dev->dev_private; 66 struct drm_psb_private *dev_priv = dev->dev_private;
67 unsigned long offset; 67 unsigned long offset;
@@ -82,7 +82,8 @@ static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
82 */ 82 */
83static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r) 83static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
84{ 84{
85 u32 *gtt_slot, pte; 85 u32 __iomem *gtt_slot;
86 u32 pte;
86 struct page **pages; 87 struct page **pages;
87 int i; 88 int i;
88 89
@@ -126,7 +127,8 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
126static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r) 127static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
127{ 128{
128 struct drm_psb_private *dev_priv = dev->dev_private; 129 struct drm_psb_private *dev_priv = dev->dev_private;
129 u32 *gtt_slot, pte; 130 u32 __iomem *gtt_slot;
131 u32 pte;
130 int i; 132 int i;
131 133
132 WARN_ON(r->stolen); 134 WARN_ON(r->stolen);
@@ -152,7 +154,8 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
152 */ 154 */
153void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll) 155void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
154{ 156{
155 u32 *gtt_slot, pte; 157 u32 __iomem *gtt_slot;
158 u32 pte;
156 int i; 159 int i;
157 160
158 if (roll >= r->npage) { 161 if (roll >= r->npage) {
@@ -413,7 +416,6 @@ int psb_gtt_init(struct drm_device *dev, int resume)
413 unsigned long stolen_size, vram_stolen_size; 416 unsigned long stolen_size, vram_stolen_size;
414 unsigned i, num_pages; 417 unsigned i, num_pages;
415 unsigned pfn_base; 418 unsigned pfn_base;
416 uint32_t vram_pages;
417 uint32_t dvmt_mode = 0; 419 uint32_t dvmt_mode = 0;
418 struct psb_gtt *pg; 420 struct psb_gtt *pg;
419 421
@@ -529,7 +531,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
529 */ 531 */
530 532
531 pfn_base = dev_priv->stolen_base >> PAGE_SHIFT; 533 pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
532 vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT; 534 num_pages = vram_stolen_size >> PAGE_SHIFT;
533 printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n", 535 printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
534 num_pages, pfn_base << PAGE_SHIFT, 0); 536 num_pages, pfn_base << PAGE_SHIFT, 0);
535 for (i = 0; i < num_pages; ++i) { 537 for (i = 0; i < num_pages; ++i) {
diff --git a/drivers/gpu/drm/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/intel_opregion.c
deleted file mode 100644
index 7041f40affff..000000000000
--- a/drivers/gpu/drm/gma500/intel_opregion.c
+++ /dev/null
@@ -1,178 +0,0 @@
1/*
2 * Copyright 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * FIXME: resolve with the i915 version
24 */
25
26#include "psb_drv.h"
27
28#define PCI_ASLE 0xe4
29#define PCI_ASLS 0xfc
30
31#define OPREGION_HEADER_OFFSET 0
32#define OPREGION_ACPI_OFFSET 0x100
33#define ACPI_CLID 0x01ac /* current lid state indicator */
34#define ACPI_CDCK 0x01b0 /* current docking state indicator */
35#define OPREGION_SWSCI_OFFSET 0x200
36#define OPREGION_ASLE_OFFSET 0x300
37#define OPREGION_VBT_OFFSET 0x400
38
39#define OPREGION_SIGNATURE "IntelGraphicsMem"
40#define MBOX_ACPI (1<<0)
41#define MBOX_SWSCI (1<<1)
42#define MBOX_ASLE (1<<2)
43
44struct opregion_header {
45 u8 signature[16];
46 u32 size;
47 u32 opregion_ver;
48 u8 bios_ver[32];
49 u8 vbios_ver[16];
50 u8 driver_ver[16];
51 u32 mboxes;
52 u8 reserved[164];
53} __packed;
54
55/* OpRegion mailbox #1: public ACPI methods */
56struct opregion_acpi {
57 u32 drdy; /* driver readiness */
58 u32 csts; /* notification status */
59 u32 cevt; /* current event */
60 u8 rsvd1[20];
61 u32 didl[8]; /* supported display devices ID list */
62 u32 cpdl[8]; /* currently presented display list */
63 u32 cadl[8]; /* currently active display list */
64 u32 nadl[8]; /* next active devices list */
65 u32 aslp; /* ASL sleep time-out */
66 u32 tidx; /* toggle table index */
67 u32 chpd; /* current hotplug enable indicator */
68 u32 clid; /* current lid state*/
69 u32 cdck; /* current docking state */
70 u32 sxsw; /* Sx state resume */
71 u32 evts; /* ASL supported events */
72 u32 cnot; /* current OS notification */
73 u32 nrdy; /* driver status */
74 u8 rsvd2[60];
75} __attribute__((packed));
76
77/* OpRegion mailbox #2: SWSCI */
78struct opregion_swsci {
79 u32 scic; /* SWSCI command|status|data */
80 u32 parm; /* command parameters */
81 u32 dslp; /* driver sleep time-out */
82 u8 rsvd[244];
83} __attribute__((packed));
84
85/* OpRegion mailbox #3: ASLE */
86struct opregion_asle {
87 u32 ardy; /* driver readiness */
88 u32 aslc; /* ASLE interrupt command */
89 u32 tche; /* technology enabled indicator */
90 u32 alsi; /* current ALS illuminance reading */
91 u32 bclp; /* backlight brightness to set */
92 u32 pfit; /* panel fitting state */
93 u32 cblv; /* current brightness level */
94 u16 bclm[20]; /* backlight level duty cycle mapping table */
95 u32 cpfm; /* current panel fitting mode */
96 u32 epfm; /* enabled panel fitting modes */
97 u8 plut[74]; /* panel LUT and identifier */
98 u32 pfmb; /* PWM freq and min brightness */
99 u8 rsvd[102];
100} __attribute__((packed));
101
102/* ASLE irq request bits */
103#define ASLE_SET_ALS_ILLUM (1 << 0)
104#define ASLE_SET_BACKLIGHT (1 << 1)
105#define ASLE_SET_PFIT (1 << 2)
106#define ASLE_SET_PWM_FREQ (1 << 3)
107#define ASLE_REQ_MSK 0xf
108
109/* response bits of ASLE irq request */
110#define ASLE_ALS_ILLUM_FAILED (1<<10)
111#define ASLE_BACKLIGHT_FAILED (1<<12)
112#define ASLE_PFIT_FAILED (1<<14)
113#define ASLE_PWM_FREQ_FAILED (1<<16)
114
115/* ASLE backlight brightness to set */
116#define ASLE_BCLP_VALID (1<<31)
117#define ASLE_BCLP_MSK (~(1<<31))
118
119/* ASLE panel fitting request */
120#define ASLE_PFIT_VALID (1<<31)
121#define ASLE_PFIT_CENTER (1<<0)
122#define ASLE_PFIT_STRETCH_TEXT (1<<1)
123#define ASLE_PFIT_STRETCH_GFX (1<<2)
124
125/* PWM frequency and minimum brightness */
126#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
127#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
128#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
129#define ASLE_PFMB_PWM_VALID (1<<31)
130
131#define ASLE_CBLV_VALID (1<<31)
132
133#define ACPI_OTHER_OUTPUT (0<<8)
134#define ACPI_VGA_OUTPUT (1<<8)
135#define ACPI_TV_OUTPUT (2<<8)
136#define ACPI_DIGITAL_OUTPUT (3<<8)
137#define ACPI_LVDS_OUTPUT (4<<8)
138
139int gma_intel_opregion_init(struct drm_device *dev)
140{
141 struct drm_psb_private *dev_priv = dev->dev_private;
142 struct psb_intel_opregion *opregion = &dev_priv->opregion;
143 u32 opregion_phy;
144 void *base;
145 u32 *lid_state;
146
147 dev_priv->lid_state = NULL;
148
149 pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
150 if (opregion_phy == 0)
151 return -ENOTSUPP;
152
153 base = ioremap(opregion_phy, 8*1024);
154 if (!base)
155 return -ENOMEM;
156 /* FIXME: should use _io ops - ditto on i915 */
157 if (memcmp(base, OPREGION_SIGNATURE, 16)) {
158 DRM_ERROR("opregion signature mismatch\n");
159 iounmap(base);
160 return -EINVAL;
161 }
162
163 lid_state = base + 0x01ac;
164
165 dev_priv->lid_state = lid_state;
166 dev_priv->lid_last_state = readl(lid_state);
167 opregion->header = base;
168 opregion->vbt = base + OPREGION_VBT_OFFSET;
169 return 0;
170}
171
172int gma_intel_opregion_exit(struct drm_device *dev)
173{
174 struct drm_psb_private *dev_priv = dev->dev_private;
175 if (dev_priv->opregion.header)
176 iounmap(dev_priv->opregion.header);
177 return 0;
178}
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index a0bd48cd92f4..717f4db28c3c 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -672,8 +672,8 @@ const struct psb_ops mdfld_chip_ops = {
672 .accel_2d = 0, 672 .accel_2d = 0,
673 .pipes = 3, 673 .pipes = 3,
674 .crtcs = 3, 674 .crtcs = 3,
675 .lvds_mask = (1 << 1); 675 .lvds_mask = (1 << 1),
676 .hdmi_mask = (1 << 1); 676 .hdmi_mask = (1 << 1),
677 .sgx_offset = MRST_SGX_OFFSET, 677 .sgx_offset = MRST_SGX_OFFSET,
678 678
679 .chip_setup = mid_chip_setup, 679 .chip_setup = mid_chip_setup,
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d52358b744a0..b34ff097b979 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -869,7 +869,6 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
869 mdfld_set_pipe_timing(dsi_config, pipe); 869 mdfld_set_pipe_timing(dsi_config, pipe);
870 870
871 REG_WRITE(DSPABASE, 0x00); 871 REG_WRITE(DSPABASE, 0x00);
872 REG_WRITE(DSPASTRIDE, (mode->hdisplay * 4));
873 REG_WRITE(DSPASIZE, 872 REG_WRITE(DSPASIZE,
874 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); 873 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
875 874
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 21071cef92a4..36eb0744841c 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -29,7 +29,6 @@
29#define __MDFLD_DSI_OUTPUT_H__ 29#define __MDFLD_DSI_OUTPUT_H__
30 30
31#include <linux/backlight.h> 31#include <linux/backlight.h>
32#include <linux/version.h>
33#include <drm/drmP.h> 32#include <drm/drmP.h>
34#include <drm/drm.h> 33#include <drm/drm.h>
35#include <drm/drm_crtc.h> 34#include <drm/drm_crtc.h>
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 5eee9ad80da4..b2a790bd9899 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,139 +118,214 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
118 dev_priv->platform_rev_id); 118 dev_priv->platform_rev_id);
119} 119}
120 120
121struct vbt_header {
122 u32 signature;
123 u8 revision;
124} __packed;
125
126/* The same for r0 and r1 */
127struct vbt_r0 {
128 struct vbt_header vbt_header;
129 u8 size;
130 u8 checksum;
131} __packed;
132
133struct vbt_r10 {
134 struct vbt_header vbt_header;
135 u8 checksum;
136 u16 size;
137 u8 panel_count;
138 u8 primary_panel_idx;
139 u8 secondary_panel_idx;
140 u8 __reserved[5];
141} __packed;
142
143static int read_vbt_r0(u32 addr, struct vbt_r0 *vbt)
144{
145 void __iomem *vbt_virtual;
146
147 vbt_virtual = ioremap(addr, sizeof(*vbt));
148 if (vbt_virtual == NULL)
149 return -1;
150
151 memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
152 iounmap(vbt_virtual);
153
154 return 0;
155}
156
157static int read_vbt_r10(u32 addr, struct vbt_r10 *vbt)
158{
159 void __iomem *vbt_virtual;
160
161 vbt_virtual = ioremap(addr, sizeof(*vbt));
162 if (!vbt_virtual)
163 return -1;
164
165 memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
166 iounmap(vbt_virtual);
167
168 return 0;
169}
170
171static int mid_get_vbt_data_r0(struct drm_psb_private *dev_priv, u32 addr)
172{
173 struct vbt_r0 vbt;
174 void __iomem *gct_virtual;
175 struct gct_r0 gct;
176 u8 bpi;
177
178 if (read_vbt_r0(addr, &vbt))
179 return -1;
180
181 gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
182 if (!gct_virtual)
183 return -1;
184 memcpy_fromio(&gct, gct_virtual, sizeof(gct));
185 iounmap(gct_virtual);
186
187 bpi = gct.PD.BootPanelIndex;
188 dev_priv->gct_data.bpi = bpi;
189 dev_priv->gct_data.pt = gct.PD.PanelType;
190 dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
191 dev_priv->gct_data.Panel_Port_Control =
192 gct.panel[bpi].Panel_Port_Control;
193 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
194 gct.panel[bpi].Panel_MIPI_Display_Descriptor;
195
196 return 0;
197}
198
199static int mid_get_vbt_data_r1(struct drm_psb_private *dev_priv, u32 addr)
200{
201 struct vbt_r0 vbt;
202 void __iomem *gct_virtual;
203 struct gct_r1 gct;
204 u8 bpi;
205
206 if (read_vbt_r0(addr, &vbt))
207 return -1;
208
209 gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
210 if (!gct_virtual)
211 return -1;
212 memcpy_fromio(&gct, gct_virtual, sizeof(gct));
213 iounmap(gct_virtual);
214
215 bpi = gct.PD.BootPanelIndex;
216 dev_priv->gct_data.bpi = bpi;
217 dev_priv->gct_data.pt = gct.PD.PanelType;
218 dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
219 dev_priv->gct_data.Panel_Port_Control =
220 gct.panel[bpi].Panel_Port_Control;
221 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
222 gct.panel[bpi].Panel_MIPI_Display_Descriptor;
223
224 return 0;
225}
226
227static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
228{
229 struct vbt_r10 vbt;
230 void __iomem *gct_virtual;
231 struct gct_r10 *gct;
232 struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
233 struct gct_r10_timing_info *ti;
234 int ret = -1;
235
236 if (read_vbt_r10(addr, &vbt))
237 return -1;
238
239 gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
240 if (!gct)
241 return -1;
242
243 gct_virtual = ioremap(addr + sizeof(vbt),
244 sizeof(*gct) * vbt.panel_count);
245 if (!gct_virtual)
246 goto out;
247 memcpy_fromio(gct, gct_virtual, sizeof(*gct));
248 iounmap(gct_virtual);
249
250 dev_priv->gct_data.bpi = vbt.primary_panel_idx;
251 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
252 gct[vbt.primary_panel_idx].Panel_MIPI_Display_Descriptor;
253
254 ti = &gct[vbt.primary_panel_idx].DTD;
255 dp_ti->pixel_clock = ti->pixel_clock;
256 dp_ti->hactive_hi = ti->hactive_hi;
257 dp_ti->hactive_lo = ti->hactive_lo;
258 dp_ti->hblank_hi = ti->hblank_hi;
259 dp_ti->hblank_lo = ti->hblank_lo;
260 dp_ti->hsync_offset_hi = ti->hsync_offset_hi;
261 dp_ti->hsync_offset_lo = ti->hsync_offset_lo;
262 dp_ti->hsync_pulse_width_hi = ti->hsync_pulse_width_hi;
263 dp_ti->hsync_pulse_width_lo = ti->hsync_pulse_width_lo;
264 dp_ti->vactive_hi = ti->vactive_hi;
265 dp_ti->vactive_lo = ti->vactive_lo;
266 dp_ti->vblank_hi = ti->vblank_hi;
267 dp_ti->vblank_lo = ti->vblank_lo;
268 dp_ti->vsync_offset_hi = ti->vsync_offset_hi;
269 dp_ti->vsync_offset_lo = ti->vsync_offset_lo;
270 dp_ti->vsync_pulse_width_hi = ti->vsync_pulse_width_hi;
271 dp_ti->vsync_pulse_width_lo = ti->vsync_pulse_width_lo;
272
273 ret = 0;
274out:
275 kfree(gct);
276 return ret;
277}
278
121static void mid_get_vbt_data(struct drm_psb_private *dev_priv) 279static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
122{ 280{
123 struct drm_device *dev = dev_priv->dev; 281 struct drm_device *dev = dev_priv->dev;
124 struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
125 u32 addr; 282 u32 addr;
126 u16 new_size; 283 u8 __iomem *vbt_virtual;
127 u8 *vbt_virtual; 284 struct vbt_header vbt_header;
128 u8 bpi;
129 u8 number_desc = 0;
130 struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
131 struct gct_r10_timing_info ti;
132 void *pGCT;
133 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0)); 285 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
286 int ret = -1;
134 287
135 /* Get the address of the platform config vbt, B0:D2:F0;0xFC */ 288 /* Get the address of the platform config vbt */
136 pci_read_config_dword(pci_gfx_root, 0xFC, &addr); 289 pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
137 pci_dev_put(pci_gfx_root); 290 pci_dev_put(pci_gfx_root);
138 291
139 dev_dbg(dev->dev, "drm platform config address is %x\n", addr); 292 dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
140 293
141 /* check for platform config address == 0. */ 294 if (!addr)
142 /* this means fw doesn't support vbt */ 295 goto out;
143
144 if (addr == 0) {
145 vbt->size = 0;
146 return;
147 }
148 296
149 /* get the virtual address of the vbt */ 297 /* get the virtual address of the vbt */
150 vbt_virtual = ioremap(addr, sizeof(*vbt)); 298 vbt_virtual = ioremap(addr, sizeof(vbt_header));
151 if (vbt_virtual == NULL) { 299 if (!vbt_virtual)
152 vbt->size = 0; 300 goto out;
153 return;
154 }
155 301
156 memcpy(vbt, vbt_virtual, sizeof(*vbt)); 302 memcpy_fromio(&vbt_header, vbt_virtual, sizeof(vbt_header));
157 iounmap(vbt_virtual); /* Free virtual address space */ 303 iounmap(vbt_virtual);
158 304
159 /* No matching signature don't process the data */ 305 if (memcmp(&vbt_header.signature, "$GCT", 4))
160 if (memcmp(vbt->signature, "$GCT", 4)) { 306 goto out;
161 vbt->size = 0; 307
162 return; 308 dev_dbg(dev->dev, "GCT revision is %02x\n", vbt_header.revision);
163 }
164 309
165 dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision); 310 switch (vbt_header.revision) {
166 311 case 0x00:
167 switch (vbt->revision) { 312 ret = mid_get_vbt_data_r0(dev_priv, addr);
168 case 0:
169 vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
170 vbt->size - sizeof(*vbt) + 4);
171 pGCT = vbt->oaktrail_gct;
172 bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
173 dev_priv->gct_data.bpi = bpi;
174 dev_priv->gct_data.pt =
175 ((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
176 memcpy(&dev_priv->gct_data.DTD,
177 &((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
178 sizeof(struct oaktrail_timing_info));
179 dev_priv->gct_data.Panel_Port_Control =
180 ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
181 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
182 ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
183 break; 313 break;
184 case 1: 314 case 0x01:
185 vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4, 315 ret = mid_get_vbt_data_r1(dev_priv, addr);
186 vbt->size - sizeof(*vbt) + 4);
187 pGCT = vbt->oaktrail_gct;
188 bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
189 dev_priv->gct_data.bpi = bpi;
190 dev_priv->gct_data.pt =
191 ((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
192 memcpy(&dev_priv->gct_data.DTD,
193 &((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
194 sizeof(struct oaktrail_timing_info));
195 dev_priv->gct_data.Panel_Port_Control =
196 ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
197 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
198 ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
199 break; 316 break;
200 case 0x10: 317 case 0x10:
201 /*header definition changed from rev 01 (v2) to rev 10h. */ 318 ret = mid_get_vbt_data_r10(dev_priv, addr);
202 /*so, some values have changed location*/
203 new_size = vbt->checksum; /*checksum contains lo size byte*/
204 /*LSB of oaktrail_gct contains hi size byte*/
205 new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
206
207 vbt->checksum = vbt->size; /*size contains the checksum*/
208 if (new_size > 0xff)
209 vbt->size = 0xff; /*restrict size to 255*/
210 else
211 vbt->size = new_size;
212
213 /* number of descriptors defined in the GCT */
214 number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
215 bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
216 vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
217 GCT_R10_DISPLAY_DESC_SIZE * number_desc);
218 pGCT = vbt->oaktrail_gct;
219 pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
220 dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
221
222 /*copy the GCT display timings into a temp structure*/
223 memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
224
225 /*now copy the temp struct into the dev_priv->gct_data*/
226 dp_ti->pixel_clock = ti.pixel_clock;
227 dp_ti->hactive_hi = ti.hactive_hi;
228 dp_ti->hactive_lo = ti.hactive_lo;
229 dp_ti->hblank_hi = ti.hblank_hi;
230 dp_ti->hblank_lo = ti.hblank_lo;
231 dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
232 dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
233 dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
234 dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
235 dp_ti->vactive_hi = ti.vactive_hi;
236 dp_ti->vactive_lo = ti.vactive_lo;
237 dp_ti->vblank_hi = ti.vblank_hi;
238 dp_ti->vblank_lo = ti.vblank_lo;
239 dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
240 dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
241 dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
242 dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
243
244 /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
245 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
246 *((u8 *)pGCT + 0x0d);
247 dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
248 (*((u8 *)pGCT + 0x0e)) << 8;
249 break; 319 break;
250 default: 320 default:
251 dev_err(dev->dev, "Unknown revision of GCT!\n"); 321 dev_err(dev->dev, "Unknown revision of GCT!\n");
252 vbt->size = 0;
253 } 322 }
323
324out:
325 if (ret)
326 dev_err(dev->dev, "Unable to read GCT!");
327 else
328 dev_priv->has_gct = true;
254} 329}
255 330
256int mid_chip_setup(struct drm_device *dev) 331int mid_chip_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index 2da1f368f14e..f2f9f38a5362 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -19,14 +19,6 @@
19 19
20/* MID device specific descriptors */ 20/* MID device specific descriptors */
21 21
22struct oaktrail_vbt {
23 s8 signature[4]; /*4 bytes,"$GCT" */
24 u8 revision;
25 u8 size;
26 u8 checksum;
27 void *oaktrail_gct;
28} __packed;
29
30struct oaktrail_timing_info { 22struct oaktrail_timing_info {
31 u16 pixel_clock; 23 u16 pixel_clock;
32 u8 hactive_lo; 24 u8 hactive_lo;
@@ -161,7 +153,7 @@ union oaktrail_panel_rx {
161 u16 panel_receiver; 153 u16 panel_receiver;
162} __packed; 154} __packed;
163 155
164struct oaktrail_gct_v1 { 156struct gct_r0 {
165 union { /*8 bits,Defined as follows: */ 157 union { /*8 bits,Defined as follows: */
166 struct { 158 struct {
167 u8 PanelType:4; /*4 bits, Bit field for panels*/ 159 u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -178,7 +170,7 @@ struct oaktrail_gct_v1 {
178 union oaktrail_panel_rx panelrx[4]; /* panel receivers*/ 170 union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
179} __packed; 171} __packed;
180 172
181struct oaktrail_gct_v2 { 173struct gct_r1 {
182 union { /*8 bits,Defined as follows: */ 174 union { /*8 bits,Defined as follows: */
183 struct { 175 struct {
184 u8 PanelType:4; /*4 bits, Bit field for panels*/ 176 u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -195,6 +187,16 @@ struct oaktrail_gct_v2 {
195 union oaktrail_panel_rx panelrx[4]; /* panel receivers*/ 187 union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
196} __packed; 188} __packed;
197 189
190struct gct_r10 {
191 struct gct_r10_timing_info DTD;
192 u16 Panel_MIPI_Display_Descriptor;
193 u16 Panel_MIPI_Receiver_Descriptor;
194 u16 Panel_Backlight_Inverter_Descriptor;
195 u8 Panel_Initial_Brightness;
196 u32 MIPI_Ctlr_Init_ptr;
197 u32 MIPI_Panel_Init_ptr;
198} __packed;
199
198struct oaktrail_gct_data { 200struct oaktrail_gct_data {
199 u8 bpi; /* boot panel index, number of panel used during boot */ 201 u8 bpi; /* boot panel index, number of panel used during boot */
200 u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */ 202 u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
@@ -213,9 +215,6 @@ struct oaktrail_gct_data {
213#define MODE_SETTING_IN_DSR 0x4 215#define MODE_SETTING_IN_DSR 0x4
214#define MODE_SETTING_ENCODER_DONE 0x8 216#define MODE_SETTING_ENCODER_DONE 0x8
215 217
216#define GCT_R10_HEADER_SIZE 16
217#define GCT_R10_DISPLAY_DESC_SIZE 28
218
219/* 218/*
220 * Moorestown HDMI interfaces 219 * Moorestown HDMI interfaces
221 */ 220 */
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 4c5a1864adf4..0bb74cc3ecf8 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -458,27 +458,26 @@ static int oaktrail_power_up(struct drm_device *dev)
458static int oaktrail_chip_setup(struct drm_device *dev) 458static int oaktrail_chip_setup(struct drm_device *dev)
459{ 459{
460 struct drm_psb_private *dev_priv = dev->dev_private; 460 struct drm_psb_private *dev_priv = dev->dev_private;
461 struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
462 int ret; 461 int ret;
463 462
464 ret = mid_chip_setup(dev); 463 ret = mid_chip_setup(dev);
465 if (ret < 0) 464 if (ret < 0)
466 return ret; 465 return ret;
467 if (vbt->size == 0) { 466 if (!dev_priv->has_gct) {
468 /* Now pull the BIOS data */ 467 /* Now pull the BIOS data */
469 gma_intel_opregion_init(dev); 468 psb_intel_opregion_init(dev);
470 psb_intel_init_bios(dev); 469 psb_intel_init_bios(dev);
471 } 470 }
471 oaktrail_hdmi_setup(dev);
472 return 0; 472 return 0;
473} 473}
474 474
475static void oaktrail_teardown(struct drm_device *dev) 475static void oaktrail_teardown(struct drm_device *dev)
476{ 476{
477 struct drm_psb_private *dev_priv = dev->dev_private; 477 struct drm_psb_private *dev_priv = dev->dev_private;
478 struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
479 478
480 oaktrail_hdmi_teardown(dev); 479 oaktrail_hdmi_teardown(dev);
481 if (vbt->size == 0) 480 if (!dev_priv->has_gct)
482 psb_intel_destroy_bios(dev); 481 psb_intel_destroy_bios(dev);
483} 482}
484 483
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 5e84fbde749b..88627e3ba1e3 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -250,7 +250,7 @@ static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
250 */ 250 */
251static void oaktrail_hdmi_i2c_gpio_fix(void) 251static void oaktrail_hdmi_i2c_gpio_fix(void)
252{ 252{
253 void *base; 253 void __iomem *base;
254 unsigned int gpio_base = 0xff12c000; 254 unsigned int gpio_base = 0xff12c000;
255 int gpio_len = 0x1000; 255 int gpio_len = 0x1000;
256 u32 temp; 256 u32 temp;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 654f32b22b21..558c77fb55ec 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -257,7 +257,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
257 mode_dev->panel_fixed_mode = NULL; 257 mode_dev->panel_fixed_mode = NULL;
258 258
259 /* Use the firmware provided data on Moorestown */ 259 /* Use the firmware provided data on Moorestown */
260 if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/ 260 if (dev_priv->has_gct) {
261 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 261 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
262 if (!mode) 262 if (!mode)
263 return; 263 return;
@@ -371,7 +371,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
371 BRIGHTNESS_MAX_LEVEL); 371 BRIGHTNESS_MAX_LEVEL);
372 372
373 mode_dev->panel_wants_dither = false; 373 mode_dev->panel_wants_dither = false;
374 if (dev_priv->vbt_data.size != 0x00) 374 if (dev_priv->has_gct)
375 mode_dev->panel_wants_dither = (dev_priv->gct_data. 375 mode_dev->panel_wants_dither = (dev_priv->gct_data.
376 Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE); 376 Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
377 if (dev_priv->lvds_dither) 377 if (dev_priv->lvds_dither)
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
new file mode 100644
index 000000000000..05661bfeac75
--- /dev/null
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -0,0 +1,350 @@
1/*
2 * Copyright 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 */
24#ifdef CONFIG_ACPI
25#include <linux/acpi.h>
26#include <linux/acpi_io.h>
27#endif
28#include "psb_drv.h"
29#include "psb_intel_reg.h"
30
31#define PCI_ASLE 0xe4
32#define PCI_ASLS 0xfc
33
34#define OPREGION_HEADER_OFFSET 0
35#define OPREGION_ACPI_OFFSET 0x100
36#define ACPI_CLID 0x01ac /* current lid state indicator */
37#define ACPI_CDCK 0x01b0 /* current docking state indicator */
38#define OPREGION_SWSCI_OFFSET 0x200
39#define OPREGION_ASLE_OFFSET 0x300
40#define OPREGION_VBT_OFFSET 0x400
41
42#define OPREGION_SIGNATURE "IntelGraphicsMem"
43#define MBOX_ACPI (1<<0)
44#define MBOX_SWSCI (1<<1)
45#define MBOX_ASLE (1<<2)
46
47struct opregion_header {
48 u8 signature[16];
49 u32 size;
50 u32 opregion_ver;
51 u8 bios_ver[32];
52 u8 vbios_ver[16];
53 u8 driver_ver[16];
54 u32 mboxes;
55 u8 reserved[164];
56} __packed;
57
58/* OpRegion mailbox #1: public ACPI methods */
59struct opregion_acpi {
60 u32 drdy; /* driver readiness */
61 u32 csts; /* notification status */
62 u32 cevt; /* current event */
63 u8 rsvd1[20];
64 u32 didl[8]; /* supported display devices ID list */
65 u32 cpdl[8]; /* currently presented display list */
66 u32 cadl[8]; /* currently active display list */
67 u32 nadl[8]; /* next active devices list */
68 u32 aslp; /* ASL sleep time-out */
69 u32 tidx; /* toggle table index */
70 u32 chpd; /* current hotplug enable indicator */
71 u32 clid; /* current lid state*/
72 u32 cdck; /* current docking state */
73 u32 sxsw; /* Sx state resume */
74 u32 evts; /* ASL supported events */
75 u32 cnot; /* current OS notification */
76 u32 nrdy; /* driver status */
77 u8 rsvd2[60];
78} __packed;
79
80/* OpRegion mailbox #2: SWSCI */
81struct opregion_swsci {
82 /*FIXME: add it later*/
83} __packed;
84
85/* OpRegion mailbox #3: ASLE */
86struct opregion_asle {
87 u32 ardy; /* driver readiness */
88 u32 aslc; /* ASLE interrupt command */
89 u32 tche; /* technology enabled indicator */
90 u32 alsi; /* current ALS illuminance reading */
91 u32 bclp; /* backlight brightness to set */
92 u32 pfit; /* panel fitting state */
93 u32 cblv; /* current brightness level */
94 u16 bclm[20]; /* backlight level duty cycle mapping table */
95 u32 cpfm; /* current panel fitting mode */
96 u32 epfm; /* enabled panel fitting modes */
97 u8 plut[74]; /* panel LUT and identifier */
98 u32 pfmb; /* PWM freq and min brightness */
99 u8 rsvd[102];
100} __packed;
101
102/* ASLE irq request bits */
103#define ASLE_SET_ALS_ILLUM (1 << 0)
104#define ASLE_SET_BACKLIGHT (1 << 1)
105#define ASLE_SET_PFIT (1 << 2)
106#define ASLE_SET_PWM_FREQ (1 << 3)
107#define ASLE_REQ_MSK 0xf
108
109/* response bits of ASLE irq request */
110#define ASLE_ALS_ILLUM_FAILED (1<<10)
111#define ASLE_BACKLIGHT_FAILED (1<<12)
112#define ASLE_PFIT_FAILED (1<<14)
113#define ASLE_PWM_FREQ_FAILED (1<<16)
114
115/* ASLE backlight brightness to set */
116#define ASLE_BCLP_VALID (1<<31)
117#define ASLE_BCLP_MSK (~(1<<31))
118
119/* ASLE panel fitting request */
120#define ASLE_PFIT_VALID (1<<31)
121#define ASLE_PFIT_CENTER (1<<0)
122#define ASLE_PFIT_STRETCH_TEXT (1<<1)
123#define ASLE_PFIT_STRETCH_GFX (1<<2)
124
125/* response bits of ASLE irq request */
126#define ASLE_ALS_ILLUM_FAILED (1<<10)
127#define ASLE_BACKLIGHT_FAILED (1<<12)
128#define ASLE_PFIT_FAILED (1<<14)
129#define ASLE_PWM_FREQ_FAILED (1<<16)
130
131/* ASLE backlight brightness to set */
132#define ASLE_BCLP_VALID (1<<31)
133#define ASLE_BCLP_MSK (~(1<<31))
134
135/* ASLE panel fitting request */
136#define ASLE_PFIT_VALID (1<<31)
137#define ASLE_PFIT_CENTER (1<<0)
138#define ASLE_PFIT_STRETCH_TEXT (1<<1)
139#define ASLE_PFIT_STRETCH_GFX (1<<2)
140
141/* PWM frequency and minimum brightness */
142#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
143#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
144#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
145#define ASLE_PFMB_PWM_VALID (1<<31)
146
147#define ASLE_CBLV_VALID (1<<31)
148
149static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
150{
151 struct drm_psb_private *dev_priv = dev->dev_private;
152 struct opregion_asle *asle = dev_priv->opregion.asle;
153 struct backlight_device *bd = dev_priv->backlight_device;
154 u32 max;
155
156 DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);
157
158 if (!(bclp & ASLE_BCLP_VALID))
159 return ASLE_BACKLIGHT_FAILED;
160
161 if (bd == NULL)
162 return ASLE_BACKLIGHT_FAILED;
163
164 bclp &= ASLE_BCLP_MSK;
165 if (bclp > 255)
166 return ASLE_BACKLIGHT_FAILED;
167
168#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
169 max = bd->props.max_brightness;
170 bd->props.brightness = bclp * max / 255;
171 backlight_update_status(bd);
172#endif
173 asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
174
175 return 0;
176}
177
178void psb_intel_opregion_asle_intr(struct drm_device *dev)
179{
180 struct drm_psb_private *dev_priv = dev->dev_private;
181 struct opregion_asle *asle = dev_priv->opregion.asle;
182 u32 asle_stat = 0;
183 u32 asle_req;
184
185 if (!asle)
186 return;
187
188 asle_req = asle->aslc & ASLE_REQ_MSK;
189 if (!asle_req) {
190 DRM_DEBUG_DRIVER("non asle set request??\n");
191 return;
192 }
193
194 if (asle_req & ASLE_SET_BACKLIGHT)
195 asle_stat |= asle_set_backlight(dev, asle->bclp);
196
197 asle->aslc = asle_stat;
198}
199
200#define ASLE_ALS_EN (1<<0)
201#define ASLE_BLC_EN (1<<1)
202#define ASLE_PFIT_EN (1<<2)
203#define ASLE_PFMB_EN (1<<3)
204
205void psb_intel_opregion_enable_asle(struct drm_device *dev)
206{
207 struct drm_psb_private *dev_priv = dev->dev_private;
208 struct opregion_asle *asle = dev_priv->opregion.asle;
209
210 if (asle) {
211 /* Don't do this on Medfield or other non PC like devices, they
212 use the bit for something different altogether */
213 psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
214 psb_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
215
216 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN
217 | ASLE_PFMB_EN;
218 asle->ardy = 1;
219 }
220}
221
222#define ACPI_EV_DISPLAY_SWITCH (1<<0)
223#define ACPI_EV_LID (1<<1)
224#define ACPI_EV_DOCK (1<<2)
225
226static struct psb_intel_opregion *system_opregion;
227
228static int psb_intel_opregion_video_event(struct notifier_block *nb,
229 unsigned long val, void *data)
230{
231 /* The only video events relevant to opregion are 0x80. These indicate
232 either a docking event, lid switch or display switch request. In
233 Linux, these are handled by the dock, button and video drivers.
234 We might want to fix the video driver to be opregion-aware in
235 future, but right now we just indicate to the firmware that the
236 request has been handled */
237
238 struct opregion_acpi *acpi;
239
240 if (!system_opregion)
241 return NOTIFY_DONE;
242
243 acpi = system_opregion->acpi;
244 acpi->csts = 0;
245
246 return NOTIFY_OK;
247}
248
249static struct notifier_block psb_intel_opregion_notifier = {
250 .notifier_call = psb_intel_opregion_video_event,
251};
252
253void psb_intel_opregion_init(struct drm_device *dev)
254{
255 struct drm_psb_private *dev_priv = dev->dev_private;
256 struct psb_intel_opregion *opregion = &dev_priv->opregion;
257
258 if (!opregion->header)
259 return;
260
261 if (opregion->acpi) {
262 /* Notify BIOS we are ready to handle ACPI video ext notifs.
263 * Right now, all the events are handled by the ACPI video
264 * module. We don't actually need to do anything with them. */
265 opregion->acpi->csts = 0;
266 opregion->acpi->drdy = 1;
267
268 system_opregion = opregion;
269 register_acpi_notifier(&psb_intel_opregion_notifier);
270 }
271
272 if (opregion->asle)
273 psb_intel_opregion_enable_asle(dev);
274}
275
276void psb_intel_opregion_fini(struct drm_device *dev)
277{
278 struct drm_psb_private *dev_priv = dev->dev_private;
279 struct psb_intel_opregion *opregion = &dev_priv->opregion;
280
281 if (!opregion->header)
282 return;
283
284 if (opregion->acpi) {
285 opregion->acpi->drdy = 0;
286
287 system_opregion = NULL;
288 unregister_acpi_notifier(&psb_intel_opregion_notifier);
289 }
290
291 /* just clear all opregion memory pointers now */
292 iounmap(opregion->header);
293 opregion->header = NULL;
294 opregion->acpi = NULL;
295 opregion->swsci = NULL;
296 opregion->asle = NULL;
297 opregion->vbt = NULL;
298}
299
300int psb_intel_opregion_setup(struct drm_device *dev)
301{
302 struct drm_psb_private *dev_priv = dev->dev_private;
303 struct psb_intel_opregion *opregion = &dev_priv->opregion;
304 u32 opregion_phy, mboxes;
305 void __iomem *base;
306 int err = 0;
307
308 pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy);
309 if (opregion_phy == 0) {
310 DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
311 return -ENOTSUPP;
312 }
313 DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
314#ifdef CONFIG_ACPI
315 base = acpi_os_ioremap(opregion_phy, 8*1024);
316#else
317 base = ioremap(opregion_phy, 8*1024);
318#endif
319 if (!base)
320 return -ENOMEM;
321
322 if (memcmp(base, OPREGION_SIGNATURE, 16)) {
323 DRM_DEBUG_DRIVER("opregion signature mismatch\n");
324 err = -EINVAL;
325 goto err_out;
326 }
327
328 opregion->header = base;
329 opregion->vbt = base + OPREGION_VBT_OFFSET;
330
331 opregion->lid_state = base + ACPI_CLID;
332
333 mboxes = opregion->header->mboxes;
334 if (mboxes & MBOX_ACPI) {
335 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
336 opregion->acpi = base + OPREGION_ACPI_OFFSET;
337 }
338
339 if (mboxes & MBOX_ASLE) {
340 DRM_DEBUG_DRIVER("ASLE supported\n");
341 opregion->asle = base + OPREGION_ASLE_OFFSET;
342 }
343
344 return 0;
345
346err_out:
347 iounmap(base);
348 return err;
349}
350
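
asle_set_backlight() above takes the 0-255 request the firmware leaves in bclp, validates it, rescales it to the backlight device's max_brightness range, and reports the result back through cblv as a percentage with the valid bit set. The arithmetic in isolation (a minimal sketch; the helper name and sample values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define ASLE_BCLP_VALID  (1u << 31)
#define ASLE_BCLP_MSK    (~(1u << 31))
#define ASLE_CBLV_VALID  (1u << 31)

/* Scale a firmware request (0-255) onto a device with max_brightness steps
 * and build the cblv response (percentage | valid bit). */
static int asle_scale_backlight(uint32_t bclp, uint32_t max_brightness,
                                uint32_t *level, uint32_t *cblv)
{
        if (!(bclp & ASLE_BCLP_VALID))
                return -1;
        bclp &= ASLE_BCLP_MSK;
        if (bclp > 255)
                return -1;

        *level = bclp * max_brightness / 255;
        *cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;   /* 0x64 == 100% */
        return 0;
}

int main(void)
{
        uint32_t level, cblv;

        if (!asle_scale_backlight(ASLE_BCLP_VALID | 128, 4882, &level, &cblv))
                printf("level=%u cblv=0x%08x (%u%%)\n",
                       (unsigned)level, (unsigned)cblv,
                       (unsigned)(cblv & ~ASLE_CBLV_VALID));
        return 0;
}

A request of 128 on a 4882-step device lands on step 2450 and is reported back as 50% with the valid bit set.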
diff --git a/drivers/gpu/drm/gma500/opregion.h b/drivers/gpu/drm/gma500/opregion.h
new file mode 100644
index 000000000000..a392ea8908b7
--- /dev/null
+++ b/drivers/gpu/drm/gma500/opregion.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
26extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
27extern void psb_intel_opregion_init(struct drm_device *dev);
28extern void psb_intel_opregion_fini(struct drm_device *dev);
29extern int psb_intel_opregion_setup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 34e6866a73b2..e95cddbceb60 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -293,7 +293,7 @@ static int psb_chip_setup(struct drm_device *dev)
293{ 293{
294 psb_get_core_freq(dev); 294 psb_get_core_freq(dev);
295 gma_intel_setup_gmbus(dev); 295 gma_intel_setup_gmbus(dev);
296 gma_intel_opregion_init(dev); 296 psb_intel_opregion_init(dev);
297 psb_intel_init_bios(dev); 297 psb_intel_init_bios(dev);
298 return 0; 298 return 0;
299} 299}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index d5a6eab8227e..0e85978877e8 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -215,12 +215,11 @@ static int psb_driver_unload(struct drm_device *dev)
215 /* Kill vblank etc here */ 215 /* Kill vblank etc here */
216 216
217 gma_backlight_exit(dev); 217 gma_backlight_exit(dev);
218
219 psb_modeset_cleanup(dev); 218 psb_modeset_cleanup(dev);
220 219
221 if (dev_priv) { 220 if (dev_priv) {
221 psb_intel_opregion_fini(dev);
222 psb_lid_timer_takedown(dev_priv); 222 psb_lid_timer_takedown(dev_priv);
223 gma_intel_opregion_exit(dev);
224 223
225 if (dev_priv->ops->chip_teardown) 224 if (dev_priv->ops->chip_teardown)
226 dev_priv->ops->chip_teardown(dev); 225 dev_priv->ops->chip_teardown(dev);
@@ -310,6 +309,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
310 if (!dev_priv->sgx_reg) 309 if (!dev_priv->sgx_reg)
311 goto out_err; 310 goto out_err;
312 311
312 psb_intel_opregion_setup(dev);
313
313 ret = dev_priv->ops->chip_setup(dev); 314 ret = dev_priv->ops->chip_setup(dev);
314 if (ret) 315 if (ret)
315 goto out_err; 316 goto out_err;
@@ -349,9 +350,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
349 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE); 350 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
350 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE); 351 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
351 352
352/* igd_opregion_init(&dev_priv->opregion_dev); */
353 acpi_video_register(); 353 acpi_video_register();
354 if (dev_priv->lid_state) 354 if (dev_priv->opregion.lid_state)
355 psb_lid_timer_init(dev_priv); 355 psb_lid_timer_init(dev_priv);
356 356
357 ret = drm_vblank_init(dev, dev_priv->num_pipe); 357 ret = drm_vblank_init(dev, dev_priv->num_pipe);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index d3528a694206..270a27bc936a 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -30,6 +30,7 @@
30#include "psb_intel_drv.h" 30#include "psb_intel_drv.h"
31#include "gtt.h" 31#include "gtt.h"
32#include "power.h" 32#include "power.h"
33#include "opregion.h"
33#include "oaktrail.h" 34#include "oaktrail.h"
34 35
35/* Append new drm mode definition here, align with libdrm definition */ 36/* Append new drm mode definition here, align with libdrm definition */
@@ -120,6 +121,7 @@ enum {
120#define PSB_HWSTAM 0x2098 121#define PSB_HWSTAM 0x2098
121#define PSB_INSTPM 0x20C0 122#define PSB_INSTPM 0x20C0
122#define PSB_INT_IDENTITY_R 0x20A4 123#define PSB_INT_IDENTITY_R 0x20A4
124#define _PSB_IRQ_ASLE (1<<0)
123#define _MDFLD_PIPEC_EVENT_FLAG (1<<2) 125#define _MDFLD_PIPEC_EVENT_FLAG (1<<2)
124#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3) 126#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3)
125#define _PSB_DPST_PIPEB_FLAG (1<<4) 127#define _PSB_DPST_PIPEB_FLAG (1<<4)
@@ -259,7 +261,7 @@ struct psb_intel_opregion {
259 struct opregion_swsci *swsci; 261 struct opregion_swsci *swsci;
260 struct opregion_asle *asle; 262 struct opregion_asle *asle;
261 void *vbt; 263 void *vbt;
262 int enabled; 264 u32 __iomem *lid_state;
263}; 265};
264 266
265struct sdvo_device_mapping { 267struct sdvo_device_mapping {
@@ -505,9 +507,9 @@ struct drm_psb_private {
505 /* GTT Memory manager */ 507 /* GTT Memory manager */
506 struct psb_gtt_mm *gtt_mm; 508 struct psb_gtt_mm *gtt_mm;
507 struct page *scratch_page; 509 struct page *scratch_page;
508 u32 *gtt_map; 510 u32 __iomem *gtt_map;
509 uint32_t stolen_base; 511 uint32_t stolen_base;
510 void *vram_addr; 512 u8 __iomem *vram_addr;
511 unsigned long vram_stolen_size; 513 unsigned long vram_stolen_size;
512 int gtt_initialized; 514 int gtt_initialized;
513 u16 gmch_ctrl; /* Saved GTT setup */ 515 u16 gmch_ctrl; /* Saved GTT setup */
@@ -523,8 +525,8 @@ struct drm_psb_private {
523 * Register base 525 * Register base
524 */ 526 */
525 527
526 uint8_t *sgx_reg; 528 uint8_t __iomem *sgx_reg;
527 uint8_t *vdc_reg; 529 uint8_t __iomem *vdc_reg;
528 uint32_t gatt_free_offset; 530 uint32_t gatt_free_offset;
529 531
530 /* 532 /*
@@ -610,7 +612,7 @@ struct drm_psb_private {
610 int rpm_enabled; 612 int rpm_enabled;
611 613
612 /* MID specific */ 614 /* MID specific */
613 struct oaktrail_vbt vbt_data; 615 bool has_gct;
614 struct oaktrail_gct_data gct_data; 616 struct oaktrail_gct_data gct_data;
615 617
616 /* Oaktrail HDMI state */ 618 /* Oaktrail HDMI state */
@@ -638,7 +640,6 @@ struct drm_psb_private {
638 spinlock_t lid_lock; 640 spinlock_t lid_lock;
639 struct timer_list lid_timer; 641 struct timer_list lid_timer;
640 struct psb_intel_opregion opregion; 642 struct psb_intel_opregion opregion;
641 u32 *lid_state;
642 u32 lid_last_state; 643 u32 lid_last_state;
643 644
644 /* 645 /*
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 519a9cd9ffbc..8e8c8efb0a89 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -507,6 +507,7 @@
507#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17) 507#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
508#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) 508#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18)
509#define PIPE_TE_ENABLE (1UL << 22) 509#define PIPE_TE_ENABLE (1UL << 22)
510#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
510#define PIPE_DPST_EVENT_ENABLE (1UL << 23) 511#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
511#define PIPE_VSYNC_ENABL (1UL << 25) 512#define PIPE_VSYNC_ENABL (1UL << 25)
512#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26) 513#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26)
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 2fcdffdc9063..8652cdf3f03f 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -190,6 +190,9 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
190 */ 190 */
191static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat) 191static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
192{ 192{
193 if (vdc_stat & _PSB_IRQ_ASLE)
194 psb_intel_opregion_asle_intr(dev);
195
193 if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG) 196 if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
194 mid_pipe_event_handler(dev, 0); 197 mid_pipe_event_handler(dev, 0);
195 198
@@ -283,6 +286,7 @@ void psb_irq_preinstall(struct drm_device *dev)
283 /* Revisit this area - want per device masks ? */ 286 /* Revisit this area - want per device masks ? */
284 if (dev_priv->ops->hotplug) 287 if (dev_priv->ops->hotplug)
285 dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC; 288 dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
289 dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE;
286 290
287 /* This register is safe even if display island is off */ 291 /* This register is safe even if display island is off */
288 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); 292 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
@@ -422,7 +426,7 @@ void psb_irq_turn_off_dpst(struct drm_device *dev)
422 psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE); 426 psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
423 427
424 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); 428 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
425 PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE), 429 PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
426 PWM_CONTROL_LOGIC); 430 PWM_CONTROL_LOGIC);
427 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); 431 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
428 432
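
The psb_irq_turn_off_dpst() hunk fixes a classic C slip: `pwm_reg & !(PWM_PHASEIN_INT_ENABLE)` uses logical NOT, which collapses the non-zero constant to 0 and so clears the whole register on write-back, while the intended bitwise NOT `~PWM_PHASEIN_INT_ENABLE` clears only that one bit. A tiny demonstration (the register value and bit position are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define PWM_PHASEIN_INT_ENABLE (1u << 25)        /* illustrative bit position */

int main(void)
{
        uint32_t pwm_reg = 0x8200ff40;           /* arbitrary sample register value */

        /* Logical NOT: !(non-zero) == 0, so the AND wipes every bit. */
        printf("broken: 0x%08x\n", pwm_reg & !(PWM_PHASEIN_INT_ENABLE));

        /* Bitwise NOT: only the targeted bit is cleared. */
        printf("fixed:  0x%08x\n", pwm_reg & ~PWM_PHASEIN_INT_ENABLE);
        return 0;
}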
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
index b867aabe6bf3..7ff8bb2bdc23 100644
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -29,7 +29,7 @@ static void psb_lid_timer_func(unsigned long data)
29 struct drm_device *dev = (struct drm_device *)dev_priv->dev; 29 struct drm_device *dev = (struct drm_device *)dev_priv->dev;
30 struct timer_list *lid_timer = &dev_priv->lid_timer; 30 struct timer_list *lid_timer = &dev_priv->lid_timer;
31 unsigned long irq_flags; 31 unsigned long irq_flags;
32 u32 *lid_state = dev_priv->lid_state; 32 u32 __iomem *lid_state = dev_priv->opregion.lid_state;
33 u32 pp_status; 33 u32 pp_status;
34 34
35 if (readl(lid_state) == dev_priv->lid_last_state) 35 if (readl(lid_state) == dev_priv->lid_last_state)
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 2c8a60c3b98e..f920fb5e42b6 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -129,6 +129,7 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
129 if (buf_priv->currently_mapped == I810_BUF_MAPPED) 129 if (buf_priv->currently_mapped == I810_BUF_MAPPED)
130 return -EINVAL; 130 return -EINVAL;
131 131
132 /* This is all entirely broken */
132 down_write(&current->mm->mmap_sem); 133 down_write(&current->mm->mmap_sem);
133 old_fops = file_priv->filp->f_op; 134 old_fops = file_priv->filp->f_op;
134 file_priv->filp->f_op = &i810_buffer_fops; 135 file_priv->filp->f_op = &i810_buffer_fops;
@@ -157,11 +158,8 @@ static int i810_unmap_buffer(struct drm_buf *buf)
157 if (buf_priv->currently_mapped != I810_BUF_MAPPED) 158 if (buf_priv->currently_mapped != I810_BUF_MAPPED)
158 return -EINVAL; 159 return -EINVAL;
159 160
160 down_write(&current->mm->mmap_sem); 161 retcode = vm_munmap((unsigned long)buf_priv->virtual,
161 retcode = do_munmap(current->mm,
162 (unsigned long)buf_priv->virtual,
163 (size_t) buf->total); 162 (size_t) buf->total);
164 up_write(&current->mm->mmap_sem);
165 163
166 buf_priv->currently_mapped = I810_BUF_UNMAPPED; 164 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
167 buf_priv->virtual = NULL; 165 buf_priv->virtual = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4ab57fd752dc..44a5f241b1a0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1021,11 +1021,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1021 if (obj == NULL) 1021 if (obj == NULL)
1022 return -ENOENT; 1022 return -ENOENT;
1023 1023
1024 down_write(&current->mm->mmap_sem); 1024 addr = vm_mmap(obj->filp, 0, args->size,
1025 addr = do_mmap(obj->filp, 0, args->size,
1026 PROT_READ | PROT_WRITE, MAP_SHARED, 1025 PROT_READ | PROT_WRITE, MAP_SHARED,
1027 args->offset); 1026 args->offset);
1028 up_write(&current->mm->mmap_sem);
1029 drm_gem_object_unreference_unlocked(obj); 1027 drm_gem_object_unreference_unlocked(obj);
1030 if (IS_ERR((void *)addr)) 1028 if (IS_ERR((void *)addr))
1031 return addr; 1029 return addr;
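
Both this hunk and the i810 one above replace open-coded `down_write(&current->mm->mmap_sem)` around `do_mmap()`/`do_munmap()` with the `vm_mmap()`/`vm_munmap()` helpers, which take the mmap semaphore internally. The return convention is unchanged: success yields the mapping address, failure yields a small negative errno packed into the same word, which is why the caller can test `IS_ERR((void *)addr)`. A userspace model of that encoding (a simplified re-creation, not the kernel headers):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Addresses in the top 4095 bytes of the address space never come back from
 * a real mapping, so that range can double as error codes. */
static inline int demo_is_err(unsigned long addr)
{
        return addr >= (unsigned long)-MAX_ERRNO;
}

static unsigned long demo_map(int should_fail)
{
        if (should_fail)
                return (unsigned long)-ENOMEM;   /* errno packed into the address word */
        return 0x70000000UL;                     /* pretend mapping address */
}

int main(void)
{
        unsigned long addr = demo_map(0);
        if (!demo_is_err(addr))
                printf("mapped at 0x%lx\n", addr);

        addr = demo_map(1);
        if (demo_is_err(addr))
                printf("failed: errno %ld\n", -(long)addr);
        return 0;
}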
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a46ed26464f4..206b9bbe6979 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1121,6 +1121,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1121 return -EINVAL; 1121 return -EINVAL;
1122 } 1122 }
1123 1123
1124 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1125 DRM_DEBUG("execbuf with %u cliprects\n",
1126 args->num_cliprects);
1127 return -EINVAL;
1128 }
1129
1124 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), 1130 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
1125 GFP_KERNEL); 1131 GFP_KERNEL);
1126 if (cliprects == NULL) { 1132 if (cliprects == NULL) {
@@ -1393,7 +1399,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1393 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 1399 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1394 int ret; 1400 int ret;
1395 1401
1396 if (args->buffer_count < 1) { 1402 if (args->buffer_count < 1 ||
1403 args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1397 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); 1404 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1398 return -EINVAL; 1405 return -EINVAL;
1399 } 1406 }
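
The two checks added here guard the `count * sizeof(elem)` multiplications: with a 32-bit size_t, a user-controlled num_cliprects or buffer_count close to UINT_MAX makes the product wrap, kmalloc succeeds with a buffer far smaller than the later copy expects, and the ioctl overruns it. The screening pattern is simply to compare the count against `UINT_MAX / sizeof(elem)` before multiplying; a standalone sketch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cliprect { uint16_t x1, y1, x2, y2; };    /* stand-in element type */

/* Allocate count elements, refusing counts whose byte size would overflow. */
static void *alloc_array(unsigned int count)
{
        if (count > UINT_MAX / sizeof(struct cliprect))
                return NULL;                     /* would wrap: reject, like -EINVAL */
        return malloc((size_t)count * sizeof(struct cliprect));
}

int main(void)
{
        void *ok  = alloc_array(16);
        void *bad = alloc_array(0xfffffffeu);    /* 0xfffffffe * 8 wraps in 32 bits */

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
}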
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 0976137ab79a..417ca99e697d 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -449,8 +449,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
449{ 449{
450 struct drm_device *dev = connector->dev; 450 struct drm_device *dev = connector->dev;
451 struct intel_crt *crt = intel_attached_crt(connector); 451 struct intel_crt *crt = intel_attached_crt(connector);
452 struct drm_crtc *crtc;
453 enum drm_connector_status status; 452 enum drm_connector_status status;
453 struct intel_load_detect_pipe tmp;
454 454
455 if (I915_HAS_HOTPLUG(dev)) { 455 if (I915_HAS_HOTPLUG(dev)) {
456 if (intel_crt_detect_hotplug(connector)) { 456 if (intel_crt_detect_hotplug(connector)) {
@@ -469,23 +469,16 @@ intel_crt_detect(struct drm_connector *connector, bool force)
469 return connector->status; 469 return connector->status;
470 470
471 /* for pre-945g platforms use load detect */ 471 /* for pre-945g platforms use load detect */
472 crtc = crt->base.base.crtc; 472 if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
473 if (crtc && crtc->enabled) { 473 &tmp)) {
474 status = intel_crt_load_detect(crt); 474 if (intel_crt_detect_ddc(connector))
475 } else { 475 status = connector_status_connected;
476 struct intel_load_detect_pipe tmp; 476 else
477 477 status = intel_crt_load_detect(crt);
478 if (intel_get_load_detect_pipe(&crt->base, connector, NULL, 478 intel_release_load_detect_pipe(&crt->base, connector,
479 &tmp)) { 479 &tmp);
480 if (intel_crt_detect_ddc(connector)) 480 } else
481 status = connector_status_connected; 481 status = connector_status_unknown;
482 else
483 status = intel_crt_load_detect(crt);
484 intel_release_load_detect_pipe(&crt->base, connector,
485 &tmp);
486 } else
487 status = connector_status_unknown;
488 }
489 482
490 return status; 483 return status;
491} 484}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e20f8042fddd..6b4139064f9c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3150,8 +3150,11 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3150 return false; 3150 return false;
3151 } 3151 }
3152 3152
3153 /* All interlaced capable intel hw wants timings in frames. */ 3153 /* All interlaced capable intel hw wants timings in frames. Note though
3154 drm_mode_set_crtcinfo(adjusted_mode, 0); 3154 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3155 * timings, so we need to be careful not to clobber these.*/
3156 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3157 drm_mode_set_crtcinfo(adjusted_mode, 0);
3155 3158
3156 return true; 3159 return true;
3157} 3160}
@@ -5441,9 +5444,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
5441 struct drm_device *dev = crtc->dev; 5444 struct drm_device *dev = crtc->dev;
5442 drm_i915_private_t *dev_priv = dev->dev_private; 5445 drm_i915_private_t *dev_priv = dev->dev_private;
5443 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5446 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5444 int pipe = intel_crtc->pipe;
5445 int dpll_reg = DPLL(pipe);
5446 int dpll = I915_READ(dpll_reg);
5447 5447
5448 if (HAS_PCH_SPLIT(dev)) 5448 if (HAS_PCH_SPLIT(dev))
5449 return; 5449 return;
@@ -5456,10 +5456,15 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
5456 * the manual case. 5456 * the manual case.
5457 */ 5457 */
5458 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { 5458 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
5459 int pipe = intel_crtc->pipe;
5460 int dpll_reg = DPLL(pipe);
5461 int dpll;
5462
5459 DRM_DEBUG_DRIVER("downclocking LVDS\n"); 5463 DRM_DEBUG_DRIVER("downclocking LVDS\n");
5460 5464
5461 assert_panel_unlocked(dev_priv, pipe); 5465 assert_panel_unlocked(dev_priv, pipe);
5462 5466
5467 dpll = I915_READ(dpll_reg);
5463 dpll |= DISPLAY_RATE_SELECT_FPA1; 5468 dpll |= DISPLAY_RATE_SELECT_FPA1;
5464 I915_WRITE(dpll_reg, dpll); 5469 I915_WRITE(dpll_reg, dpll);
5465 intel_wait_for_vblank(dev, pipe); 5470 intel_wait_for_vblank(dev, pipe);
@@ -5854,7 +5859,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
5854 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); 5859 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
5855 intel_ring_emit(ring, obj->gtt_offset); 5860 intel_ring_emit(ring, obj->gtt_offset);
5856 5861
5857 pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 5862 /* Contrary to the suggestions in the documentation,
5863 * "Enable Panel Fitter" does not seem to be required when page
5864 * flipping with a non-native mode, and worse causes a normal
5865 * modeset to fail.
5866 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
5867 */
5868 pf = 0;
5858 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 5869 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
5859 intel_ring_emit(ring, pf | pipesrc); 5870 intel_ring_emit(ring, pf | pipesrc);
5860 intel_ring_advance(ring); 5871 intel_ring_advance(ring);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cfec4842e0c4..e5ee166e2faf 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -117,6 +117,10 @@
117#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) 117#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
118#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) 118#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
119#define INTEL_MODE_DP_FORCE_6BPC (0x10) 119#define INTEL_MODE_DP_FORCE_6BPC (0x10)
120/* This flag must be set by the encoder's mode_fixup if it changes the crtc
121 * timings in the mode to prevent the crtc fixup from overwriting them.
122 * Currently only lvds needs that. */
123#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
120 124
121static inline void 125static inline void
122intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, 126intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
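
The new private flag coordinates two fixup stages: intel_lvds_mode_fixup (in the intel_lvds.c hunks further down) rewrites the crtc_* timing fields when it centres or scales the image and sets INTEL_MODE_CRTC_TIMINGS_SET, and intel_crtc_mode_fixup (in the intel_display.c hunk above) then skips its default drm_mode_set_crtcinfo() call so those hand-tuned timings are not overwritten. A toy version of the handshake, with simplified stand-in types and field handling:

#include <stdio.h>

#define MODE_CRTC_TIMINGS_SET 0x20               /* same value as the new flag */

struct display_mode {
        int vdisplay;
        int crtc_vblank_start;                   /* one of the derived crtc_* timings */
        unsigned int private_flags;
};

/* Default derivation of the crtc_* timings (very simplified drm_mode_set_crtcinfo()). */
static void set_crtcinfo(struct display_mode *m)
{
        m->crtc_vblank_start = m->vdisplay;
}

/* Encoder fixup: centre the image on the panel and claim the crtc timings. */
static void lvds_fixup(struct display_mode *m, int panel_vdisplay)
{
        int border = (panel_vdisplay - m->vdisplay) / 2;

        m->crtc_vblank_start = m->vdisplay + border;
        m->private_flags |= MODE_CRTC_TIMINGS_SET;
}

/* CRTC fixup: recompute the timings only if no encoder already set them. */
static void crtc_fixup(struct display_mode *m)
{
        if (!(m->private_flags & MODE_CRTC_TIMINGS_SET))
                set_crtcinfo(m);
}

int main(void)
{
        struct display_mode m = { .vdisplay = 600 };

        lvds_fixup(&m, 768);                     /* 600-line mode on a 768-line panel */
        crtc_fixup(&m);
        printf("crtc_vblank_start = %d (not clobbered back to %d)\n",
               m.crtc_vblank_start, m.vdisplay);
        return 0;
}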
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 71ef2896be96..bf8690720a0c 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -279,6 +279,8 @@ void intel_fb_restore_mode(struct drm_device *dev)
279 struct drm_mode_config *config = &dev->mode_config; 279 struct drm_mode_config *config = &dev->mode_config;
280 struct drm_plane *plane; 280 struct drm_plane *plane;
281 281
282 mutex_lock(&dev->mode_config.mutex);
283
282 ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); 284 ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
283 if (ret) 285 if (ret)
284 DRM_DEBUG("failed to restore crtc mode\n"); 286 DRM_DEBUG("failed to restore crtc mode\n");
@@ -286,4 +288,6 @@ void intel_fb_restore_mode(struct drm_device *dev)
286 /* Be sure to shut off any planes that may be active */ 288 /* Be sure to shut off any planes that may be active */
287 list_for_each_entry(plane, &config->plane_list, head) 289 list_for_each_entry(plane, &config->plane_list, head)
288 plane->funcs->disable_plane(plane); 290 plane->funcs->disable_plane(plane);
291
292 mutex_unlock(&dev->mode_config.mutex);
289} 293}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 8d2501704182..bf218753cbaf 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -136,7 +136,7 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
136 136
137 val &= ~VIDEO_DIP_SELECT_MASK; 137 val &= ~VIDEO_DIP_SELECT_MASK;
138 138
139 I915_WRITE(VIDEO_DIP_CTL, val | port | flags); 139 I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
140 140
141 for (i = 0; i < len; i += 4) { 141 for (i = 0; i < len; i += 4) {
142 I915_WRITE(VIDEO_DIP_DATA, *data); 142 I915_WRITE(VIDEO_DIP_DATA, *data);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 17a4630cec8a..9dee82350def 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -187,6 +187,8 @@ centre_horizontally(struct drm_display_mode *mode,
187 187
188 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; 188 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
189 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; 189 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
190
191 mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
190} 192}
191 193
192static void 194static void
@@ -208,6 +210,8 @@ centre_vertically(struct drm_display_mode *mode,
208 210
209 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; 211 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
210 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; 212 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
213
214 mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
211} 215}
212 216
213static inline u32 panel_fitter_scaling(u32 source, u32 target) 217static inline u32 panel_fitter_scaling(u32 source, u32 target)
@@ -283,6 +287,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
283 for_each_pipe(pipe) 287 for_each_pipe(pipe)
284 I915_WRITE(BCLRPAT(pipe), 0); 288 I915_WRITE(BCLRPAT(pipe), 0);
285 289
290 drm_mode_set_crtcinfo(adjusted_mode, 0);
291
286 switch (intel_lvds->fitting_mode) { 292 switch (intel_lvds->fitting_mode) {
287 case DRM_MODE_SCALE_CENTER: 293 case DRM_MODE_SCALE_CENTER:
288 /* 294 /*
@@ -744,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
744 .ident = "Hewlett-Packard t5745", 750 .ident = "Hewlett-Packard t5745",
745 .matches = { 751 .matches = {
746 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 752 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
747 DMI_MATCH(DMI_BOARD_NAME, "hp t5745"), 753 DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
748 }, 754 },
749 }, 755 },
750 { 756 {
@@ -752,7 +758,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
752 .ident = "Hewlett-Packard st5747", 758 .ident = "Hewlett-Packard st5747",
753 .matches = { 759 .matches = {
754 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 760 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
755 DMI_MATCH(DMI_BOARD_NAME, "hp st5747"), 761 DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
756 }, 762 },
757 }, 763 },
758 { 764 {
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index cad45ff8251b..2b2e011e9055 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -50,8 +50,6 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
50 adjusted_mode->vtotal = fixed_mode->vtotal; 50 adjusted_mode->vtotal = fixed_mode->vtotal;
51 51
52 adjusted_mode->clock = fixed_mode->clock; 52 adjusted_mode->clock = fixed_mode->clock;
53
54 drm_mode_set_crtcinfo(adjusted_mode, 0);
55} 53}
56 54
57/* adjusted_mode has been preset to be the panel's fixed mode */ 55/* adjusted_mode has been preset to be the panel's fixed mode */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3aabe8dfe5c5..b59b6d5b7583 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -414,6 +414,16 @@ static int init_render_ring(struct intel_ring_buffer *ring)
414 return ret; 414 return ret;
415 } 415 }
416 416
417 if (IS_GEN6(dev)) {
418 /* From the Sandybridge PRM, volume 1 part 3, page 24:
419 * "If this bit is set, STCunit will have LRA as replacement
420 * policy. [...] This bit must be reset. LRA replacement
421 * policy is not supported."
422 */
423 I915_WRITE(CACHE_MODE_0,
424 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
425 }
426
417 if (INTEL_INFO(dev)->gen >= 6) 427 if (INTEL_INFO(dev)->gen >= 6)
418 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 428 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
419 429
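
CACHE_MODE_0, like INSTPM in the context just below it, is a "masked" register on these GPUs: the upper 16 bits of a write select which of the lower 16 bits actually change, so individual fields can be updated without a read-modify-write. The helpers used here expand to roughly `(bit << 16) | bit` for enable and `bit << 16` for disable. A small model of how the hardware applies such a write (bit position chosen for illustration only):

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

#define STC_EVICT_DISABLE_LRA (1 << 5)           /* illustrative bit position */

/* Apply a write to a masked register: only bits whose mask (high half)
 * is set take their new value from the low half. */
static uint16_t masked_write(uint16_t current, uint32_t write)
{
        uint16_t mask  = write >> 16;
        uint16_t value = write & 0xffff;

        return (current & ~mask) | (value & mask);
}

int main(void)
{
        uint16_t reg = 0x0120;                   /* pretend power-on default */

        reg = masked_write(reg, MASKED_BIT_DISABLE(STC_EVICT_DISABLE_LRA));
        printf("after disable: 0x%04x\n", reg);

        reg = masked_write(reg, MASKED_BIT_ENABLE(STC_EVICT_DISABLE_LRA));
        printf("after enable:  0x%04x\n", reg);
        return 0;
}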
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9b3a5f999ad7..7d3f238e8265 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -745,6 +745,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
745 uint16_t width, height; 745 uint16_t width, height;
746 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; 746 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
747 uint16_t h_sync_offset, v_sync_offset; 747 uint16_t h_sync_offset, v_sync_offset;
748 int mode_clock;
748 749
749 width = mode->hdisplay; 750 width = mode->hdisplay;
750 height = mode->vdisplay; 751 height = mode->vdisplay;
@@ -759,7 +760,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
759 h_sync_offset = mode->hsync_start - mode->hdisplay; 760 h_sync_offset = mode->hsync_start - mode->hdisplay;
760 v_sync_offset = mode->vsync_start - mode->vdisplay; 761 v_sync_offset = mode->vsync_start - mode->vdisplay;
761 762
762 dtd->part1.clock = mode->clock / 10; 763 mode_clock = mode->clock;
764 mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
765 mode_clock /= 10;
766 dtd->part1.clock = mode_clock;
767
763 dtd->part1.h_active = width & 0xff; 768 dtd->part1.h_active = width & 0xff;
764 dtd->part1.h_blank = h_blank_len & 0xff; 769 dtd->part1.h_blank = h_blank_len & 0xff;
765 dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | 770 dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
@@ -1010,7 +1015,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1010 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); 1015 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
1011 u32 sdvox; 1016 u32 sdvox;
1012 struct intel_sdvo_in_out_map in_out; 1017 struct intel_sdvo_in_out_map in_out;
1013 struct intel_sdvo_dtd input_dtd; 1018 struct intel_sdvo_dtd input_dtd, output_dtd;
1014 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 1019 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
1015 int rate; 1020 int rate;
1016 1021
@@ -1035,20 +1040,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1035 intel_sdvo->attached_output)) 1040 intel_sdvo->attached_output))
1036 return; 1041 return;
1037 1042
1038 /* We have tried to get input timing in mode_fixup, and filled into 1043 /* lvds has a special fixed output timing. */
1039 * adjusted_mode. 1044 if (intel_sdvo->is_lvds)
1040 */ 1045 intel_sdvo_get_dtd_from_mode(&output_dtd,
1041 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { 1046 intel_sdvo->sdvo_lvds_fixed_mode);
1042 input_dtd = intel_sdvo->input_dtd; 1047 else
1043 } else { 1048 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
1044 /* Set the output timing to the screen */ 1049 (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
1045 if (!intel_sdvo_set_target_output(intel_sdvo,
1046 intel_sdvo->attached_output))
1047 return;
1048
1049 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1050 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
1051 }
1052 1050
1053 /* Set the input timing to the screen. Assume always input 0. */ 1051 /* Set the input timing to the screen. Assume always input 0. */
1054 if (!intel_sdvo_set_target_input(intel_sdvo)) 1052 if (!intel_sdvo_set_target_input(intel_sdvo))
@@ -1066,6 +1064,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1066 !intel_sdvo_set_tv_format(intel_sdvo)) 1064 !intel_sdvo_set_tv_format(intel_sdvo))
1067 return; 1065 return;
1068 1066
1067 /* We have tried to get input timing in mode_fixup, and filled into
1068 * adjusted_mode.
1069 */
1070 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1069 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); 1071 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
1070 1072
1071 switch (pixel_multiplier) { 1073 switch (pixel_multiplier) {
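
The first intel_sdvo.c hunk divides the mode clock by the pixel multiplier before converting it to the DTD's 10 kHz units, so an SDVO link running at a doubled or quadrupled dot clock no longer reports the multiplied rate in the timing descriptor. The `?: 1` is the GNU "Elvis" shorthand (`a ?: b` means `a ? a : b` with `a` evaluated once) and guards against an unset multiplier. In isolation, with an illustrative flag encoding:

#include <stdio.h>

/* Extract a pixel multiplier stashed in the mode's private flags;
 * 0 means "not set" (illustrative encoding, mirroring the shift/mask idea). */
static int get_pixel_multiplier(unsigned int private_flags)
{
        return private_flags & 0xf;
}

int main(void)
{
        int mode_clock = 25175;                  /* kHz, e.g. 640x480@60 */
        unsigned int flags_with_mult = 0x4;      /* multiplier 4 encoded */
        unsigned int flags_without   = 0x0;

        int m1 = get_pixel_multiplier(flags_with_mult);
        int m2 = get_pixel_multiplier(flags_without);

        printf("dtd clock (mult=4): %d\n", mode_clock / (m1 ?: 1) / 10);
        printf("dtd clock (unset):  %d\n", mode_clock / (m2 ?: 1) / 10);
        return 0;
}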
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 7814a760c164..284bd25d5d21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -270,7 +270,7 @@ static bool nouveau_dsm_detect(void)
270 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; 270 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
271 struct pci_dev *pdev = NULL; 271 struct pci_dev *pdev = NULL;
272 int has_dsm = 0; 272 int has_dsm = 0;
273 int has_optimus; 273 int has_optimus = 0;
274 int vga_count = 0; 274 int vga_count = 0;
275 bool guid_valid; 275 bool guid_valid;
276 int retval; 276 int retval;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 80963d05b54a..0be4a815e706 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6156,10 +6156,14 @@ dcb_fake_connectors(struct nvbios *bios)
6156 6156
6157 /* heuristic: if we ever get a non-zero connector field, assume 6157 /* heuristic: if we ever get a non-zero connector field, assume
6158 * that all the indices are valid and we don't need fake them. 6158 * that all the indices are valid and we don't need fake them.
6159 *
6160 * and, as usual, a blacklist of boards with bad bios data..
6159 */ 6161 */
6160 for (i = 0; i < dcbt->entries; i++) { 6162 if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) {
6161 if (dcbt->entry[i].connector) 6163 for (i = 0; i < dcbt->entries; i++) {
6162 return; 6164 if (dcbt->entry[i].connector)
6165 return;
6166 }
6163 } 6167 }
6164 6168
6165 /* no useful connector info available, we need to make it up 6169 /* no useful connector info available, we need to make it up
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
index 59ea1c14eca0..c3de36384522 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -32,7 +32,9 @@ static bool
32hdmi_sor(struct drm_encoder *encoder) 32hdmi_sor(struct drm_encoder *encoder)
33{ 33{
34 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; 34 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
35 if (dev_priv->chipset < 0xa3) 35 if (dev_priv->chipset < 0xa3 ||
36 dev_priv->chipset == 0xaa ||
37 dev_priv->chipset == 0xac)
36 return false; 38 return false;
37 return true; 39 return true;
38} 40}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 34d591b7d4ef..da3e7c3abab7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -235,6 +235,7 @@ nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
235 return -EPERM; 235 return -EPERM;
236 236
237 strncpy(string, profile, sizeof(string)); 237 strncpy(string, profile, sizeof(string));
238 string[sizeof(string) - 1] = 0;
238 if ((ptr = strchr(string, '\n'))) 239 if ((ptr = strchr(string, '\n')))
239 *ptr = '\0'; 240 *ptr = '\0';
240 241
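
The one-line nouveau_pm fix addresses the usual strncpy pitfall: when the source is at least as long as the destination, strncpy fills the whole buffer without writing a terminating NUL, and the following strchr() would read past the end. Forcing `string[sizeof(string) - 1] = 0` restores the invariant; a compact illustration with a deliberately small buffer:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char string[8];
        const char *profile = "performance\n";   /* longer than the buffer */

        strncpy(string, profile, sizeof(string));
        string[sizeof(string) - 1] = 0;          /* the added line: always terminate */

        char *ptr = strchr(string, '\n');        /* now guaranteed to stay in bounds */
        if (ptr)
                *ptr = '\0';
        printf("profile = \"%s\" (len %zu)\n", string, strlen(string));
        return 0;
}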
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 550ad3fcf0af..9d79180069df 100644
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -65,7 +65,7 @@ nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
65 if (line < 10) { 65 if (line < 10) {
66 line = (line - 2) * 4; 66 line = (line - 2) * 4;
67 reg = NV_PCRTC_GPIO_EXT; 67 reg = NV_PCRTC_GPIO_EXT;
68 mask = 0x00000003 << ((line - 2) * 4); 68 mask = 0x00000003;
69 data = (dir << 1) | out; 69 data = (dir << 1) | out;
70 } else 70 } else
71 if (line < 14) { 71 if (line < 14) {
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index a7844ab6a50c..274640212475 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -42,7 +42,7 @@ nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane)
42 struct drm_nouveau_private *dev_priv = dev->dev_private; 42 struct drm_nouveau_private *dev_priv = dev->dev_private;
43 static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */ 43 static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
44 static const u8 nv50[] = { 16, 8, 0, 24 }; 44 static const u8 nv50[] = { 16, 8, 0, 24 };
45 if (dev_priv->card_type == 0xaf) 45 if (dev_priv->chipset == 0xaf)
46 return nvaf[lane]; 46 return nvaf[lane];
47 return nv50[lane]; 47 return nv50[lane];
48} 48}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 5bf55038fd92..f704e942372e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -54,6 +54,11 @@ nvc0_mfb_isr(struct drm_device *dev)
54 nvc0_mfb_subp_isr(dev, unit, subp); 54 nvc0_mfb_subp_isr(dev, unit, subp);
55 units &= ~(1 << unit); 55 units &= ~(1 << unit);
56 } 56 }
57
58 /* we do something horribly wrong and upset PMFB a lot, so mask off
59 * interrupts from it after the first one until it's fixed
60 */
61 nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
57} 62}
58 63
59static void 64static void
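
nv_mask(), as used throughout nouveau, is a read-modify-write helper: it reads the register, clears the bits in the mask argument, ORs in the value and writes the result back, so the call added here clears bit 25 of register 0x640 (the PMFB interrupt enable, per the comment) while leaving the other enables alone. The shape of such a helper, sketched over a plain array standing in for MMIO space:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x1000 / 4];                /* fake register file */

static uint32_t rd32(uint32_t reg)             { return regs[reg / 4]; }
static void     wr32(uint32_t reg, uint32_t v) { regs[reg / 4] = v; }

/* Read-modify-write: clear `mask`, set `val`, return the old value. */
static uint32_t mask32(uint32_t reg, uint32_t mask, uint32_t val)
{
        uint32_t tmp = rd32(reg);
        wr32(reg, (tmp & ~mask) | val);
        return tmp;
}

int main(void)
{
        wr32(0x640, 0x03000001);                 /* pretend initial enable bits */
        uint32_t old = mask32(0x640, 0x02000000, 0x00000000);
        printf("0x640: 0x%08x -> 0x%08x\n", (unsigned)old, (unsigned)rd32(0x640));
        return 0;
}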
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2fab38f5a08e..01d77d1554f4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -575,6 +575,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
575 575
576 if (rdev->family < CHIP_RV770) 576 if (rdev->family < CHIP_RV770)
577 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 577 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
578 /* use frac fb div on APUs */
579 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
580 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
578 } else { 581 } else {
579 pll->flags |= RADEON_PLL_LEGACY; 582 pll->flags |= RADEON_PLL_LEGACY;
580 583
@@ -954,8 +957,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
954 break; 957 break;
955 } 958 }
956 959
957 if (radeon_encoder->active_device & 960 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
958 (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { 961 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
959 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 962 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
960 struct drm_connector *connector = 963 struct drm_connector *connector =
961 radeon_get_connector_for_encoder(encoder); 964 radeon_get_connector_for_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index eed7acefb492..ecc29bc1cbe3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2424,27 +2424,18 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
2424 u32 srbm_status; 2424 u32 srbm_status;
2425 u32 grbm_status; 2425 u32 grbm_status;
2426 u32 grbm_status_se0, grbm_status_se1; 2426 u32 grbm_status_se0, grbm_status_se1;
2427 struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
2428 int r;
2429 2427
2430 srbm_status = RREG32(SRBM_STATUS); 2428 srbm_status = RREG32(SRBM_STATUS);
2431 grbm_status = RREG32(GRBM_STATUS); 2429 grbm_status = RREG32(GRBM_STATUS);
2432 grbm_status_se0 = RREG32(GRBM_STATUS_SE0); 2430 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2433 grbm_status_se1 = RREG32(GRBM_STATUS_SE1); 2431 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2434 if (!(grbm_status & GUI_ACTIVE)) { 2432 if (!(grbm_status & GUI_ACTIVE)) {
2435 r100_gpu_lockup_update(lockup, ring); 2433 radeon_ring_lockup_update(ring);
2436 return false; 2434 return false;
2437 } 2435 }
2438 /* force CP activities */ 2436 /* force CP activities */
2439 r = radeon_ring_lock(rdev, ring, 2); 2437 radeon_ring_force_activity(rdev, ring);
2440 if (!r) { 2438 return radeon_ring_test_lockup(rdev, ring);
2441 /* PACKET2 NOP */
2442 radeon_ring_write(ring, 0x80000000);
2443 radeon_ring_write(ring, 0x80000000);
2444 radeon_ring_unlock_commit(rdev, ring);
2445 }
2446 ring->rptr = RREG32(CP_RB_RPTR);
2447 return r100_gpu_cp_is_lockup(rdev, lockup, ring);
2448} 2439}
2449 2440
2450static int evergreen_gpu_soft_reset(struct radeon_device *rdev) 2441static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -3376,12 +3367,9 @@ static int evergreen_startup(struct radeon_device *rdev)
3376 if (r) 3367 if (r)
3377 return r; 3368 return r;
3378 3369
3379 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 3370 r = radeon_ib_ring_tests(rdev);
3380 if (r) { 3371 if (r)
3381 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
3382 rdev->accel_working = false;
3383 return r; 3372 return r;
3384 }
3385 3373
3386 r = r600_audio_init(rdev); 3374 r = r600_audio_init(rdev);
3387 if (r) { 3375 if (r) {
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index a48ca53fcd6a..9cd2657eb2ca 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1392,35 +1392,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
1392 return 0; 1392 return 0;
1393} 1393}
1394 1394
1395bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1396{
1397 u32 srbm_status;
1398 u32 grbm_status;
1399 u32 grbm_status_se0, grbm_status_se1;
1400 struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
1401 int r;
1402
1403 srbm_status = RREG32(SRBM_STATUS);
1404 grbm_status = RREG32(GRBM_STATUS);
1405 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
1406 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
1407 if (!(grbm_status & GUI_ACTIVE)) {
1408 r100_gpu_lockup_update(lockup, ring);
1409 return false;
1410 }
1411 /* force CP activities */
1412 r = radeon_ring_lock(rdev, ring, 2);
1413 if (!r) {
1414 /* PACKET2 NOP */
1415 radeon_ring_write(ring, 0x80000000);
1416 radeon_ring_write(ring, 0x80000000);
1417 radeon_ring_unlock_commit(rdev, ring);
1418 }
1419 /* XXX deal with CP0,1,2 */
1420 ring->rptr = RREG32(ring->rptr_reg);
1421 return r100_gpu_cp_is_lockup(rdev, lockup, ring);
1422}
1423
1424static int cayman_gpu_soft_reset(struct radeon_device *rdev) 1395static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1425{ 1396{
1426 struct evergreen_mc_save save; 1397 struct evergreen_mc_save save;
@@ -1601,12 +1572,9 @@ static int cayman_startup(struct radeon_device *rdev)
1601 if (r) 1572 if (r)
1602 return r; 1573 return r;
1603 1574
1604 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1575 r = radeon_ib_ring_tests(rdev);
1605 if (r) { 1576 if (r)
1606 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1607 rdev->accel_working = false;
1608 return r; 1577 return r;
1609 }
1610 1578
1611 r = radeon_vm_manager_start(rdev); 1579 r = radeon_vm_manager_start(rdev);
1612 if (r) 1580 if (r)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cb1141854282..ad6ceb731713 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -660,7 +660,7 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
660 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; 660 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
661 WREG32(RADEON_AIC_CNTL, tmp); 661 WREG32(RADEON_AIC_CNTL, tmp);
662 r100_pci_gart_tlb_flush(rdev); 662 r100_pci_gart_tlb_flush(rdev);
663 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 663 DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
664 (unsigned)(rdev->mc.gtt_size >> 20), 664 (unsigned)(rdev->mc.gtt_size >> 20),
665 (unsigned long long)rdev->gart.table_addr); 665 (unsigned long long)rdev->gart.table_addr);
666 rdev->gart.ready = true; 666 rdev->gart.ready = true;
@@ -2159,79 +2159,18 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
2159 return -1; 2159 return -1;
2160} 2160}
2161 2161
2162void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
2163{
2164 lockup->last_cp_rptr = ring->rptr;
2165 lockup->last_jiffies = jiffies;
2166}
2167
2168/**
2169 * r100_gpu_cp_is_lockup() - check if CP is lockup by recording information
2170 * @rdev: radeon device structure
2171 * @lockup: r100_gpu_lockup structure holding CP lockup tracking informations
2172 * @cp: radeon_cp structure holding CP information
2173 *
2174 * We don't need to initialize the lockup tracking information as we will either
2175 * have CP rptr to a different value of jiffies wrap around which will force
2176 * initialization of the lockup tracking informations.
2177 *
2178 * A possible false positivie is if we get call after while and last_cp_rptr ==
2179 * the current CP rptr, even if it's unlikely it might happen. To avoid this
2180 * if the elapsed time since last call is bigger than 2 second than we return
2181 * false and update the tracking information. Due to this the caller must call
2182 * r100_gpu_cp_is_lockup several time in less than 2sec for lockup to be reported
2183 * the fencing code should be cautious about that.
2184 *
2185 * Caller should write to the ring to force CP to do something so we don't get
2186 * false positive when CP is just gived nothing to do.
2187 *
2188 **/
2189bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
2190{
2191 unsigned long cjiffies, elapsed;
2192
2193 cjiffies = jiffies;
2194 if (!time_after(cjiffies, lockup->last_jiffies)) {
2195 /* likely a wrap around */
2196 lockup->last_cp_rptr = ring->rptr;
2197 lockup->last_jiffies = jiffies;
2198 return false;
2199 }
2200 if (ring->rptr != lockup->last_cp_rptr) {
2201 /* CP is still working no lockup */
2202 lockup->last_cp_rptr = ring->rptr;
2203 lockup->last_jiffies = jiffies;
2204 return false;
2205 }
2206 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
2207 if (elapsed >= 10000) {
2208 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
2209 return true;
2210 }
2211 /* give a chance to the GPU ... */
2212 return false;
2213}
2214
2215bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2162bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2216{ 2163{
2217 u32 rbbm_status; 2164 u32 rbbm_status;
2218 int r;
2219 2165
2220 rbbm_status = RREG32(R_000E40_RBBM_STATUS); 2166 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2221 if (!G_000E40_GUI_ACTIVE(rbbm_status)) { 2167 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2222 r100_gpu_lockup_update(&rdev->config.r100.lockup, ring); 2168 radeon_ring_lockup_update(ring);
2223 return false; 2169 return false;
2224 } 2170 }
2225 /* force CP activities */ 2171 /* force CP activities */
2226 r = radeon_ring_lock(rdev, ring, 2); 2172 radeon_ring_force_activity(rdev, ring);
2227 if (!r) { 2173 return radeon_ring_test_lockup(rdev, ring);
2228 /* PACKET2 NOP */
2229 radeon_ring_write(ring, 0x80000000);
2230 radeon_ring_write(ring, 0x80000000);
2231 radeon_ring_unlock_commit(rdev, ring);
2232 }
2233 ring->rptr = RREG32(ring->rptr_reg);
2234 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
2235} 2174}
2236 2175
2237void r100_bm_disable(struct radeon_device *rdev) 2176void r100_bm_disable(struct radeon_device *rdev)
@@ -2300,7 +2239,6 @@ int r100_asic_reset(struct radeon_device *rdev)
2300 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || 2239 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2301 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { 2240 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2302 dev_err(rdev->dev, "failed to reset GPU\n"); 2241 dev_err(rdev->dev, "failed to reset GPU\n");
2303 rdev->gpu_lockup = true;
2304 ret = -1; 2242 ret = -1;
2305 } else 2243 } else
2306 dev_info(rdev->dev, "GPU reset succeed\n"); 2244 dev_info(rdev->dev, "GPU reset succeed\n");
@@ -3969,12 +3907,9 @@ static int r100_startup(struct radeon_device *rdev)
3969 if (r) 3907 if (r)
3970 return r; 3908 return r;
3971 3909
3972 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 3910 r = radeon_ib_ring_tests(rdev);
3973 if (r) { 3911 if (r)
3974 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
3975 rdev->accel_working = false;
3976 return r; 3912 return r;
3977 }
3978 3913
3979 return 0; 3914 return 0;
3980} 3915}
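
This commit drops the per-ASIC r100_gpu_cp_is_lockup() boilerplate (the same code is also removed from evergreen.c and ni.c above and from r300.c below) in favour of shared radeon_ring_* helpers, but the detection idea is unchanged: remember the last read pointer and a timestamp, poke the ring so an idle CP is not mistaken for a hung one, and only declare a lockup if the read pointer has not moved for a long interval. A stripped-down sketch of that state machine, with time in milliseconds instead of jiffies:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOCKUP_TIMEOUT_MS 10000

struct ring_state {
        uint32_t last_rptr;
        uint64_t last_activity_ms;
};

/* Record forward progress (rptr moved, or the GPU is known idle). */
static void lockup_update(struct ring_state *s, uint32_t rptr, uint64_t now_ms)
{
        s->last_rptr = rptr;
        s->last_activity_ms = now_ms;
}

/* Called after forcing some CP activity: is rptr still stuck, and for how long? */
static bool test_lockup(struct ring_state *s, uint32_t rptr, uint64_t now_ms)
{
        if (rptr != s->last_rptr) {
                lockup_update(s, rptr, now_ms);  /* still consuming commands */
                return false;
        }
        if (now_ms - s->last_activity_ms >= LOCKUP_TIMEOUT_MS)
                return true;                     /* no progress for 10s: lockup */
        return false;                            /* give the GPU more time */
}

int main(void)
{
        struct ring_state s = { 0 };

        lockup_update(&s, 100, 0);
        printf("t=2s,  rptr=100: %d\n", test_lockup(&s, 100, 2000));   /* 0 */
        printf("t=5s,  rptr=140: %d\n", test_lockup(&s, 140, 5000));   /* 0, progress */
        printf("t=16s, rptr=140: %d\n", test_lockup(&s, 140, 16000));  /* 1, stuck */
        return 0;
}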
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fa14383f9ca0..6419a5900e67 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -377,28 +377,6 @@ void r300_gpu_init(struct radeon_device *rdev)
 		 rdev->num_gb_pipes, rdev->num_z_pipes);
 }
 
-bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-	u32 rbbm_status;
-	int r;
-
-	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
-	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
-		r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
-		return false;
-	}
-	/* force CP activities */
-	r = radeon_ring_lock(rdev, ring, 2);
-	if (!r) {
-		/* PACKET2 NOP */
-		radeon_ring_write(ring, 0x80000000);
-		radeon_ring_write(ring, 0x80000000);
-		radeon_ring_unlock_commit(rdev, ring);
-	}
-	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
-	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
-}
-
 int r300_asic_reset(struct radeon_device *rdev)
 {
 	struct r100_mc_save save;
@@ -449,7 +427,6 @@ int r300_asic_reset(struct radeon_device *rdev)
 	/* Check if GPU is idle */
 	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
 		dev_err(rdev->dev, "failed to reset GPU\n");
-		rdev->gpu_lockup = true;
 		ret = -1;
 	} else
 		dev_info(rdev->dev, "GPU reset succeed\n");
@@ -1418,12 +1395,9 @@ static int r300_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
-	if (r) {
-		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
-		rdev->accel_working = false;
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index f3fcaacfea01..99137be7a300 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -279,12 +279,9 @@ static int r420_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
-	if (r) {
-		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
-		rdev->accel_working = false;
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ebcc15b03c9f..b5cf8375cd25 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -207,12 +207,10 @@ static int r520_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
-	if (r) {
-		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
-		rdev->accel_working = false;
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
 		return r;
-	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 222245d0138a..d02f13fdaa66 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1135,7 +1135,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
 	}
 	if (rdev->flags & RADEON_IS_AGP) {
 		size_bf = mc->gtt_start;
-		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+		size_af = 0xFFFFFFFF - mc->gtt_end;
 		if (size_bf > size_af) {
 			if (mc->mc_vram_size > size_bf) {
 				dev_warn(rdev->dev, "limiting VRAM\n");
@@ -1149,7 +1149,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
 			mc->real_vram_size = size_af;
 			mc->mc_vram_size = size_af;
 		}
-		mc->vram_start = mc->gtt_end;
+		mc->vram_start = mc->gtt_end + 1;
 	}
 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 	dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
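Both r600_vram_gtt_location() hunks above follow the convention already visible in the surrounding context (mc->vram_end = vram_start + size - 1): mc->gtt_end is an inclusive end address, so the space above the GTT is 0xFFFFFFFF - gtt_end and VRAM has to start at gtt_end + 1. A small worked example with hypothetical values:

/* worked example -- all values hypothetical */
static void mc_layout_example(void)
{
	uint64_t gtt_end = 0x0FFFFFFFULL;                   /* inclusive end of a 256MB GART at 0 */
	uint64_t size_af_old = 0xFFFFFFFFULL - gtt_end + 1; /* 0xF0000001: one byte too many */
	uint64_t size_af_new = 0xFFFFFFFFULL - gtt_end;     /* 0xF0000000: exact space above the GTT */
	uint64_t vram_start_old = gtt_end;                  /* 0x0FFFFFFF: overlaps the last GTT byte */
	uint64_t vram_start_new = gtt_end + 1;              /* 0x10000000: first byte after the GTT */
}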
@@ -1350,31 +1350,17 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	u32 srbm_status;
 	u32 grbm_status;
 	u32 grbm_status2;
-	struct r100_gpu_lockup *lockup;
-	int r;
-
-	if (rdev->family >= CHIP_RV770)
-		lockup = &rdev->config.rv770.lockup;
-	else
-		lockup = &rdev->config.r600.lockup;
 
 	srbm_status = RREG32(R_000E50_SRBM_STATUS);
 	grbm_status = RREG32(R_008010_GRBM_STATUS);
 	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
 	if (!G_008010_GUI_ACTIVE(grbm_status)) {
-		r100_gpu_lockup_update(lockup, ring);
+		radeon_ring_lockup_update(ring);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, ring, 2);
-	if (!r) {
-		/* PACKET2 NOP */
-		radeon_ring_write(ring, 0x80000000);
-		radeon_ring_write(ring, 0x80000000);
-		radeon_ring_unlock_commit(rdev, ring);
-	}
-	ring->rptr = RREG32(ring->rptr_reg);
-	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
 }
 
 int r600_asic_reset(struct radeon_device *rdev)
@@ -2494,12 +2480,9 @@ int r600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
-	if (r) {
-		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-		rdev->accel_working = false;
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
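Every *_startup() touched in this diff now calls radeon_ib_ring_tests() instead of open-coding an IB test on the GFX ring. The helper itself is only declared later in this diff (radeon.h); the shape below is a reconstruction from the removed call sites and should be read as an assumption, not the merged implementation.

/* Sketch only -- plausible radeon_ib_ring_tests(), not the merged code. */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;
			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* this is what the removed per-ASIC code tested;
				 * without the gfx ring there is no acceleration */
				rdev->accel_working = false;
				return r;
			}
		}
	}
	return 0;
}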
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 610acee74a3d..82ffa6a05cc6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -94,6 +94,7 @@ extern int radeon_disp_priority;
 extern int radeon_hw_i2c;
 extern int radeon_pcie_gen2;
 extern int radeon_msi;
+extern int radeon_lockup_timeout;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -255,10 +256,8 @@ struct radeon_fence_driver {
 	volatile uint32_t *cpu_addr;
 	atomic_t seq;
 	uint32_t last_seq;
-	unsigned long last_jiffies;
-	unsigned long last_timeout;
+	unsigned long last_activity;
 	wait_queue_head_t queue;
-	struct list_head created;
 	struct list_head emitted;
 	struct list_head signaled;
 	bool initialized;
@@ -286,7 +285,7 @@ void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
 int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
 void radeon_fence_unref(struct radeon_fence **fence);
 int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
@@ -462,6 +461,10 @@ void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
 				  struct radeon_semaphore *semaphore);
 void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
 				struct radeon_semaphore *semaphore);
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+				struct radeon_semaphore *semaphore,
+				bool sync_to[RADEON_NUM_RINGS],
+				int dst_ring);
 void radeon_semaphore_free(struct radeon_device *rdev,
 			   struct radeon_semaphore *semaphore);
 
@@ -668,6 +671,8 @@ struct radeon_ring {
 	unsigned ring_size;
 	unsigned ring_free_dw;
 	int count_dw;
+	unsigned long last_activity;
+	unsigned last_rptr;
 	uint64_t gpu_addr;
 	uint32_t align_mask;
 	uint32_t ptr_mask;
@@ -802,6 +807,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
 int radeon_ib_pool_start(struct radeon_device *rdev);
 int radeon_ib_pool_suspend(struct radeon_device *rdev);
+int radeon_ib_ring_tests(struct radeon_device *rdev);
 /* Ring access between begin & end cannot sleep */
 int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -811,6 +817,9 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
 void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
 void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
+void radeon_ring_lockup_update(struct radeon_ring *ring);
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
 		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
 		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
@@ -1162,7 +1171,6 @@ struct radeon_asic {
 	int (*resume)(struct radeon_device *rdev);
 	int (*suspend)(struct radeon_device *rdev);
 	void (*vga_set_state)(struct radeon_device *rdev, bool state);
-	bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
 	int (*asic_reset)(struct radeon_device *rdev);
 	/* ioctl hw specific callback. Some hw might want to perform special
 	 * operation on specific ioctl. For instance on wait idle some hw
@@ -1191,6 +1199,7 @@ struct radeon_asic {
 		void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
 		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
 		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+		bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
 	} ring[RADEON_NUM_RINGS];
 	/* irqs */
 	struct {
@@ -1269,16 +1278,10 @@ struct radeon_asic {
 /*
  * Asic structures
  */
-struct r100_gpu_lockup {
-	unsigned long last_jiffies;
-	u32 last_cp_rptr;
-};
-
 struct r100_asic {
 	const unsigned *reg_safe_bm;
 	unsigned reg_safe_bm_size;
 	u32 hdp_cntl;
-	struct r100_gpu_lockup lockup;
 };
 
 struct r300_asic {
@@ -1286,7 +1289,6 @@ struct r300_asic {
 	unsigned reg_safe_bm_size;
 	u32 resync_scratch;
 	u32 hdp_cntl;
-	struct r100_gpu_lockup lockup;
 };
 
 struct r600_asic {
@@ -1308,7 +1310,6 @@ struct r600_asic {
 	unsigned tiling_group_size;
 	unsigned tile_config;
 	unsigned backend_map;
-	struct r100_gpu_lockup lockup;
 };
 
 struct rv770_asic {
@@ -1334,7 +1335,6 @@ struct rv770_asic {
 	unsigned tiling_group_size;
 	unsigned tile_config;
 	unsigned backend_map;
-	struct r100_gpu_lockup lockup;
 };
 
 struct evergreen_asic {
@@ -1361,7 +1361,6 @@ struct evergreen_asic {
 	unsigned tiling_group_size;
 	unsigned tile_config;
 	unsigned backend_map;
-	struct r100_gpu_lockup lockup;
 };
 
 struct cayman_asic {
@@ -1400,7 +1399,6 @@ struct cayman_asic {
 	unsigned multi_gpu_tile_size;
 
 	unsigned tile_config;
-	struct r100_gpu_lockup lockup;
 };
 
 struct si_asic {
@@ -1431,7 +1429,6 @@ struct si_asic {
 	unsigned multi_gpu_tile_size;
 
 	unsigned tile_config;
-	struct r100_gpu_lockup lockup;
 };
 
 union radeon_asic_config {
@@ -1547,7 +1544,6 @@ struct radeon_device {
 	struct radeon_mutex cs_mutex;
 	struct radeon_wb wb;
 	struct radeon_dummy_page dummy_page;
-	bool gpu_lockup;
 	bool shutdown;
 	bool suspend;
 	bool need_dma32;
@@ -1740,7 +1736,6 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
 #define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
@@ -1749,6 +1744,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
 #define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
 #define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
 #define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
 #define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index be4dc2ff0e40..f533df5f7d50 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,6 @@ static struct radeon_asic r100_asic = {
 	.suspend = &r100_suspend,
 	.resume = &r100_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_is_lockup = &r100_gpu_is_lockup,
 	.asic_reset = &r100_asic_reset,
 	.ioctl_wait_idle = NULL,
 	.gui_idle = &r100_gui_idle,
@@ -152,6 +151,7 @@ static struct radeon_asic r100_asic = {
 			.ring_start = &r100_ring_start,
 			.ring_test = &r100_ring_test,
 			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -208,7 +208,6 @@ static struct radeon_asic r200_asic = {
 	.suspend = &r100_suspend,
 	.resume = &r100_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_is_lockup = &r100_gpu_is_lockup,
 	.asic_reset = &r100_asic_reset,
 	.ioctl_wait_idle = NULL,
 	.gui_idle = &r100_gui_idle,
@@ -226,6 +225,7 @@ static struct radeon_asic r200_asic = {
 			.ring_start = &r100_ring_start,
 			.ring_test = &r100_ring_test,
 			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -282,7 +282,6 @@ static struct radeon_asic r300_asic = {
 	.suspend = &r300_suspend,
 	.resume = &r300_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.asic_reset = &r300_asic_reset,
 	.ioctl_wait_idle = NULL,
 	.gui_idle = &r100_gui_idle,
@@ -300,6 +299,7 @@ static struct radeon_asic r300_asic = {
 			.ring_start = &r300_ring_start,
 			.ring_test = &r100_ring_test,
 			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -356,7 +356,6 @@ static struct radeon_asic r300_asic_pcie = {
 	.suspend = &r300_suspend,
 	.resume = &r300_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.asic_reset = &r300_asic_reset,
 	.ioctl_wait_idle = NULL,
 	.gui_idle = &r100_gui_idle,
@@ -374,6 +373,7 @@ static struct radeon_asic r300_asic_pcie = {
 			.ring_start = &r300_ring_start,
 			.ring_test = &r100_ring_test,
 			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -430,7 +430,6 @@ static struct radeon_asic r420_asic = {
 	.suspend = &r420_suspend,
 	.resume = &r420_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.asic_reset = &r300_asic_reset,
 	.ioctl_wait_idle = NULL,
 	.gui_idle = &r100_gui_idle,
@@ -448,6 +447,7 @@ static struct radeon_asic r420_asic = {
 			.ring_start = &r300_ring_start,
 			.ring_test = &r100_ring_test,
 			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -504,7 +504,6 @@ static struct radeon_asic rs400_asic = {
 	.suspend = &rs400_suspend,
 	.resume = &rs400_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.asic_reset = &r300_asic_reset,
 	.ioctl_wait_idle = NULL,
 	.gui_idle = &r100_gui_idle,
@@ -522,6 +521,7 @@ static struct radeon_asic rs400_asic = {
 			.ring_start = &r300_ring_start,
 			.ring_test = &r100_ring_test,
 			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -578,7 +578,6 @@ static struct radeon_asic rs600_asic = {
 	.suspend = &rs600_suspend,
 	.resume = &rs600_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.asic_reset = &rs600_asic_reset,
 	.ioctl_wait_idle = NULL,
 	.gui_idle = &r100_gui_idle,
@@ -596,6 +595,7 @@ static struct radeon_asic rs600_asic = {
 			.ring_start = &r300_ring_start,
 			.ring_test = &r100_ring_test,
 			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -652,7 +652,6 @@ static struct radeon_asic rs690_asic = {
 	.suspend = &rs690_suspend,
 	.resume = &rs690_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.asic_reset = &rs600_asic_reset,
 	.ioctl_wait_idle = NULL,
 	.gui_idle = &r100_gui_idle,
@@ -670,6 +669,7 @@ static struct radeon_asic rs690_asic = {
 			.ring_start = &r300_ring_start,
 			.ring_test = &r100_ring_test,
 			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -726,7 +726,6 @@ static struct radeon_asic rv515_asic = {
 	.suspend = &rv515_suspend,
 	.resume = &rv515_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.asic_reset = &rs600_asic_reset,
 	.ioctl_wait_idle = NULL,
 	.gui_idle = &r100_gui_idle,
@@ -744,6 +743,7 @@ static struct radeon_asic rv515_asic = {
 			.ring_start = &rv515_ring_start,
 			.ring_test = &r100_ring_test,
 			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -800,7 +800,6 @@ static struct radeon_asic r520_asic = {
 	.suspend = &rv515_suspend,
 	.resume = &r520_resume,
 	.vga_set_state = &r100_vga_set_state,
-	.gpu_is_lockup = &r300_gpu_is_lockup,
 	.asic_reset = &rs600_asic_reset,
 	.ioctl_wait_idle = NULL,
 	.gui_idle = &r100_gui_idle,
@@ -818,6 +817,7 @@ static struct radeon_asic r520_asic = {
 			.ring_start = &rv515_ring_start,
 			.ring_test = &r100_ring_test,
 			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -874,7 +874,6 @@ static struct radeon_asic r600_asic = {
 	.suspend = &r600_suspend,
 	.resume = &r600_resume,
 	.vga_set_state = &r600_vga_set_state,
-	.gpu_is_lockup = &r600_gpu_is_lockup,
 	.asic_reset = &r600_asic_reset,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
 	.gui_idle = &r600_gui_idle,
@@ -891,6 +890,7 @@ static struct radeon_asic r600_asic = {
 			.cs_parse = &r600_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &r600_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -946,7 +946,6 @@ static struct radeon_asic rs780_asic = {
 	.fini = &r600_fini,
 	.suspend = &r600_suspend,
 	.resume = &r600_resume,
-	.gpu_is_lockup = &r600_gpu_is_lockup,
 	.vga_set_state = &r600_vga_set_state,
 	.asic_reset = &r600_asic_reset,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -964,6 +963,7 @@ static struct radeon_asic rs780_asic = {
 			.cs_parse = &r600_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &r600_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -1020,7 +1020,6 @@ static struct radeon_asic rv770_asic = {
 	.suspend = &rv770_suspend,
 	.resume = &rv770_resume,
 	.asic_reset = &r600_asic_reset,
-	.gpu_is_lockup = &r600_gpu_is_lockup,
 	.vga_set_state = &r600_vga_set_state,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
 	.gui_idle = &r600_gui_idle,
@@ -1037,6 +1036,7 @@ static struct radeon_asic rv770_asic = {
 			.cs_parse = &r600_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &r600_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -1092,7 +1092,6 @@ static struct radeon_asic evergreen_asic = {
 	.fini = &evergreen_fini,
 	.suspend = &evergreen_suspend,
 	.resume = &evergreen_resume,
-	.gpu_is_lockup = &evergreen_gpu_is_lockup,
 	.asic_reset = &evergreen_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1110,6 +1109,7 @@ static struct radeon_asic evergreen_asic = {
 			.cs_parse = &evergreen_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -1165,7 +1165,6 @@ static struct radeon_asic sumo_asic = {
 	.fini = &evergreen_fini,
 	.suspend = &evergreen_suspend,
 	.resume = &evergreen_resume,
-	.gpu_is_lockup = &evergreen_gpu_is_lockup,
 	.asic_reset = &evergreen_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1183,6 +1182,7 @@ static struct radeon_asic sumo_asic = {
 			.cs_parse = &evergreen_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
 		},
 	},
 	.irq = {
@@ -1238,7 +1238,6 @@ static struct radeon_asic btc_asic = {
 	.fini = &evergreen_fini,
 	.suspend = &evergreen_suspend,
 	.resume = &evergreen_resume,
-	.gpu_is_lockup = &evergreen_gpu_is_lockup,
 	.asic_reset = &evergreen_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1256,6 +1255,7 @@ static struct radeon_asic btc_asic = {
 			.cs_parse = &evergreen_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -1321,7 +1321,6 @@ static struct radeon_asic cayman_asic = {
 	.fini = &cayman_fini,
 	.suspend = &cayman_suspend,
 	.resume = &cayman_resume,
-	.gpu_is_lockup = &cayman_gpu_is_lockup,
 	.asic_reset = &cayman_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1340,6 +1339,7 @@ static struct radeon_asic cayman_asic = {
 			.cs_parse = &evergreen_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
 		},
 		[CAYMAN_RING_TYPE_CP1_INDEX] = {
 			.ib_execute = &cayman_ring_ib_execute,
@@ -1349,6 +1349,7 @@ static struct radeon_asic cayman_asic = {
 			.cs_parse = &evergreen_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
 		},
 		[CAYMAN_RING_TYPE_CP2_INDEX] = {
 			.ib_execute = &cayman_ring_ib_execute,
@@ -1358,6 +1359,7 @@ static struct radeon_asic cayman_asic = {
 			.cs_parse = &evergreen_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -1413,7 +1415,6 @@ static struct radeon_asic trinity_asic = {
 	.fini = &cayman_fini,
 	.suspend = &cayman_suspend,
 	.resume = &cayman_resume,
-	.gpu_is_lockup = &cayman_gpu_is_lockup,
 	.asic_reset = &cayman_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1432,6 +1433,7 @@ static struct radeon_asic trinity_asic = {
 			.cs_parse = &evergreen_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
 		},
 		[CAYMAN_RING_TYPE_CP1_INDEX] = {
 			.ib_execute = &cayman_ring_ib_execute,
@@ -1441,6 +1443,7 @@ static struct radeon_asic trinity_asic = {
 			.cs_parse = &evergreen_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
 		},
 		[CAYMAN_RING_TYPE_CP2_INDEX] = {
 			.ib_execute = &cayman_ring_ib_execute,
@@ -1450,6 +1453,7 @@ static struct radeon_asic trinity_asic = {
 			.cs_parse = &evergreen_cs_parse,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gpu_is_lockup,
 		}
 	},
 	.irq = {
@@ -1515,7 +1519,6 @@ static struct radeon_asic si_asic = {
 	.fini = &si_fini,
 	.suspend = &si_suspend,
 	.resume = &si_resume,
-	.gpu_is_lockup = &si_gpu_is_lockup,
 	.asic_reset = &si_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
 	.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1534,6 +1537,7 @@ static struct radeon_asic si_asic = {
 			.cs_parse = NULL,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &si_gpu_is_lockup,
 		},
 		[CAYMAN_RING_TYPE_CP1_INDEX] = {
 			.ib_execute = &si_ring_ib_execute,
@@ -1543,6 +1547,7 @@ static struct radeon_asic si_asic = {
 			.cs_parse = NULL,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &si_gpu_is_lockup,
 		},
 		[CAYMAN_RING_TYPE_CP2_INDEX] = {
 			.ib_execute = &si_ring_ib_execute,
@@ -1552,6 +1557,7 @@ static struct radeon_asic si_asic = {
 			.cs_parse = NULL,
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
+			.is_lockup = &si_gpu_is_lockup,
 		}
 	},
 	.irq = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index b135bec649d1..78309318bd3f 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -103,11 +103,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
 void r100_pci_gart_disable(struct radeon_device *rdev);
 int r100_debugfs_mc_info_init(struct radeon_device *rdev);
 int r100_gui_wait_for_idle(struct radeon_device *rdev);
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
-			    struct radeon_ring *cp);
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
-			   struct r100_gpu_lockup *lockup,
-			   struct radeon_ring *cp);
 void r100_ib_fini(struct radeon_device *rdev);
 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 void r100_irq_disable(struct radeon_device *rdev);
@@ -159,7 +154,6 @@ extern int r300_init(struct radeon_device *rdev);
 extern void r300_fini(struct radeon_device *rdev);
 extern int r300_suspend(struct radeon_device *rdev);
 extern int r300_resume(struct radeon_device *rdev);
-extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 extern int r300_asic_reset(struct radeon_device *rdev);
 extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
 extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -443,7 +437,6 @@ int cayman_init(struct radeon_device *rdev);
 void cayman_fini(struct radeon_device *rdev);
 int cayman_suspend(struct radeon_device *rdev);
 int cayman_resume(struct radeon_device *rdev);
-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int cayman_asic_reset(struct radeon_device *rdev);
 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int cayman_vm_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 2cad9fde92fc..576f4f6919f2 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1561,6 +1561,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		   (rdev->pdev->subsystem_device == 0x4150)) {
 		/* Mac G5 tower 9600 */
 		rdev->mode_info.connector_table = CT_MAC_G5_9600;
+	} else if ((rdev->pdev->device == 0x4c66) &&
+		   (rdev->pdev->subsystem_vendor == 0x1002) &&
+		   (rdev->pdev->subsystem_device == 0x4c66)) {
+		/* SAM440ep RV250 embedded board */
+		rdev->mode_info.connector_table = CT_SAM440EP;
 	} else
 #endif /* CONFIG_PPC_PMAC */
 #ifdef CONFIG_PPC64
@@ -2134,6 +2139,67 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 					    CONNECTOR_OBJECT_ID_SVIDEO,
 					    &hpd);
 		break;
+	case CT_SAM440EP:
+		DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_LCD1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* DVI-I - secondary dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2,
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_TV1_SUPPORT,
+								  2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
 	default:
 		DRM_INFO("Connector table: %d (invalid)\n",
 			 rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 71fa389e10fe..2914c5761cfc 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1026,7 +1026,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 
 		encoder = obj_to_encoder(obj);
 
-		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC ||
+		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
 		    encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
 			continue;
 
@@ -1056,6 +1056,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 	 * cases the DVI port is actually a virtual KVM port connected to the service
 	 * processor.
 	 */
+out:
 	if ((!rdev->is_atom_bios) &&
 	    (ret == connector_status_disconnected) &&
 	    rdev->mode_info.bios_hardcoded_edid_size) {
@@ -1063,7 +1064,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 		ret = connector_status_connected;
 	}
 
-out:
 	/* updated in get modes as well since we need to know if it's analog or digital */
 	radeon_connector_update_scratch_regs(connector, ret);
 	return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index e7b0b5d51bc3..c66beb1662b5 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -118,6 +118,7 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
 	bool sync_to_ring[RADEON_NUM_RINGS] = { };
+	bool need_sync = false;
 	int i, r;
 
 	for (i = 0; i < p->nrelocs; i++) {
@@ -126,36 +127,24 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 
 		if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) {
 			struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
-			if (!radeon_fence_signaled(fence)) {
+			if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
 				sync_to_ring[fence->ring] = true;
+				need_sync = true;
 			}
 		}
 	}
 
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		/* no need to sync to our own or unused rings */
-		if (i == p->ring || !sync_to_ring[i] || !p->rdev->ring[i].ready)
-			continue;
-
-		if (!p->ib->fence->semaphore) {
-			r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
-			if (r)
-				return r;
-		}
-
-		r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
-		if (r)
-			return r;
-		radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
-		radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
+	if (!need_sync) {
+		return 0;
+	}
 
-		r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
-		if (r)
-			return r;
-		radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
-		radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
+	r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
+	if (r) {
+		return r;
 	}
-	return 0;
+
+	return radeon_semaphore_sync_rings(p->rdev, p->ib->fence->semaphore,
+					   sync_to_ring, p->ring);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
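The open-coded signal/wait loop removed from radeon_cs_sync_rings() is folded into radeon_semaphore_sync_rings(), which this diff only declares in radeon.h. The sketch below is a reconstruction from the deleted loop and should be treated as an assumption about the helper, not the merged radeon_semaphore.c code.

/* Sketch only -- reconstructed from the removed loop above. */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
				struct radeon_semaphore *semaphore,
				bool sync_to[RADEON_NUM_RINGS],
				int dst_ring)
{
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		/* no need to sync to our own or unused rings */
		if (i == dst_ring || !sync_to[i] || !rdev->ring[i].ready)
			continue;

		/* signal on the source ring ... */
		r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
		if (r)
			return r;
		radeon_semaphore_emit_signal(rdev, i, semaphore);
		radeon_ring_unlock_commit(rdev, &rdev->ring[i]);

		/* ... and wait for it on the destination ring */
		r = radeon_ring_lock(rdev, &rdev->ring[dst_ring], 3);
		if (r)
			return r;
		radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
		radeon_ring_unlock_commit(rdev, &rdev->ring[dst_ring]);
	}
	return 0;
}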
@@ -172,6 +161,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
 	p->idx = 0;
+	p->ib = NULL;
+	p->const_ib = NULL;
 	p->chunk_ib_idx = -1;
 	p->chunk_relocs_idx = -1;
 	p->chunk_flags_idx = -1;
@@ -336,6 +327,9 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 	radeon_ib_free(parser->rdev, &parser->ib);
+	if (parser->const_ib) {
+		radeon_ib_free(parser->rdev, &parser->const_ib);
+	}
 }
 
 static int radeon_cs_ib_chunk(struct radeon_device *rdev,
@@ -507,6 +501,16 @@ out:
 	return r;
 }
 
+static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
+{
+	if (r == -EDEADLK) {
+		r = radeon_gpu_reset(rdev);
+		if (!r)
+			r = -EAGAIN;
+	}
+	return r;
+}
+
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
@@ -528,6 +532,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
 		radeon_cs_parser_fini(&parser, r);
+		r = radeon_cs_handle_lockup(rdev, r);
 		radeon_mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
@@ -536,6 +541,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("Failed to parse relocation %d!\n", r);
 		radeon_cs_parser_fini(&parser, r);
+		r = radeon_cs_handle_lockup(rdev, r);
 		radeon_mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
@@ -549,6 +555,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	}
 out:
 	radeon_cs_parser_fini(&parser, r);
+	r = radeon_cs_handle_lockup(rdev, r);
 	radeon_mutex_unlock(&rdev->cs_mutex);
 	return r;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0fb4f8993cae..ff28210dedec 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -241,8 +241,8 @@ int radeon_wb_init(struct radeon_device *rdev)
 			rdev->wb.use_event = true;
 		}
 	}
-	/* always use writeback/events on NI */
-	if (ASIC_IS_DCE5(rdev)) {
+	/* always use writeback/events on NI, APUs */
+	if (rdev->family >= CHIP_PALM) {
 		rdev->wb.enabled = true;
 		rdev->wb.use_event = true;
 	}
@@ -714,7 +714,6 @@ int radeon_device_init(struct radeon_device *rdev,
 	rdev->is_atom_bios = false;
 	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
 	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-	rdev->gpu_lockup = false;
 	rdev->accel_working = false;
 
 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
@@ -916,7 +915,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 	radeon_bo_evict_vram(rdev);
 	/* wait for gpu to finish processing current batch */
 	for (i = 0; i < RADEON_NUM_RINGS; i++)
-		radeon_fence_wait_last(rdev, i);
+		radeon_fence_wait_empty(rdev, i);
 
 	radeon_save_bios_scratch_regs(rdev);
 
@@ -987,9 +986,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	int r;
 	int resched;
 
-	/* Prevent CS ioctl from interfering */
-	radeon_mutex_lock(&rdev->cs_mutex);
-
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -1004,8 +1000,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 	}
 
-	radeon_mutex_unlock(&rdev->cs_mutex);
-
 	if (r) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(rdev->dev, "GPU reset failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8086c96e0b06..0a1d4bd65edc 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
 		radeon_legacy_init_crtc(dev, radeon_crtc);
 }
 
-static const char *encoder_names[36] = {
+static const char *encoder_names[37] = {
 	"NONE",
 	"INTERNAL_LVDS",
 	"INTERNAL_TMDS1",
@@ -570,6 +570,7 @@ static const char *encoder_names[36] = {
570 "INTERNAL_UNIPHY2", 570 "INTERNAL_UNIPHY2",
571 "NUTMEG", 571 "NUTMEG",
572 "TRAVIS", 572 "TRAVIS",
573 "INTERNAL_VCE"
573}; 574};
574 575
575static const char *connector_names[15] = { 576static const char *connector_names[15] = {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ef7bb3f6ecae..e62e56a57ee4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -128,6 +128,7 @@ int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = 0;
 int radeon_msi = -1;
+int radeon_lockup_timeout = 10000;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -177,6 +178,9 @@ module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
 MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(msi, radeon_msi, int, 0444);
 
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
+module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4bd36a354fbe..5bb78bf547ea 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -71,16 +71,13 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
71 return 0; 71 return 0;
72 } 72 }
73 fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq); 73 fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
74 if (!rdev->ring[fence->ring].ready) 74 radeon_fence_ring_emit(rdev, fence->ring, fence);
75 /* FIXME: cp is not running assume everythings is done right
76 * away
77 */
78 radeon_fence_write(rdev, fence->seq, fence->ring);
79 else
80 radeon_fence_ring_emit(rdev, fence->ring, fence);
81
82 trace_radeon_fence_emit(rdev->ddev, fence->seq); 75 trace_radeon_fence_emit(rdev->ddev, fence->seq);
83 fence->emitted = true; 76 fence->emitted = true;
 77 /* are we the first fence on a previously idle ring? */
78 if (list_empty(&rdev->fence_drv[fence->ring].emitted)) {
79 rdev->fence_drv[fence->ring].last_activity = jiffies;
80 }
84 list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted); 81 list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
85 write_unlock_irqrestore(&rdev->fence_lock, irq_flags); 82 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
86 return 0; 83 return 0;
@@ -92,34 +89,14 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
92 struct list_head *i, *n; 89 struct list_head *i, *n;
93 uint32_t seq; 90 uint32_t seq;
94 bool wake = false; 91 bool wake = false;
95 unsigned long cjiffies;
96 92
97 seq = radeon_fence_read(rdev, ring); 93 seq = radeon_fence_read(rdev, ring);
98 if (seq != rdev->fence_drv[ring].last_seq) { 94 if (seq == rdev->fence_drv[ring].last_seq)
99 rdev->fence_drv[ring].last_seq = seq;
100 rdev->fence_drv[ring].last_jiffies = jiffies;
101 rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
102 } else {
103 cjiffies = jiffies;
104 if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
105 cjiffies -= rdev->fence_drv[ring].last_jiffies;
106 if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
107 /* update the timeout */
108 rdev->fence_drv[ring].last_timeout -= cjiffies;
109 } else {
110 /* the 500ms timeout is elapsed we should test
111 * for GPU lockup
112 */
113 rdev->fence_drv[ring].last_timeout = 1;
114 }
115 } else {
116 /* wrap around update last jiffies, we will just wait
117 * a little longer
118 */
119 rdev->fence_drv[ring].last_jiffies = cjiffies;
120 }
121 return false; 95 return false;
122 } 96
97 rdev->fence_drv[ring].last_seq = seq;
98 rdev->fence_drv[ring].last_activity = jiffies;
99
123 n = NULL; 100 n = NULL;
124 list_for_each(i, &rdev->fence_drv[ring].emitted) { 101 list_for_each(i, &rdev->fence_drv[ring].emitted) {
125 fence = list_entry(i, struct radeon_fence, list); 102 fence = list_entry(i, struct radeon_fence, list);
@@ -162,8 +139,6 @@ int radeon_fence_create(struct radeon_device *rdev,
162 struct radeon_fence **fence, 139 struct radeon_fence **fence,
163 int ring) 140 int ring)
164{ 141{
165 unsigned long irq_flags;
166
167 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL); 142 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
168 if ((*fence) == NULL) { 143 if ((*fence) == NULL) {
169 return -ENOMEM; 144 return -ENOMEM;
@@ -176,10 +151,6 @@ int radeon_fence_create(struct radeon_device *rdev,
176 (*fence)->ring = ring; 151 (*fence)->ring = ring;
177 (*fence)->semaphore = NULL; 152 (*fence)->semaphore = NULL;
178 INIT_LIST_HEAD(&(*fence)->list); 153 INIT_LIST_HEAD(&(*fence)->list);
179
180 write_lock_irqsave(&rdev->fence_lock, irq_flags);
181 list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
182 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
183 return 0; 154 return 0;
184} 155}
185 156
@@ -191,9 +162,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
191 if (!fence) 162 if (!fence)
192 return true; 163 return true;
193 164
194 if (fence->rdev->gpu_lockup)
195 return true;
196
197 write_lock_irqsave(&fence->rdev->fence_lock, irq_flags); 165 write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
198 signaled = fence->signaled; 166 signaled = fence->signaled;
199 /* if we are shuting down report all fence as signaled */ 167 /* if we are shuting down report all fence as signaled */
@@ -217,68 +185,80 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
217 struct radeon_device *rdev; 185 struct radeon_device *rdev;
218 unsigned long irq_flags, timeout; 186 unsigned long irq_flags, timeout;
219 u32 seq; 187 u32 seq;
220 int r; 188 int i, r;
189 bool signaled;
221 190
222 if (fence == NULL) { 191 if (fence == NULL) {
223 WARN(1, "Querying an invalid fence : %p !\n", fence); 192 WARN(1, "Querying an invalid fence : %p !\n", fence);
224 return 0; 193 return -EINVAL;
225 } 194 }
195
226 rdev = fence->rdev; 196 rdev = fence->rdev;
227 if (radeon_fence_signaled(fence)) { 197 signaled = radeon_fence_signaled(fence);
228 return 0; 198 while (!signaled) {
229 } 199 read_lock_irqsave(&rdev->fence_lock, irq_flags);
230 timeout = rdev->fence_drv[fence->ring].last_timeout; 200 timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
231retry: 201 if (time_after(rdev->fence_drv[fence->ring].last_activity, timeout)) {
232 /* save current sequence used to check for GPU lockup */ 202 /* the normal case, timeout is somewhere before last_activity */
233 seq = rdev->fence_drv[fence->ring].last_seq; 203 timeout = rdev->fence_drv[fence->ring].last_activity - timeout;
234 trace_radeon_fence_wait_begin(rdev->ddev, seq); 204 } else {
235 if (intr) { 205 /* either jiffies wrapped around, or no fence was signaled in the last 500ms
 206 * either way, we will just wait for the minimum amount and then check for a lockup */
207 timeout = 1;
208 }
209 /* save current sequence value used to check for GPU lockups */
210 seq = rdev->fence_drv[fence->ring].last_seq;
211 read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
212
213 trace_radeon_fence_wait_begin(rdev->ddev, seq);
236 radeon_irq_kms_sw_irq_get(rdev, fence->ring); 214 radeon_irq_kms_sw_irq_get(rdev, fence->ring);
237 r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue, 215 if (intr) {
238 radeon_fence_signaled(fence), timeout); 216 r = wait_event_interruptible_timeout(
217 rdev->fence_drv[fence->ring].queue,
218 (signaled = radeon_fence_signaled(fence)), timeout);
219 } else {
220 r = wait_event_timeout(
221 rdev->fence_drv[fence->ring].queue,
222 (signaled = radeon_fence_signaled(fence)), timeout);
223 }
239 radeon_irq_kms_sw_irq_put(rdev, fence->ring); 224 radeon_irq_kms_sw_irq_put(rdev, fence->ring);
240 if (unlikely(r < 0)) { 225 if (unlikely(r < 0)) {
241 return r; 226 return r;
242 } 227 }
243 } else { 228 trace_radeon_fence_wait_end(rdev->ddev, seq);
244 radeon_irq_kms_sw_irq_get(rdev, fence->ring); 229
245 r = wait_event_timeout(rdev->fence_drv[fence->ring].queue, 230 if (unlikely(!signaled)) {
246 radeon_fence_signaled(fence), timeout); 231 /* we were interrupted for some reason and fence
247 radeon_irq_kms_sw_irq_put(rdev, fence->ring); 232 * isn't signaled yet, resume waiting */
248 } 233 if (r) {
249 trace_radeon_fence_wait_end(rdev->ddev, seq); 234 continue;
250 if (unlikely(!radeon_fence_signaled(fence))) { 235 }
251 /* we were interrupted for some reason and fence isn't 236
252 * isn't signaled yet, resume wait 237 write_lock_irqsave(&rdev->fence_lock, irq_flags);
253 */ 238 /* check if sequence value has changed since last_activity */
254 if (r) { 239 if (seq != rdev->fence_drv[fence->ring].last_seq) {
255 timeout = r; 240 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
256 goto retry; 241 continue;
257 } 242 }
258 /* don't protect read access to rdev->fence_drv[t].last_seq 243
259 * if we experiencing a lockup the value doesn't change 244 /* change sequence value on all rings, so nobody else things there is a lockup */
260 */ 245 for (i = 0; i < RADEON_NUM_RINGS; ++i)
261 if (seq == rdev->fence_drv[fence->ring].last_seq && 246 rdev->fence_drv[i].last_seq -= 0x10000;
262 radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) { 247
263 /* good news we believe it's a lockup */ 248 rdev->fence_drv[fence->ring].last_activity = jiffies;
264 printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", 249 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
265 fence->seq, seq); 250
266 /* FIXME: what should we do ? marking everyone 251 if (radeon_ring_is_lockup(rdev, fence->ring, &rdev->ring[fence->ring])) {
267 * as signaled for now 252
268 */ 253 /* good news we believe it's a lockup */
269 rdev->gpu_lockup = true; 254 printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
270 r = radeon_gpu_reset(rdev); 255 fence->seq, seq);
271 if (r) 256
272 return r; 257 /* mark the ring as not ready any more */
273 radeon_fence_write(rdev, fence->seq, fence->ring); 258 rdev->ring[fence->ring].ready = false;
274 rdev->gpu_lockup = false; 259 return -EDEADLK;
260 }
275 } 261 }
276 timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
277 write_lock_irqsave(&rdev->fence_lock, irq_flags);
278 rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
279 rdev->fence_drv[fence->ring].last_jiffies = jiffies;
280 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
281 goto retry;
282 } 262 }
283 return 0; 263 return 0;
284} 264}
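The retry/goto logic in radeon_fence_wait is replaced by a loop that waits in bounded slices keyed to the ring's last_activity timestamp; when nothing has moved for long enough, the ring is marked not ready and -EDEADLK is returned so the caller can trigger a reset. A minimal, self-contained model of one iteration's decision, in plain C with hypothetical names and milliseconds standing in for jiffies (not the driver code):

#include <errno.h>
#include <stdbool.h>

/* Decide what one wait iteration should do: 0 = done, -EAGAIN = wait again,
 * -EDEADLK = assume a lockup and let the caller reset the GPU. */
static int fence_wait_step(bool signaled, unsigned long now_ms,
			   unsigned long last_activity_ms,
			   unsigned long lockup_timeout_ms)
{
	if (signaled)
		return 0;
	if (now_ms - last_activity_ms < lockup_timeout_ms)
		return -EAGAIN;		/* the ring made progress recently, keep waiting */
	return -EDEADLK;		/* no activity for too long, report a lockup */
}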
@@ -289,13 +269,14 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
289 struct radeon_fence *fence; 269 struct radeon_fence *fence;
290 int r; 270 int r;
291 271
292 if (rdev->gpu_lockup) {
293 return 0;
294 }
295 write_lock_irqsave(&rdev->fence_lock, irq_flags); 272 write_lock_irqsave(&rdev->fence_lock, irq_flags);
273 if (!rdev->ring[ring].ready) {
274 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
275 return -EBUSY;
276 }
296 if (list_empty(&rdev->fence_drv[ring].emitted)) { 277 if (list_empty(&rdev->fence_drv[ring].emitted)) {
297 write_unlock_irqrestore(&rdev->fence_lock, irq_flags); 278 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
298 return 0; 279 return -ENOENT;
299 } 280 }
300 fence = list_entry(rdev->fence_drv[ring].emitted.next, 281 fence = list_entry(rdev->fence_drv[ring].emitted.next,
301 struct radeon_fence, list); 282 struct radeon_fence, list);
@@ -306,16 +287,17 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
306 return r; 287 return r;
307} 288}
308 289
309int radeon_fence_wait_last(struct radeon_device *rdev, int ring) 290int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
310{ 291{
311 unsigned long irq_flags; 292 unsigned long irq_flags;
312 struct radeon_fence *fence; 293 struct radeon_fence *fence;
313 int r; 294 int r;
314 295
315 if (rdev->gpu_lockup) {
316 return 0;
317 }
318 write_lock_irqsave(&rdev->fence_lock, irq_flags); 296 write_lock_irqsave(&rdev->fence_lock, irq_flags);
297 if (!rdev->ring[ring].ready) {
298 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
299 return -EBUSY;
300 }
319 if (list_empty(&rdev->fence_drv[ring].emitted)) { 301 if (list_empty(&rdev->fence_drv[ring].emitted)) {
320 write_unlock_irqrestore(&rdev->fence_lock, irq_flags); 302 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
321 return 0; 303 return 0;
@@ -419,7 +401,6 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
419 rdev->fence_drv[ring].cpu_addr = NULL; 401 rdev->fence_drv[ring].cpu_addr = NULL;
420 rdev->fence_drv[ring].gpu_addr = 0; 402 rdev->fence_drv[ring].gpu_addr = 0;
421 atomic_set(&rdev->fence_drv[ring].seq, 0); 403 atomic_set(&rdev->fence_drv[ring].seq, 0);
422 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
423 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted); 404 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
424 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled); 405 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
425 init_waitqueue_head(&rdev->fence_drv[ring].queue); 406 init_waitqueue_head(&rdev->fence_drv[ring].queue);
@@ -450,7 +431,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
450 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { 431 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
451 if (!rdev->fence_drv[ring].initialized) 432 if (!rdev->fence_drv[ring].initialized)
452 continue; 433 continue;
453 radeon_fence_wait_last(rdev, ring); 434 radeon_fence_wait_empty(rdev, ring);
454 wake_up_all(&rdev->fence_drv[ring].queue); 435 wake_up_all(&rdev->fence_drv[ring].queue);
455 write_lock_irqsave(&rdev->fence_lock, irq_flags); 436 write_lock_irqsave(&rdev->fence_lock, irq_flags);
456 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); 437 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c7008b5210f7..e15cb1fe2c39 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -154,6 +154,17 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
154 radeon_bo_unreserve(rbo); 154 radeon_bo_unreserve(rbo);
155} 155}
156 156
157static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
158{
159 if (r == -EDEADLK) {
160 radeon_mutex_lock(&rdev->cs_mutex);
161 r = radeon_gpu_reset(rdev);
162 if (!r)
163 r = -EAGAIN;
164 radeon_mutex_unlock(&rdev->cs_mutex);
165 }
166 return r;
167}
157 168
158/* 169/*
159 * GEM ioctls. 170 * GEM ioctls.
@@ -210,12 +221,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
210 args->initial_domain, false, 221 args->initial_domain, false,
211 false, &gobj); 222 false, &gobj);
212 if (r) { 223 if (r) {
224 r = radeon_gem_handle_lockup(rdev, r);
213 return r; 225 return r;
214 } 226 }
215 r = drm_gem_handle_create(filp, gobj, &handle); 227 r = drm_gem_handle_create(filp, gobj, &handle);
216 /* drop reference from allocate - handle holds it now */ 228 /* drop reference from allocate - handle holds it now */
217 drm_gem_object_unreference_unlocked(gobj); 229 drm_gem_object_unreference_unlocked(gobj);
218 if (r) { 230 if (r) {
231 r = radeon_gem_handle_lockup(rdev, r);
219 return r; 232 return r;
220 } 233 }
221 args->handle = handle; 234 args->handle = handle;
@@ -245,6 +258,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
245 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); 258 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
246 259
247 drm_gem_object_unreference_unlocked(gobj); 260 drm_gem_object_unreference_unlocked(gobj);
261 r = radeon_gem_handle_lockup(robj->rdev, r);
248 return r; 262 return r;
249} 263}
250 264
@@ -301,6 +315,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
301 break; 315 break;
302 } 316 }
303 drm_gem_object_unreference_unlocked(gobj); 317 drm_gem_object_unreference_unlocked(gobj);
318 r = radeon_gem_handle_lockup(robj->rdev, r);
304 return r; 319 return r;
305} 320}
306 321
@@ -322,6 +337,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
322 if (robj->rdev->asic->ioctl_wait_idle) 337 if (robj->rdev->asic->ioctl_wait_idle)
323 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); 338 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
324 drm_gem_object_unreference_unlocked(gobj); 339 drm_gem_object_unreference_unlocked(gobj);
340 r = radeon_gem_handle_lockup(robj->rdev, r);
325 return r; 341 return r;
326} 342}
327 343
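radeon_gem_handle_lockup is the consumer of the new -EDEADLK return: it takes cs_mutex (which radeon_gpu_reset no longer takes itself, per the radeon_device.c hunk earlier), resets the GPU and converts the error to -EAGAIN so userspace can retry the ioctl. A hypothetical userspace-side sketch of the same convention follows; libdrm's drmIoctl follows essentially this pattern, but the helper name here is made up:

#include <errno.h>
#include <sys/ioctl.h>

/* Hypothetical helper: retry an ioctl while the kernel reports EAGAIN
 * (for example after a GPU reset triggered by a lockup) or EINTR. */
static int ioctl_retry(int fd, unsigned long request, void *arg)
{
	int r;

	do {
		r = ioctl(fd, request, arg);
	} while (r == -1 && (errno == EAGAIN || errno == EINTR));
	return r;
}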
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 170f1718d92a..5df58d1aba06 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -149,6 +149,12 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
149 (rdev->pdev->subsystem_device == 0x01fd)) 149 (rdev->pdev->subsystem_device == 0x01fd))
150 return true; 150 return true;
151 151
152 /* RV515 seems to have MSI issues where it loses
153 * MSI rearms occasionally. This leads to lockups and freezes.
154 * disable it by default.
155 */
156 if (rdev->family == CHIP_RV515)
157 return false;
152 if (rdev->flags & RADEON_IS_IGP) { 158 if (rdev->flags & RADEON_IS_IGP) {
153 /* APUs work fine with MSIs */ 159 /* APUs work fine with MSIs */
154 if (rdev->family >= CHIP_PALM) 160 if (rdev->family >= CHIP_PALM)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 0c3cdbd614d2..499a5fed8b26 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -210,6 +210,7 @@ enum radeon_connector_table {
210 CT_RN50_POWER, 210 CT_RN50_POWER,
211 CT_MAC_X800, 211 CT_MAC_X800,
212 CT_MAC_G5_9600, 212 CT_MAC_G5_9600,
213 CT_SAM440EP
213}; 214};
214 215
215enum radeon_dvo_chip { 216enum radeon_dvo_chip {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index cc33b3d7c33b..2eb4c6ed198a 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -34,7 +34,7 @@
34#include "atom.h" 34#include "atom.h"
35 35
36int radeon_debugfs_ib_init(struct radeon_device *rdev); 36int radeon_debugfs_ib_init(struct radeon_device *rdev);
37int radeon_debugfs_ring_init(struct radeon_device *rdev); 37int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
38 38
39u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) 39u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
40{ 40{
@@ -237,9 +237,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
237 if (radeon_debugfs_ib_init(rdev)) { 237 if (radeon_debugfs_ib_init(rdev)) {
238 DRM_ERROR("Failed to register debugfs file for IB !\n"); 238 DRM_ERROR("Failed to register debugfs file for IB !\n");
239 } 239 }
240 if (radeon_debugfs_ring_init(rdev)) {
241 DRM_ERROR("Failed to register debugfs file for rings !\n");
242 }
243 radeon_mutex_unlock(&rdev->ib_pool.mutex); 240 radeon_mutex_unlock(&rdev->ib_pool.mutex);
244 return 0; 241 return 0;
245} 242}
@@ -270,6 +267,36 @@ int radeon_ib_pool_suspend(struct radeon_device *rdev)
270 return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager); 267 return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
271} 268}
272 269
270int radeon_ib_ring_tests(struct radeon_device *rdev)
271{
272 unsigned i;
273 int r;
274
275 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
276 struct radeon_ring *ring = &rdev->ring[i];
277
278 if (!ring->ready)
279 continue;
280
281 r = radeon_ib_test(rdev, i, ring);
282 if (r) {
283 ring->ready = false;
284
285 if (i == RADEON_RING_TYPE_GFX_INDEX) {
286 /* oh, oh, that's really bad */
287 DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
288 rdev->accel_working = false;
289 return r;
290
291 } else {
292 /* still not good, but we can live with it */
293 DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
294 }
295 }
296 }
297 return 0;
298}
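radeon_ib_ring_tests centralizes the per-ring IB tests that each ASIC startup function used to open-code: a failure on the GFX ring disables acceleration and aborts, while a failure on a secondary ring only marks that ring as not ready. A compact standalone model of that policy, with illustrative names and ring count (not driver code):

#include <stdbool.h>

#define NUM_RINGS	3
#define GFX_RING	0

/* Model of the policy above: a failing GFX ring is fatal and clears
 * accel_working; a failing secondary ring is merely marked not ready. */
static int ring_tests_model(bool ready[NUM_RINGS], const int test_result[NUM_RINGS],
			    bool *accel_working)
{
	int i;

	for (i = 0; i < NUM_RINGS; ++i) {
		if (!ready[i] || test_result[i] == 0)
			continue;
		ready[i] = false;
		if (i == GFX_RING) {
			*accel_working = false;
			return test_result[i];
		}
	}
	return 0;
}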
299
273/* 300/*
274 * Ring. 301 * Ring.
275 */ 302 */
@@ -319,7 +346,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
319 if (ndw < ring->ring_free_dw) { 346 if (ndw < ring->ring_free_dw) {
320 break; 347 break;
321 } 348 }
349 mutex_unlock(&ring->mutex);
322 r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring)); 350 r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
351 mutex_lock(&ring->mutex);
323 if (r) 352 if (r)
324 return r; 353 return r;
325 } 354 }
@@ -369,6 +398,75 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *rin
369 mutex_unlock(&ring->mutex); 398 mutex_unlock(&ring->mutex);
370} 399}
371 400
401void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
402{
403 int r;
404
405 mutex_lock(&ring->mutex);
406 radeon_ring_free_size(rdev, ring);
407 if (ring->rptr == ring->wptr) {
408 r = radeon_ring_alloc(rdev, ring, 1);
409 if (!r) {
410 radeon_ring_write(ring, ring->nop);
411 radeon_ring_commit(rdev, ring);
412 }
413 }
414 mutex_unlock(&ring->mutex);
415}
416
417void radeon_ring_lockup_update(struct radeon_ring *ring)
418{
419 ring->last_rptr = ring->rptr;
420 ring->last_activity = jiffies;
421}
422
423/**
 424 * radeon_ring_test_lockup() - check if a ring is locked up using the recorded activity information
425 * @rdev: radeon device structure
426 * @ring: radeon_ring structure holding ring information
427 *
 428 * We don't need to initialize the lockup tracking information, as we will either
 429 * see the CP rptr move to a different value or see a jiffies wrap-around, and both
 430 * cases force the lockup tracking information to be initialized.
 431 *
 432 * A possible false positive is if we get called after a while and last_rptr ==
 433 * the current CP rptr; even if it's unlikely, it might happen. To avoid this, if
 434 * the elapsed time since the last call is bigger than 2 seconds then we return
 435 * false and update the tracking information. Because of this, the caller must call
 436 * radeon_ring_test_lockup several times in less than 2 seconds for a lockup to be
 437 * reported; the fencing code should be cautious about that.
 438 *
 439 * The caller should write to the ring to force the CP to do something, so we don't
 440 * get a false positive when the CP simply has nothing to do.
441 *
442 **/
443bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
444{
445 unsigned long cjiffies, elapsed;
446 uint32_t rptr;
447
448 cjiffies = jiffies;
449 if (!time_after(cjiffies, ring->last_activity)) {
450 /* likely a wrap around */
451 radeon_ring_lockup_update(ring);
452 return false;
453 }
454 rptr = RREG32(ring->rptr_reg);
455 ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
456 if (ring->rptr != ring->last_rptr) {
457 /* CP is still working no lockup */
458 radeon_ring_lockup_update(ring);
459 return false;
460 }
461 elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
462 if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
463 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
464 return true;
465 }
466 /* give a chance to the GPU ... */
467 return false;
468}
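The new lockup test reduces to two pieces of per-ring state: the last observed rptr and the timestamp of the last observed activity. A ring is considered locked up only when the rptr has not moved and the configured timeout has elapsed; any observed progress refreshes the tracking state instead. A standalone model of that decision, using hypothetical names and milliseconds instead of jiffies:

#include <stdbool.h>

struct lockup_state {
	unsigned int last_rptr;
	unsigned long last_activity_ms;
};

/* Returns true only if the read pointer is unchanged AND the timeout elapsed;
 * progress (or a clock wrap) updates the state and reports no lockup. */
static bool ring_lockup_model(struct lockup_state *s, unsigned int rptr,
			      unsigned long now_ms, unsigned long timeout_ms)
{
	if (rptr != s->last_rptr || now_ms < s->last_activity_ms) {
		s->last_rptr = rptr;
		s->last_activity_ms = now_ms;
		return false;
	}
	return timeout_ms && (now_ms - s->last_activity_ms) >= timeout_ms;
}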
469
372int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, 470int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
373 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, 471 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
374 u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop) 472 u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
@@ -411,6 +509,9 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
411 } 509 }
412 ring->ptr_mask = (ring->ring_size / 4) - 1; 510 ring->ptr_mask = (ring->ring_size / 4) - 1;
413 ring->ring_free_dw = ring->ring_size / 4; 511 ring->ring_free_dw = ring->ring_size / 4;
512 if (radeon_debugfs_ring_init(rdev, ring)) {
513 DRM_ERROR("Failed to register debugfs file for rings !\n");
514 }
414 return 0; 515 return 0;
415} 516}
416 517
@@ -501,17 +602,24 @@ static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
501static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE]; 602static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
502#endif 603#endif
503 604
504int radeon_debugfs_ring_init(struct radeon_device *rdev) 605int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
505{ 606{
506#if defined(CONFIG_DEBUG_FS) 607#if defined(CONFIG_DEBUG_FS)
507 if (rdev->family >= CHIP_CAYMAN) 608 unsigned i;
508 return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 609 for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
509 ARRAY_SIZE(radeon_debugfs_ring_info_list)); 610 struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
510 else 611 int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
511 return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 1); 612 unsigned r;
512#else 613
513 return 0; 614 if (&rdev->ring[ridx] != ring)
615 continue;
616
617 r = radeon_debugfs_add_files(rdev, info, 1);
618 if (r)
619 return r;
620 }
514#endif 621#endif
622 return 0;
515} 623}
516 624
517int radeon_debugfs_ib_init(struct radeon_device *rdev) 625int radeon_debugfs_ib_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 4cce47e7dc0d..8fbfe69b7bcb 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -150,7 +150,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
150 offset = 0; 150 offset = 0;
151 list_for_each_entry(tmp, &sa_manager->sa_bo, list) { 151 list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
152 /* room before this object ? */ 152 /* room before this object ? */
153 if ((tmp->offset - offset) >= size) { 153 if (offset < tmp->offset && (tmp->offset - offset) >= size) {
154 head = tmp->list.prev; 154 head = tmp->list.prev;
155 goto out; 155 goto out;
156 } 156 }
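The one-line radeon_sa.c change guards an unsigned subtraction: if the running offset has already advanced past tmp->offset, (tmp->offset - offset) wraps around and the old check mistakes the wrap for a huge free hole. A tiny self-contained demonstration with hypothetical values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t offset = 4096, tmp_offset = 1024, size = 512;

	/* old check: the unsigned difference wraps to ~4 GiB and falsely reports room */
	assert((uint32_t)(tmp_offset - offset) >= size);
	/* fixed check: the extra ordering test rules the wrap out */
	assert(!(offset < tmp_offset && (uint32_t)(tmp_offset - offset) >= size));
	return 0;
}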
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 61dd4e3c9209..930a08af900f 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -149,6 +149,62 @@ void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
149 radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true); 149 radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
150} 150}
151 151
152int radeon_semaphore_sync_rings(struct radeon_device *rdev,
153 struct radeon_semaphore *semaphore,
154 bool sync_to[RADEON_NUM_RINGS],
155 int dst_ring)
156{
157 int i, r;
158
159 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
160 unsigned num_ops = i == dst_ring ? RADEON_NUM_RINGS : 1;
161
162 /* don't lock unused rings */
163 if (!sync_to[i] && i != dst_ring)
164 continue;
165
166 /* prevent GPU deadlocks */
167 if (!rdev->ring[i].ready) {
168 dev_err(rdev->dev, "Trying to sync to a disabled ring!");
169 r = -EINVAL;
170 goto error;
171 }
172
173 r = radeon_ring_lock(rdev, &rdev->ring[i], num_ops * 8);
174 if (r)
175 goto error;
176 }
177
178 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
179 /* no need to sync to our own or unused rings */
180 if (!sync_to[i] || i == dst_ring)
181 continue;
182
183 radeon_semaphore_emit_signal(rdev, i, semaphore);
184 radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
185 }
186
187 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
188
189 /* don't unlock unused rings */
190 if (!sync_to[i] && i != dst_ring)
191 continue;
192
193 radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
194 }
195
196 return 0;
197
198error:
199 /* unlock all locks taken so far */
200 for (--i; i >= 0; --i) {
201 if (sync_to[i] || i == dst_ring) {
202 radeon_ring_unlock_undo(rdev, &rdev->ring[i]);
203 }
204 }
205 return r;
206}
207
152void radeon_semaphore_free(struct radeon_device *rdev, 208void radeon_semaphore_free(struct radeon_device *rdev,
153 struct radeon_semaphore *semaphore) 209 struct radeon_semaphore *semaphore)
154{ 210{
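radeon_semaphore_sync_rings locks every ring involved, emits a signal on each source ring paired with a wait on the destination ring, then commits and unlocks them all; on any failure it unwinds only the locks already taken. The backwards unwind is the part most easily gotten wrong, so here is a reduced model of just that idiom, with hypothetical lock bookkeeping rather than real ring locks:

#include <stdbool.h>

#define NUM_RINGS 5

static int lock_selected(const bool selected[NUM_RINGS], bool locked[NUM_RINGS],
			 int fail_at /* index that refuses to lock, -1 for none */)
{
	int i;

	for (i = 0; i < NUM_RINGS; ++i) {
		if (!selected[i])
			continue;
		if (i == fail_at)
			goto error;	/* simulate a ring lock failing */
		locked[i] = true;
	}
	return 0;

error:
	/* unlock only what was taken so far, walking backwards like the driver */
	for (--i; i >= 0; --i)
		if (selected[i])
			locked[i] = false;
	return -1;
}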
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f493c6403af5..5e3d54ded1b3 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -222,8 +222,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
222{ 222{
223 struct radeon_device *rdev; 223 struct radeon_device *rdev;
224 uint64_t old_start, new_start; 224 uint64_t old_start, new_start;
225 struct radeon_fence *fence; 225 struct radeon_fence *fence, *old_fence;
226 int r, i; 226 int r;
227 227
228 rdev = radeon_get_rdev(bo->bdev); 228 rdev = radeon_get_rdev(bo->bdev);
229 r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev)); 229 r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
@@ -242,6 +242,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
242 break; 242 break;
243 default: 243 default:
244 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); 244 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
245 radeon_fence_unref(&fence);
245 return -EINVAL; 246 return -EINVAL;
246 } 247 }
247 switch (new_mem->mem_type) { 248 switch (new_mem->mem_type) {
@@ -253,42 +254,35 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
253 break; 254 break;
254 default: 255 default:
255 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); 256 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
257 radeon_fence_unref(&fence);
256 return -EINVAL; 258 return -EINVAL;
257 } 259 }
258 if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) { 260 if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
259 DRM_ERROR("Trying to move memory with ring turned off.\n"); 261 DRM_ERROR("Trying to move memory with ring turned off.\n");
262 radeon_fence_unref(&fence);
260 return -EINVAL; 263 return -EINVAL;
261 } 264 }
262 265
263 BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); 266 BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
264 267
265 /* sync other rings */ 268 /* sync other rings */
266 if (rdev->family >= CHIP_R600) { 269 old_fence = bo->sync_obj;
267 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 270 if (old_fence && old_fence->ring != fence->ring
268 /* no need to sync to our own or unused rings */ 271 && !radeon_fence_signaled(old_fence)) {
269 if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready) 272 bool sync_to_ring[RADEON_NUM_RINGS] = { };
270 continue; 273 sync_to_ring[old_fence->ring] = true;
271 274
272 if (!fence->semaphore) { 275 r = radeon_semaphore_create(rdev, &fence->semaphore);
273 r = radeon_semaphore_create(rdev, &fence->semaphore); 276 if (r) {
274 /* FIXME: handle semaphore error */ 277 radeon_fence_unref(&fence);
275 if (r) 278 return r;
276 continue; 279 }
277 }
278 280
279 r = radeon_ring_lock(rdev, &rdev->ring[i], 3); 281 r = radeon_semaphore_sync_rings(rdev, fence->semaphore,
280 /* FIXME: handle ring lock error */ 282 sync_to_ring, fence->ring);
281 if (r) 283 if (r) {
282 continue; 284 radeon_fence_unref(&fence);
283 radeon_semaphore_emit_signal(rdev, i, fence->semaphore); 285 return r;
284 radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
285
286 r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3);
287 /* FIXME: handle ring lock error */
288 if (r)
289 continue;
290 radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore);
291 radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]);
292 } 286 }
293 } 287 }
294 288
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 4cf381b3a6d8..a464eb5e2df2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -430,12 +430,9 @@ static int rs400_startup(struct radeon_device *rdev)
430 if (r) 430 if (r)
431 return r; 431 return r;
432 432
433 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 433 r = radeon_ib_ring_tests(rdev);
434 if (r) { 434 if (r)
435 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
436 rdev->accel_working = false;
437 return r; 435 return r;
438 }
439 436
440 return 0; 437 return 0;
441} 438}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 10706c66b84b..25f9eef12c42 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -396,7 +396,6 @@ int rs600_asic_reset(struct radeon_device *rdev)
396 /* Check if GPU is idle */ 396 /* Check if GPU is idle */
397 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { 397 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
398 dev_err(rdev->dev, "failed to reset GPU\n"); 398 dev_err(rdev->dev, "failed to reset GPU\n");
399 rdev->gpu_lockup = true;
400 ret = -1; 399 ret = -1;
401 } else 400 } else
402 dev_info(rdev->dev, "GPU reset succeed\n"); 401 dev_info(rdev->dev, "GPU reset succeed\n");
@@ -919,12 +918,9 @@ static int rs600_startup(struct radeon_device *rdev)
919 if (r) 918 if (r)
920 return r; 919 return r;
921 920
922 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 921 r = radeon_ib_ring_tests(rdev);
923 if (r) { 922 if (r)
924 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
925 rdev->accel_working = false;
926 return r; 923 return r;
927 }
928 924
929 return 0; 925 return 0;
930} 926}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index f2c3b9d75f18..3277ddecfe9f 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -647,12 +647,9 @@ static int rs690_startup(struct radeon_device *rdev)
647 if (r) 647 if (r)
648 return r; 648 return r;
649 649
650 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 650 r = radeon_ib_ring_tests(rdev);
651 if (r) { 651 if (r)
652 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
653 rdev->accel_working = false;
654 return r; 652 return r;
655 }
656 653
657 return 0; 654 return 0;
658} 655}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index d8d78fe17946..7f08cedb5333 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -412,12 +412,10 @@ static int rv515_startup(struct radeon_device *rdev)
412 if (r) 412 if (r)
413 return r; 413 return r;
414 414
415 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 415 r = radeon_ib_ring_tests(rdev);
416 if (r) { 416 if (r)
417 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
418 rdev->accel_working = false;
419 return r; 417 return r;
420 } 418
421 return 0; 419 return 0;
422} 420}
423 421
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c62ae4be3845..a8b001641e4b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -969,7 +969,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
969 } 969 }
970 if (rdev->flags & RADEON_IS_AGP) { 970 if (rdev->flags & RADEON_IS_AGP) {
971 size_bf = mc->gtt_start; 971 size_bf = mc->gtt_start;
972 size_af = 0xFFFFFFFF - mc->gtt_end + 1; 972 size_af = 0xFFFFFFFF - mc->gtt_end;
973 if (size_bf > size_af) { 973 if (size_bf > size_af) {
974 if (mc->mc_vram_size > size_bf) { 974 if (mc->mc_vram_size > size_bf) {
975 dev_warn(rdev->dev, "limiting VRAM\n"); 975 dev_warn(rdev->dev, "limiting VRAM\n");
@@ -983,7 +983,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
983 mc->real_vram_size = size_af; 983 mc->real_vram_size = size_af;
984 mc->mc_vram_size = size_af; 984 mc->mc_vram_size = size_af;
985 } 985 }
986 mc->vram_start = mc->gtt_end; 986 mc->vram_start = mc->gtt_end + 1;
987 } 987 }
988 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 988 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
989 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", 989 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
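The r700_vram_gtt_location change fixes an off-by-one: gtt_end is an inclusive end address, so the space above the GTT is 0xFFFFFFFF - gtt_end bytes and VRAM has to start at gtt_end + 1. For example, if the GTT ends at 0x0FFFFFFF, the old code computed size_af = 0xF0000001 and started VRAM at 0x0FFFFFFF, overlapping the GTT's last byte, while the fixed code computes size_af = 0xF0000000 and starts VRAM at 0x10000000.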
@@ -1114,12 +1114,9 @@ static int rv770_startup(struct radeon_device *rdev)
1114 if (r) 1114 if (r)
1115 return r; 1115 return r;
1116 1116
1117 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1117 r = radeon_ib_ring_tests(rdev);
1118 if (r) { 1118 if (r)
1119 dev_err(rdev->dev, "IB test failed (%d).\n", r);
1120 rdev->accel_working = false;
1121 return r; 1119 return r;
1122 }
1123 1120
1124 return 0; 1121 return 0;
1125} 1122}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 14919e1539fa..779f0b604fad 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2217,8 +2217,6 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2217 u32 srbm_status; 2217 u32 srbm_status;
2218 u32 grbm_status, grbm_status2; 2218 u32 grbm_status, grbm_status2;
2219 u32 grbm_status_se0, grbm_status_se1; 2219 u32 grbm_status_se0, grbm_status_se1;
2220 struct r100_gpu_lockup *lockup = &rdev->config.si.lockup;
2221 int r;
2222 2220
2223 srbm_status = RREG32(SRBM_STATUS); 2221 srbm_status = RREG32(SRBM_STATUS);
2224 grbm_status = RREG32(GRBM_STATUS); 2222 grbm_status = RREG32(GRBM_STATUS);
@@ -2226,20 +2224,12 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2226 grbm_status_se0 = RREG32(GRBM_STATUS_SE0); 2224 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2227 grbm_status_se1 = RREG32(GRBM_STATUS_SE1); 2225 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2228 if (!(grbm_status & GUI_ACTIVE)) { 2226 if (!(grbm_status & GUI_ACTIVE)) {
2229 r100_gpu_lockup_update(lockup, ring); 2227 radeon_ring_lockup_update(ring);
2230 return false; 2228 return false;
2231 } 2229 }
2232 /* force CP activities */ 2230 /* force CP activities */
2233 r = radeon_ring_lock(rdev, ring, 2); 2231 radeon_ring_force_activity(rdev, ring);
2234 if (!r) { 2232 return radeon_ring_test_lockup(rdev, ring);
2235 /* PACKET2 NOP */
2236 radeon_ring_write(ring, 0x80000000);
2237 radeon_ring_write(ring, 0x80000000);
2238 radeon_ring_unlock_commit(rdev, ring);
2239 }
2240 /* XXX deal with CP0,1,2 */
2241 ring->rptr = RREG32(ring->rptr_reg);
2242 return r100_gpu_cp_is_lockup(rdev, lockup, ring);
2243} 2233}
2244 2234
2245static int si_gpu_soft_reset(struct radeon_device *rdev) 2235static int si_gpu_soft_reset(struct radeon_device *rdev)
@@ -2999,8 +2989,8 @@ int si_rlc_init(struct radeon_device *rdev)
2999 } 2989 }
3000 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, 2990 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
3001 &rdev->rlc.save_restore_gpu_addr); 2991 &rdev->rlc.save_restore_gpu_addr);
2992 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3002 if (r) { 2993 if (r) {
3003 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3004 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); 2994 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
3005 si_rlc_fini(rdev); 2995 si_rlc_fini(rdev);
3006 return r; 2996 return r;
@@ -3023,9 +3013,8 @@ int si_rlc_init(struct radeon_device *rdev)
3023 } 3013 }
3024 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, 3014 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
3025 &rdev->rlc.clear_state_gpu_addr); 3015 &rdev->rlc.clear_state_gpu_addr);
3016 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
3026 if (r) { 3017 if (r) {
3027
3028 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
3029 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); 3018 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
3030 si_rlc_fini(rdev); 3019 si_rlc_fini(rdev);
3031 return r; 3020 return r;