Diffstat (limited to 'drivers/gpu')
 drivers/gpu/Makefile                      |    2
 drivers/gpu/drm/Kconfig                   |    1
 drivers/gpu/drm/drm_gem.c                 |   13
 drivers/gpu/drm/drm_irq.c                 |   27
 drivers/gpu/drm/drm_sysfs.c               |    4
 drivers/gpu/drm/drm_vm.c                  |    8
 drivers/gpu/drm/i915/Makefile             |    1
 drivers/gpu/drm/i915/i915_debugfs.c       |    6
 drivers/gpu/drm/i915/i915_dma.c           |  214
 drivers/gpu/drm/i915/i915_drv.c           |  134
 drivers/gpu/drm/i915/i915_drv.h           |  117
 drivers/gpu/drm/i915/i915_gem.c           |  890
 drivers/gpu/drm/i915/i915_irq.c           |  102
 drivers/gpu/drm/i915/i915_opregion.c      |   22
 drivers/gpu/drm/i915/i915_reg.h           |   65
 drivers/gpu/drm/i915/i915_suspend.c       |  457
 drivers/gpu/drm/i915/i915_trace.h         |  316
 drivers/gpu/drm/i915/i915_trace_points.c  |   11
 drivers/gpu/drm/i915/intel_bios.c         |   17
 drivers/gpu/drm/i915/intel_crt.c          |    9
 drivers/gpu/drm/i915/intel_display.c      |  797
 drivers/gpu/drm/i915/intel_dp.c           |    6
 drivers/gpu/drm/i915/intel_drv.h          |    7
 drivers/gpu/drm/i915/intel_hdmi.c         |    2
 drivers/gpu/drm/i915/intel_lvds.c         |   88
 drivers/gpu/drm/i915/intel_sdvo.c         |  502
 drivers/gpu/drm/i915/intel_tv.c           |    4
 drivers/gpu/drm/mga/mga_state.c           |    4
 drivers/gpu/drm/radeon/r100.c             |   14
 drivers/gpu/drm/radeon/r600.c             |   14
 drivers/gpu/drm/radeon/r600d.h            |    1
 drivers/gpu/drm/radeon/radeon.h           |    2
 drivers/gpu/drm/radeon/radeon_asic.h      |   12
 drivers/gpu/drm/radeon/radeon_device.c    |   21
 drivers/gpu/drm/radeon/radeon_ttm.c       |    2
 drivers/gpu/drm/ttm/ttm_bo_vm.c           |    2
 drivers/gpu/vga/Kconfig                   |   10
 drivers/gpu/vga/Makefile                  |    1
 drivers/gpu/vga/vgaarb.c                  | 1205
 39 files changed, 4559 insertions(+), 551 deletions(-)
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index de566cf0414c..30879df3daea 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/
+obj-y += drm/ vga/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e4d971c8b9d0..f831ea159291 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -102,6 +102,7 @@ config DRM_I915
     select BACKLIGHT_CLASS_DEVICE if ACPI
     select INPUT if ACPI
     select ACPI_VIDEO if ACPI
+    select ACPI_BUTTON if ACPI
     help
       Choose this option if you have a system that has Intel 830M, 845G,
       852GM, 855GM 865G or 915G integrated graphics.  If M is selected, the
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 230c9ffdd5e9..80391995bdec 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,6 +142,19 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
     if (IS_ERR(obj->filp))
         goto free;
 
+    /* Basically we want to disable the OOM killer and handle ENOMEM
+     * ourselves by sacrificing pages from cached buffers.
+     * XXX shmem_file_[gs]et_gfp_mask()
+     */
+    mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
+                 GFP_HIGHUSER |
+                 __GFP_COLD |
+                 __GFP_FS |
+                 __GFP_RECLAIMABLE |
+                 __GFP_NORETRY |
+                 __GFP_NOWARN |
+                 __GFP_NOMEMALLOC);
+
     kref_init(&obj->refcount);
     kref_init(&obj->handlecount);
     obj->size = size;
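An aside on the GFP change above: the shmem mapping's GFP mask is what later shmem page allocations inherit, so saving, relaxing, and restoring it is the usual pattern when one allocation attempt should behave differently. A minimal sketch, assuming only the stock mapping_gfp_mask()/mapping_set_gfp_mask() helpers from <linux/pagemap.h> and a generic address_space pointer:

    #include <linux/pagemap.h>

    /* Sketch: run one shmem allocation attempt with __GFP_NORETRY cleared,
     * restoring the mapping's original mask afterwards. */
    static void with_relaxed_gfp(struct address_space *mapping)
    {
        gfp_t gfp = mapping_gfp_mask(mapping);

        mapping_set_gfp_mask(mapping, gfp & ~__GFP_NORETRY);
        /* ... allocate shmem pages here; they may now block harder ... */
        mapping_set_gfp_mask(mapping, gfp);    /* restore the old mask */
    }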
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f85aaf21e783..0a6f0b3bdc78 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -37,6 +37,7 @@
 
 #include <linux/interrupt.h>    /* For task queue support */
 
+#include <linux/vgaarb.h>
 /**
  * Get interrupt from bus id.
  *
@@ -171,6 +172,26 @@ err:
 }
 EXPORT_SYMBOL(drm_vblank_init);
 
+static void drm_irq_vgaarb_nokms(void *cookie, bool state)
+{
+    struct drm_device *dev = cookie;
+
+    if (dev->driver->vgaarb_irq) {
+        dev->driver->vgaarb_irq(dev, state);
+        return;
+    }
+
+    if (!dev->irq_enabled)
+        return;
+
+    if (state)
+        dev->driver->irq_uninstall(dev);
+    else {
+        dev->driver->irq_preinstall(dev);
+        dev->driver->irq_postinstall(dev);
+    }
+}
+
 /**
  * Install IRQ handler.
  *
@@ -231,6 +252,9 @@ int drm_irq_install(struct drm_device *dev)
         return ret;
     }
 
+    if (!drm_core_check_feature(dev, DRIVER_MODESET))
+        vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
+
     /* After installing handler */
     ret = dev->driver->irq_postinstall(dev);
     if (ret < 0) {
@@ -279,6 +303,9 @@ int drm_irq_uninstall(struct drm_device * dev)
 
     DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
+    if (!drm_core_check_feature(dev, DRIVER_MODESET))
+        vga_client_register(dev->pdev, NULL, NULL, NULL);
+
     dev->driver->irq_uninstall(dev);
 
     free_irq(dev->pdev->irq, dev);
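For reference, the VGA arbiter client interface used in this hunk pairs a cookie with two callbacks, and unregistering is done by passing NULLs (as drm_irq_uninstall() does above). A hedged sketch of a client registration, using the same vga_client_register() calling convention that appears in this patch:

    #include <linux/vgaarb.h>

    /* Sketch: the arbiter calls this around mediating legacy VGA access;
     * state == false means the device must stop decoding VGA ranges. */
    static void my_irq_set_state(void *cookie, bool state)
    {
        struct drm_device *dev = cookie;

        /* mask the device's IRQs while its decode is disabled,
         * unmask them again when state goes back to true */
    }

    /* Registration: cookie plus IRQ callback; a NULL decode callback keeps
     * the default decoding, and passing all NULLs later unregisters. */
    ret = vga_client_register(dev->pdev, dev, my_irq_set_state, NULL);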
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 51611722aa02..7e42b7e9d43a 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -77,7 +77,7 @@ static ssize_t version_show(struct class *dev, char *buf)
              CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
 }
 
-static char *drm_nodename(struct device *dev)
+static char *drm_devnode(struct device *dev, mode_t *mode)
 {
     return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
 }
@@ -113,7 +113,7 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
     if (err)
         goto err_out_class;
 
-    class->nodename = drm_nodename;
+    class->devnode = drm_devnode;
 
     return class;
 
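This hunk tracks the driver-core rename of class->nodename to class->devnode; the new callback also receives a mode pointer so a class can override the device node's permissions. drm_devnode() ignores it, but a hypothetical callback that used it might look like:

    /* Hypothetical devnode callback: pick the node name and also relax
     * its permissions (drm_devnode() above leaves *mode untouched). */
    static char *my_devnode(struct device *dev, mode_t *mode)
    {
        if (mode)
            *mode = 0666;    /* hypothetical world-accessible node */
        return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
    }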
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 7e1fbe5d4779..4ac900f4647f 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -369,28 +369,28 @@ static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 
 /** AGP virtual memory operations */
-static struct vm_operations_struct drm_vm_ops = {
+static const struct vm_operations_struct drm_vm_ops = {
     .fault = drm_vm_fault,
     .open = drm_vm_open,
     .close = drm_vm_close,
 };
 
 /** Shared virtual memory operations */
-static struct vm_operations_struct drm_vm_shm_ops = {
+static const struct vm_operations_struct drm_vm_shm_ops = {
     .fault = drm_vm_shm_fault,
     .open = drm_vm_open,
     .close = drm_vm_shm_close,
 };
 
 /** DMA virtual memory operations */
-static struct vm_operations_struct drm_vm_dma_ops = {
+static const struct vm_operations_struct drm_vm_dma_ops = {
     .fault = drm_vm_dma_fault,
     .open = drm_vm_open,
     .close = drm_vm_close,
 };
 
 /** Scatter-gather virtual memory operations */
-static struct vm_operations_struct drm_vm_sg_ops = {
+static const struct vm_operations_struct drm_vm_sg_ops = {
     .fault = drm_vm_sg_fault,
     .open = drm_vm_open,
     .close = drm_vm_close,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5269dfa5f620..fa7b9be096bc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
       i915_gem.o \
       i915_gem_debug.o \
       i915_gem_tiling.o \
+      i915_trace_points.o \
       intel_display.o \
       intel_crt.o \
       intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1e3bdcee863c..f8ce9a3a420d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,11 +96,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
         {
             struct drm_gem_object *obj = obj_priv->obj;
 
-            seq_printf(m, "    %p: %s %08x %08x %d",
+            seq_printf(m, "    %p: %s %8zd %08x %08x %d %s",
                    obj,
                    get_pin_flag(obj_priv),
+                   obj->size,
                    obj->read_domains, obj->write_domain,
-                   obj_priv->last_rendering_seqno);
+                   obj_priv->last_rendering_seqno,
+                   obj_priv->dirty ? "dirty" : "");
 
             if (obj->name)
                 seq_printf(m, " (name: %d)", obj->name);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9909505d070a..e5b138be45fa 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -33,6 +33,8 @@
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
+#include <linux/vgaarb.h>
 
 /* Really want an OS-independent resettable timer.  Would like to have
  * this loop run for (eg) 3 sec, but have the timer reset every time
@@ -49,14 +51,18 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
     u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
     int i;
 
+    trace_i915_ring_wait_begin (dev);
+
     for (i = 0; i < 100000; i++) {
         ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
         acthd = I915_READ(acthd_reg);
         ring->space = ring->head - (ring->tail + 8);
         if (ring->space < 0)
             ring->space += ring->Size;
-        if (ring->space >= n)
+        if (ring->space >= n) {
+            trace_i915_ring_wait_end (dev);
             return 0;
+        }
 
         if (dev->primary->master) {
             struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -76,6 +82,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 
     }
 
+    trace_i915_ring_wait_end (dev);
     return -EBUSY;
 }
 
@@ -921,7 +928,8 @@ static int i915_get_bridge_dev(struct drm_device *dev)
  * how much was set aside so we can use it for our own purposes.
  */
 static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
-              uint32_t *preallocated_size)
+              uint32_t *preallocated_size,
+              uint32_t *start)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
     u16 tmp = 0;
@@ -1008,11 +1016,174 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
         return -1;
     }
     *preallocated_size = stolen - overhead;
+    *start = overhead;
 
     return 0;
 }
 
+#define PTE_ADDRESS_MASK        0xfffff000
+#define PTE_ADDRESS_MASK_HIGH   0x000000f0 /* i915+ */
+#define PTE_MAPPING_TYPE_UNCACHED   (0 << 1)
+#define PTE_MAPPING_TYPE_DCACHE     (1 << 1) /* i830 only */
+#define PTE_MAPPING_TYPE_CACHED     (3 << 1)
+#define PTE_MAPPING_TYPE_MASK       (3 << 1)
+#define PTE_VALID           (1 << 0)
+
+/**
+ * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * @dev: drm device
+ * @gtt_addr: address to translate
+ *
+ * Some chip functions require allocations from stolen space but need the
+ * physical address of the memory in question.  We use this routine
+ * to get a physical address suitable for register programming from a given
+ * GTT address.
+ */
+static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+                      unsigned long gtt_addr)
+{
+    unsigned long *gtt;
+    unsigned long entry, phys;
+    int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+    int gtt_offset, gtt_size;
+
+    if (IS_I965G(dev)) {
+        if (IS_G4X(dev) || IS_IGDNG(dev)) {
+            gtt_offset = 2*1024*1024;
+            gtt_size = 2*1024*1024;
+        } else {
+            gtt_offset = 512*1024;
+            gtt_size = 512*1024;
+        }
+    } else {
+        gtt_bar = 3;
+        gtt_offset = 0;
+        gtt_size = pci_resource_len(dev->pdev, gtt_bar);
+    }
+
+    gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
+             gtt_size);
+    if (!gtt) {
+        DRM_ERROR("ioremap of GTT failed\n");
+        return 0;
+    }
+
+    entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
+
+    DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+
+    /* Mask out these reserved bits on this hardware. */
+    if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
+        IS_I945G(dev) || IS_I945GM(dev)) {
+        entry &= ~PTE_ADDRESS_MASK_HIGH;
+    }
+
+    /* If it's not a mapping type we know, then bail. */
+    if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
+        (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
+        iounmap(gtt);
+        return 0;
+    }
+
+    if (!(entry & PTE_VALID)) {
+        DRM_ERROR("bad GTT entry in stolen space\n");
+        iounmap(gtt);
+        return 0;
+    }
+
+    iounmap(gtt);
+
+    phys = (entry & PTE_ADDRESS_MASK) |
+        ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
+
+    DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+
+    return phys;
+}
+
+static void i915_warn_stolen(struct drm_device *dev)
+{
+    DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
+    DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+}
+
+static void i915_setup_compression(struct drm_device *dev, int size)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_mm_node *compressed_fb, *compressed_llb;
+    unsigned long cfb_base, ll_base;
+
+    /* Leave 1M for line length buffer & misc. */
+    compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+    if (!compressed_fb) {
+        i915_warn_stolen(dev);
+        return;
+    }
+
+    compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+    if (!compressed_fb) {
+        i915_warn_stolen(dev);
+        return;
+    }
+
+    cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
+    if (!cfb_base) {
+        DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+        drm_mm_put_block(compressed_fb);
+    }
+
+    if (!IS_GM45(dev)) {
+        compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+                            4096, 0);
+        if (!compressed_llb) {
+            i915_warn_stolen(dev);
+            return;
+        }
+
+        compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
+        if (!compressed_llb) {
+            i915_warn_stolen(dev);
+            return;
+        }
+
+        ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
+        if (!ll_base) {
+            DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+            drm_mm_put_block(compressed_fb);
+            drm_mm_put_block(compressed_llb);
+        }
+    }
+
+    dev_priv->cfb_size = size;
+
+    if (IS_GM45(dev)) {
+        g4x_disable_fbc(dev);
+        I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+    } else {
+        i8xx_disable_fbc(dev);
+        I915_WRITE(FBC_CFB_BASE, cfb_base);
+        I915_WRITE(FBC_LL_BASE, ll_base);
+    }
+
+    DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+          ll_base, size >> 20);
+}
+
+/* true = enable decode, false = disable decoder */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+    struct drm_device *dev = cookie;
+
+    intel_modeset_vga_set_state(dev, state);
+    if (state)
+        return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+               VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+    else
+        return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
 static int i915_load_modeset_init(struct drm_device *dev,
+                  unsigned long prealloc_start,
                   unsigned long prealloc_size,
                   unsigned long agp_size)
 {
@@ -1033,6 +1204,10 @@ static int i915_load_modeset_init(struct drm_device *dev,
 
     /* Basic memrange allocator for stolen space (aka vram) */
     drm_mm_init(&dev_priv->vram, 0, prealloc_size);
+    DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+
+    /* We're off and running w/KMS */
+    dev_priv->mm.suspended = 0;
 
     /* Let GEM Manage from end of prealloc space to end of aperture.
      *
@@ -1045,10 +1220,24 @@ static int i915_load_modeset_init(struct drm_device *dev,
      */
     i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
 
+    mutex_lock(&dev->struct_mutex);
     ret = i915_gem_init_ringbuffer(dev);
+    mutex_unlock(&dev->struct_mutex);
     if (ret)
         goto out;
 
+    /* Try to set up FBC with a reasonable compressed buffer size */
+    if (I915_HAS_FBC(dev) && i915_powersave) {
+        int cfb_size;
+
+        /* Try to get an 8M buffer... */
+        if (prealloc_size > (9*1024*1024))
+            cfb_size = 8*1024*1024;
+        else /* fall back to 7/8 of the stolen space */
+            cfb_size = prealloc_size * 7 / 8;
+        i915_setup_compression(dev, cfb_size);
+    }
+
     /* Allow hardware batchbuffers unless told otherwise.
      */
     dev_priv->allow_batchbuffer = 1;
@@ -1057,6 +1246,11 @@ static int i915_load_modeset_init(struct drm_device *dev,
     if (ret)
         DRM_INFO("failed to find VBIOS tables\n");
 
+    /* if we have > 1 VGA cards, then disable the radeon VGA resources */
+    ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+    if (ret)
+        goto destroy_ringbuffer;
+
     ret = drm_irq_install(dev);
     if (ret)
         goto destroy_ringbuffer;
@@ -1161,7 +1355,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     struct drm_i915_private *dev_priv = dev->dev_private;
     resource_size_t base, size;
     int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
-    uint32_t agp_size, prealloc_size;
+    uint32_t agp_size, prealloc_size, prealloc_start;
 
     /* i915 has 4 more counters */
     dev->counters += 4;
@@ -1215,7 +1409,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
             "performance may suffer.\n");
     }
 
-    ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
+    ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
     if (ret)
         goto out_iomapfree;
 
@@ -1273,6 +1467,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     spin_lock_init(&dev_priv->user_irq_lock);
     spin_lock_init(&dev_priv->error_lock);
     dev_priv->user_irq_refcount = 0;
+    dev_priv->trace_irq_seqno = 0;
 
     ret = drm_vblank_init(dev, I915_NUM_PIPE);
 
@@ -1281,8 +1476,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         return ret;
     }
 
+    /* Start out suspended */
+    dev_priv->mm.suspended = 1;
+
     if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-        ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+        ret = i915_load_modeset_init(dev, prealloc_start,
+                         prealloc_size, agp_size);
         if (ret < 0) {
             DRM_ERROR("failed to init modeset\n");
             goto out_workqueue_free;
@@ -1294,6 +1493,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     if (!IS_IGDNG(dev))
         intel_opregion_init(dev, 0);
 
+    setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+            (unsigned long) dev);
     return 0;
 
 out_workqueue_free:
@@ -1314,6 +1515,7 @@ int i915_driver_unload(struct drm_device *dev)
     struct drm_i915_private *dev_priv = dev->dev_private;
 
     destroy_workqueue(dev_priv->wq);
+    del_timer_sync(&dev_priv->hangcheck_timer);
 
     io_mapping_free(dev_priv->mm.gtt_mapping);
     if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -1324,6 +1526,7 @@ int i915_driver_unload(struct drm_device *dev)
 
     if (drm_core_check_feature(dev, DRIVER_MODESET)) {
         drm_irq_uninstall(dev);
+        vga_client_register(dev->pdev, NULL, NULL, NULL);
     }
 
     if (dev->pdev->msi_enabled)
@@ -1452,6 +1655,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
     DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
     DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
     DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+    DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
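To make the PTE decode in i915_gtt_to_phys() above concrete: each 32-bit GTT entry maps one 4KiB page, and the physical address is rebuilt from the masked low bits plus the high nibble shifted up into bits 32-35. A standalone sketch of just that arithmetic, reusing the PTE_* masks defined in the hunk:

    /* Sketch: the address reassembly done at the end of i915_gtt_to_phys(),
     * isolated from the MMIO and validity checks. Uses the PTE_* masks
     * defined in the hunk above. */
    static u64 pte_to_phys(u32 entry)
    {
        return (entry & PTE_ADDRESS_MASK) |
               ((u64)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
    }
    /* e.g. entry 0x12345001 (valid, uncached) -> phys 0x12345000;
     * a high nibble of 0x30 would add bits 0x3_00000000. */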
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index dbe568c9327b..7f436ec075f6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -89,6 +89,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
         pci_set_power_state(dev->pdev, PCI_D3hot);
     }
 
+    /* Modeset on resume, not lid events */
+    dev_priv->modeset_on_lid = 0;
+
     return 0;
 }
 
@@ -97,8 +100,6 @@ static int i915_resume(struct drm_device *dev)
     struct drm_i915_private *dev_priv = dev->dev_private;
     int ret = 0;
 
-    pci_set_power_state(dev->pdev, PCI_D0);
-    pci_restore_state(dev->pdev);
     if (pci_enable_device(dev->pdev))
         return -1;
     pci_set_master(dev->pdev);
@@ -124,9 +125,135 @@ static int i915_resume(struct drm_device *dev)
         drm_helper_resume_force_mode(dev);
     }
 
+    dev_priv->modeset_on_lid = 0;
+
     return ret;
 }
 
+/**
+ * i965_reset - reset chip after a hang
+ * @dev: drm device to reset
+ * @flags: reset domains
+ *
+ * Reset the chip.  Useful if a hang is detected. Returns zero on successful
+ * reset or otherwise an error code.
+ *
+ * Procedure is fairly simple:
+ *  - reset the chip using the reset reg
+ *  - re-init context state
+ *  - re-init hardware status page
+ *  - re-init ring buffer
+ *  - re-init interrupt state
+ *  - re-init display
+ */
+int i965_reset(struct drm_device *dev, u8 flags)
+{
+    drm_i915_private_t *dev_priv = dev->dev_private;
+    unsigned long timeout;
+    u8 gdrst;
+    /*
+     * We really should only reset the display subsystem if we actually
+     * need to
+     */
+    bool need_display = true;
+
+    mutex_lock(&dev->struct_mutex);
+
+    /*
+     * Clear request list
+     */
+    i915_gem_retire_requests(dev);
+
+    if (need_display)
+        i915_save_display(dev);
+
+    if (IS_I965G(dev) || IS_G4X(dev)) {
+        /*
+         * Set the domains we want to reset, then the reset bit (bit 0).
+         * Clear the reset bit after a while and wait for hardware status
+         * bit (bit 1) to be set
+         */
+        pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+        pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
+        udelay(50);
+        pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
+
+        /* ...we don't want to loop forever though, 500ms should be plenty */
+        timeout = jiffies + msecs_to_jiffies(500);
+        do {
+            udelay(100);
+            pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+        } while ((gdrst & 0x1) && time_after(timeout, jiffies));
+
+        if (gdrst & 0x1) {
+            WARN(true, "i915: Failed to reset chip\n");
+            mutex_unlock(&dev->struct_mutex);
+            return -EIO;
+        }
+    } else {
+        DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+        return -ENODEV;
+    }
+
+    /* Ok, now get things going again... */
+
+    /*
+     * Everything depends on having the GTT running, so we need to start
+     * there.  Fortunately we don't need to do this unless we reset the
+     * chip at a PCI level.
+     *
+     * Next we need to restore the context, but we don't use those
+     * yet either...
+     *
+     * Ring buffer needs to be re-initialized in the KMS case, or if X
+     * was running at the time of the reset (i.e. we weren't VT
+     * switched away).
+     */
+    if (drm_core_check_feature(dev, DRIVER_MODESET) ||
+        !dev_priv->mm.suspended) {
+        drm_i915_ring_buffer_t *ring = &dev_priv->ring;
+        struct drm_gem_object *obj = ring->ring_obj;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        dev_priv->mm.suspended = 0;
+
+        /* Stop the ring if it's running. */
+        I915_WRITE(PRB0_CTL, 0);
+        I915_WRITE(PRB0_TAIL, 0);
+        I915_WRITE(PRB0_HEAD, 0);
+
+        /* Initialize the ring. */
+        I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+        I915_WRITE(PRB0_CTL,
+               ((obj->size - 4096) & RING_NR_PAGES) |
+               RING_NO_REPORT |
+               RING_VALID);
+        if (!drm_core_check_feature(dev, DRIVER_MODESET))
+            i915_kernel_lost_context(dev);
+        else {
+            ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+            ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+            ring->space = ring->head - (ring->tail + 8);
+            if (ring->space < 0)
+                ring->space += ring->Size;
+        }
+
+        mutex_unlock(&dev->struct_mutex);
+        drm_irq_uninstall(dev);
+        drm_irq_install(dev);
+        mutex_lock(&dev->struct_mutex);
+    }
+
+    /*
+     * Display needs restore too...
+     */
+    if (need_display)
+        i915_restore_display(dev);
+
+    mutex_unlock(&dev->struct_mutex);
+    return 0;
+}
+
+
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -234,6 +361,8 @@ static int __init i915_init(void)
 {
     driver.num_ioctls = i915_max_ioctl;
 
+    i915_gem_shrinker_init();
+
     /*
      * If CONFIG_DRM_I915_KMS is set, default to KMS unless
      * explicitly disabled with the module pararmeter.
@@ -260,6 +389,7 @@ static int __init i915_init(void)
 
 static void __exit i915_exit(void)
 {
+    i915_gem_shrinker_exit();
     drm_exit(&driver);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 77ed060b4292..57204e298975 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -48,6 +48,11 @@ enum pipe {
     PIPE_B,
 };
 
+enum plane {
+    PLANE_A = 0,
+    PLANE_B,
+};
+
 #define I915_NUM_PIPE   2
 
 /* Interface history:
@@ -148,6 +153,23 @@ struct drm_i915_error_state {
     struct timeval time;
 };
 
+struct drm_i915_display_funcs {
+    void (*dpms)(struct drm_crtc *crtc, int mode);
+    bool (*fbc_enabled)(struct drm_crtc *crtc);
+    void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+    void (*disable_fbc)(struct drm_device *dev);
+    int (*get_display_clock_speed)(struct drm_device *dev);
+    int (*get_fifo_size)(struct drm_device *dev, int plane);
+    void (*update_wm)(struct drm_device *dev, int planea_clock,
+              int planeb_clock, int sr_hdisplay, int pixel_size);
+    /* clock updates for mode set */
+    /* cursor updates */
+    /* render clock increase/decrease */
+    /* display clock increase/decrease */
+    /* pll clock increase/decrease */
+    /* clock gating init */
+};
+
 typedef struct drm_i915_private {
     struct drm_device *dev;
 
@@ -180,6 +202,7 @@ typedef struct drm_i915_private {
     spinlock_t user_irq_lock;
     /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
     int user_irq_refcount;
+    u32 trace_irq_seqno;
     /** Cached value of IMR to avoid reads in updating the bitfield */
     u32 irq_mask_reg;
     u32 pipestat[2];
@@ -198,10 +221,21 @@ typedef struct drm_i915_private {
     unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
     int vblank_pipe;
 
+    /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
+    struct timer_list hangcheck_timer;
+    int hangcheck_count;
+    uint32_t last_acthd;
+
     bool cursor_needs_physical;
 
     struct drm_mm vram;
 
+    unsigned long cfb_size;
+    unsigned long cfb_pitch;
+    int cfb_fence;
+    int cfb_plane;
+
     int irq_enabled;
 
     struct intel_opregion opregion;
@@ -222,6 +256,8 @@ typedef struct drm_i915_private {
     unsigned int edp_support:1;
     int lvds_ssc_freq;
 
+    struct notifier_block lid_notifier;
+
     int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
     struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
     int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
@@ -234,7 +270,11 @@ typedef struct drm_i915_private {
     struct work_struct error_work;
     struct workqueue_struct *wq;
 
+    /* Display functions */
+    struct drm_i915_display_funcs display;
+
     /* Register state */
+    bool modeset_on_lid;
     u8 saveLBB;
     u32 saveDSPACNTR;
     u32 saveDSPBCNTR;
@@ -256,6 +296,12 @@ typedef struct drm_i915_private {
     u32 saveVBLANK_A;
     u32 saveVSYNC_A;
     u32 saveBCLRPAT_A;
+    u32 saveTRANS_HTOTAL_A;
+    u32 saveTRANS_HBLANK_A;
+    u32 saveTRANS_HSYNC_A;
+    u32 saveTRANS_VTOTAL_A;
+    u32 saveTRANS_VBLANK_A;
+    u32 saveTRANS_VSYNC_A;
     u32 savePIPEASTAT;
     u32 saveDSPASTRIDE;
     u32 saveDSPASIZE;
@@ -264,8 +310,11 @@ typedef struct drm_i915_private {
     u32 saveDSPASURF;
     u32 saveDSPATILEOFF;
     u32 savePFIT_PGM_RATIOS;
+    u32 saveBLC_HIST_CTL;
     u32 saveBLC_PWM_CTL;
     u32 saveBLC_PWM_CTL2;
+    u32 saveBLC_CPU_PWM_CTL;
+    u32 saveBLC_CPU_PWM_CTL2;
     u32 saveFPB0;
     u32 saveFPB1;
     u32 saveDPLL_B;
@@ -277,6 +326,12 @@ typedef struct drm_i915_private {
     u32 saveVBLANK_B;
     u32 saveVSYNC_B;
     u32 saveBCLRPAT_B;
+    u32 saveTRANS_HTOTAL_B;
+    u32 saveTRANS_HBLANK_B;
+    u32 saveTRANS_HSYNC_B;
+    u32 saveTRANS_VTOTAL_B;
+    u32 saveTRANS_VBLANK_B;
+    u32 saveTRANS_VSYNC_B;
     u32 savePIPEBSTAT;
     u32 saveDSPBSTRIDE;
     u32 saveDSPBSIZE;
@@ -302,6 +357,7 @@ typedef struct drm_i915_private {
     u32 savePFIT_CONTROL;
     u32 save_palette_a[256];
     u32 save_palette_b[256];
+    u32 saveDPFC_CB_BASE;
     u32 saveFBC_CFB_BASE;
     u32 saveFBC_LL_BASE;
     u32 saveFBC_CONTROL;
@@ -309,6 +365,12 @@ typedef struct drm_i915_private {
     u32 saveIER;
     u32 saveIIR;
     u32 saveIMR;
+    u32 saveDEIER;
+    u32 saveDEIMR;
+    u32 saveGTIER;
+    u32 saveGTIMR;
+    u32 saveFDI_RXA_IMR;
+    u32 saveFDI_RXB_IMR;
     u32 saveCACHE_MODE_0;
     u32 saveD_STATE;
     u32 saveDSPCLK_GATE_D;
@@ -342,6 +404,16 @@ typedef struct drm_i915_private {
     u32 savePIPEB_DP_LINK_M;
     u32 savePIPEA_DP_LINK_N;
     u32 savePIPEB_DP_LINK_N;
+    u32 saveFDI_RXA_CTL;
+    u32 saveFDI_TXA_CTL;
+    u32 saveFDI_RXB_CTL;
+    u32 saveFDI_TXB_CTL;
+    u32 savePFA_CTL_1;
+    u32 savePFB_CTL_1;
+    u32 savePFA_WIN_SZ;
+    u32 savePFB_WIN_SZ;
+    u32 savePFA_WIN_POS;
+    u32 savePFB_WIN_POS;
 
     struct {
         struct drm_mm gtt_space;
@@ -350,6 +422,15 @@ typedef struct drm_i915_private {
         int gtt_mtrr;
 
         /**
+         * Membership on list of all loaded devices, used to evict
+         * inactive buffers under memory pressure.
+         *
+         * Modifications should only be done whilst holding the
+         * shrink_list_lock spinlock.
+         */
+        struct list_head shrink_list;
+
+        /**
          * List of objects currently involved in rendering from the
          * ringbuffer.
          *
@@ -432,7 +513,7 @@ typedef struct drm_i915_private {
         * It prevents command submission from occuring and makes
         * every pending request fail
         */
-        int wedged;
+        atomic_t wedged;
 
         /** Bit 6 swizzling required for X tiling */
         uint32_t bit_6_swizzle_x;
@@ -443,6 +524,8 @@ typedef struct drm_i915_private {
         struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
     } mm;
     struct sdvo_device_mapping sdvo_mappings[2];
+    /* indicate whether the LVDS_BORDER should be enabled or not */
+    unsigned int lvds_border_bits;
 
     /* Reclocking support */
     bool render_reclock_avail;
@@ -491,10 +574,7 @@ struct drm_i915_gem_object {
      * This is the same as gtt_space->start
      */
     uint32_t gtt_offset;
-    /**
-     * Required alignment for the object
-     */
-    uint32_t gtt_alignment;
+
     /**
      * Fake offset for use by mmap(2)
      */
@@ -541,6 +621,11 @@ struct drm_i915_gem_object {
     * in an execbuffer object list.
     */
     int in_execbuffer;
+
+    /**
+     * Advice: are the backing pages purgeable?
+     */
+    int madv;
 };
 
 /**
@@ -585,6 +670,8 @@ extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
 
+extern void i915_save_display(struct drm_device *dev);
+extern void i915_restore_display(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
@@ -604,13 +691,16 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 extern int i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *boxes,
             int i, int DR1, int DR4);
+extern int i965_reset(struct drm_device *dev, u8 flags);
 
 /* i915_irq.c */
+void i915_hangcheck_elapsed(unsigned long data);
 extern int i915_irq_emit(struct drm_device *dev, void *data,
              struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
              struct drm_file *file_priv);
 void i915_user_irq_get(struct drm_device *dev);
+void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
 void i915_user_irq_put(struct drm_device *dev);
 extern void i915_enable_interrupt (struct drm_device *dev);
 
@@ -676,6 +766,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
               struct drm_file *file_priv);
 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                 struct drm_file *file_priv);
+int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+               struct drm_file *file_priv);
 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                struct drm_file *file_priv);
 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
@@ -695,6 +787,7 @@ int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 uint32_t i915_get_gem_seqno(struct drm_device *dev);
+bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
 void i915_gem_retire_requests(struct drm_device *dev);
@@ -720,6 +813,9 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 
+void i915_gem_shrinker_init(void);
+void i915_gem_shrinker_exit(void);
+
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
@@ -766,6 +862,9 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 /* modesetting */
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
+extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void i8xx_disable_fbc(struct drm_device *dev);
+extern void g4x_disable_fbc(struct drm_device *dev);
 
 /**
  * Lock test for when it's just for synchronization of ring access.
@@ -863,6 +962,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
              (dev)->pci_device == 0x2E12 || \
              (dev)->pci_device == 0x2E22 || \
              (dev)->pci_device == 0x2E32 || \
+             (dev)->pci_device == 0x2E42 || \
              (dev)->pci_device == 0x0042 || \
              (dev)->pci_device == 0x0046)
 
@@ -875,6 +975,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
              (dev)->pci_device == 0x2E12 || \
              (dev)->pci_device == 0x2E22 || \
              (dev)->pci_device == 0x2E32 || \
+             (dev)->pci_device == 0x2E42 || \
              IS_GM45(dev))
 
 #define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
@@ -908,12 +1009,16 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define SUPPORTS_INTEGRATED_HDMI(dev)   (IS_G4X(dev) || IS_IGDNG(dev))
 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
 #define SUPPORTS_EDP(dev)       (IS_IGDNG_M(dev))
-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
+#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
 /* dsparb controlled by hw only */
 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
 
 #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
 #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
+               (IS_I9XX(dev) || IS_GM45(dev)) && \
+               !IS_IGD(dev) && \
+               !IS_IGDNG(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
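The drm_i915_display_funcs table added above is a per-chipset vtable: it is meant to be filled once at init so callers can go through the pointers instead of repeating IS_*() checks at every call site. An illustrative sketch of that pattern (the function names here are hypothetical, not taken from this patch):

    /* Illustrative only: pick watermark/FIFO callbacks per chipset once... */
    if (IS_G4X(dev)) {
        dev_priv->display.get_fifo_size = g4x_get_fifo_size;    /* hypothetical */
        dev_priv->display.update_wm = g4x_update_wm;            /* hypothetical */
    } else {
        dev_priv->display.get_fifo_size = i9xx_get_fifo_size;   /* hypothetical */
        dev_priv->display.update_wm = i9xx_update_wm;           /* hypothetical */
    }

    /* ... and every later call site stays chipset-agnostic: */
    dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
                                sr_hdisplay, pixel_size);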
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 954fb699131b..abfc27b0c2ea 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,6 +29,7 @@
29#include "drm.h" 29#include "drm.h"
30#include "i915_drm.h" 30#include "i915_drm.h"
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include "i915_trace.h"
32#include "intel_drv.h" 33#include "intel_drv.h"
33#include <linux/swap.h> 34#include <linux/swap.h>
34#include <linux/pci.h> 35#include <linux/pci.h>
@@ -48,11 +49,15 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
48static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
49 unsigned alignment); 50 unsigned alignment);
50static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); 51static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
51static int i915_gem_evict_something(struct drm_device *dev); 52static int i915_gem_evict_something(struct drm_device *dev, int min_size);
53static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
52static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 54static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
53 struct drm_i915_gem_pwrite *args, 55 struct drm_i915_gem_pwrite *args,
54 struct drm_file *file_priv); 56 struct drm_file *file_priv);
55 57
58static LIST_HEAD(shrink_list);
59static DEFINE_SPINLOCK(shrink_list_lock);
60
56int i915_gem_do_init(struct drm_device *dev, unsigned long start, 61int i915_gem_do_init(struct drm_device *dev, unsigned long start,
57 unsigned long end) 62 unsigned long end)
58{ 63{
@@ -316,6 +321,45 @@ fail_unlock:
316 return ret; 321 return ret;
317} 322}
318 323
324static inline gfp_t
325i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
326{
327 return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
328}
329
330static inline void
331i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
332{
333 mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
334}
335
336static int
337i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
338{
339 int ret;
340
341 ret = i915_gem_object_get_pages(obj);
342
343 /* If we've insufficient memory to map in the pages, attempt
344 * to make some space by throwing out some old buffers.
345 */
346 if (ret == -ENOMEM) {
347 struct drm_device *dev = obj->dev;
348 gfp_t gfp;
349
350 ret = i915_gem_evict_something(dev, obj->size);
351 if (ret)
352 return ret;
353
354 gfp = i915_gem_object_get_page_gfp_mask(obj);
355 i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
356 ret = i915_gem_object_get_pages(obj);
357 i915_gem_object_set_page_gfp_mask (obj, gfp);
358 }
359
360 return ret;
361}
362
319/** 363/**
320 * This is the fallback shmem pread path, which allocates temporary storage 364 * This is the fallback shmem pread path, which allocates temporary storage
321 * in kernel space to copy_to_user into outside of the struct_mutex, so we 365 * in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -367,8 +411,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
367 411
368 mutex_lock(&dev->struct_mutex); 412 mutex_lock(&dev->struct_mutex);
369 413
370 ret = i915_gem_object_get_pages(obj); 414 ret = i915_gem_object_get_pages_or_evict(obj);
371 if (ret != 0) 415 if (ret)
372 goto fail_unlock; 416 goto fail_unlock;
373 417
374 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, 418 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
@@ -842,8 +886,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
842 886
843 mutex_lock(&dev->struct_mutex); 887 mutex_lock(&dev->struct_mutex);
844 888
845 ret = i915_gem_object_get_pages(obj); 889 ret = i915_gem_object_get_pages_or_evict(obj);
846 if (ret != 0) 890 if (ret)
847 goto fail_unlock; 891 goto fail_unlock;
848 892
849 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 893 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
@@ -1155,28 +1199,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1155 /* Now bind it into the GTT if needed */ 1199 /* Now bind it into the GTT if needed */
1156 mutex_lock(&dev->struct_mutex); 1200 mutex_lock(&dev->struct_mutex);
1157 if (!obj_priv->gtt_space) { 1201 if (!obj_priv->gtt_space) {
1158 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); 1202 ret = i915_gem_object_bind_to_gtt(obj, 0);
1159 if (ret) { 1203 if (ret)
1160 mutex_unlock(&dev->struct_mutex); 1204 goto unlock;
1161 return VM_FAULT_SIGBUS;
1162 }
1163
1164 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1165 if (ret) {
1166 mutex_unlock(&dev->struct_mutex);
1167 return VM_FAULT_SIGBUS;
1168 }
1169 1205
1170 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 1206 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1207
1208 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1209 if (ret)
1210 goto unlock;
1171 } 1211 }
1172 1212
1173 /* Need a new fence register? */ 1213 /* Need a new fence register? */
1174 if (obj_priv->tiling_mode != I915_TILING_NONE) { 1214 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1175 ret = i915_gem_object_get_fence_reg(obj); 1215 ret = i915_gem_object_get_fence_reg(obj);
1176 if (ret) { 1216 if (ret)
1177 mutex_unlock(&dev->struct_mutex); 1217 goto unlock;
1178 return VM_FAULT_SIGBUS;
1179 }
1180 } 1218 }
1181 1219
1182 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + 1220 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
@@ -1184,18 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1184 1222
1185 /* Finally, remap it using the new GTT offset */ 1223 /* Finally, remap it using the new GTT offset */
1186 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 1224 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1187 1225unlock:
1188 mutex_unlock(&dev->struct_mutex); 1226 mutex_unlock(&dev->struct_mutex);
1189 1227
1190 switch (ret) { 1228 switch (ret) {
1229 case 0:
1230 case -ERESTARTSYS:
1231 return VM_FAULT_NOPAGE;
1191 case -ENOMEM: 1232 case -ENOMEM:
1192 case -EAGAIN: 1233 case -EAGAIN:
1193 return VM_FAULT_OOM; 1234 return VM_FAULT_OOM;
1194 case -EFAULT:
1195 case -EINVAL:
1196 return VM_FAULT_SIGBUS;
1197 default: 1235 default:
1198 return VM_FAULT_NOPAGE; 1236 return VM_FAULT_SIGBUS;
1199 } 1237 }
1200} 1238}
1201 1239
@@ -1388,6 +1426,14 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1388 1426
1389 obj_priv = obj->driver_private; 1427 obj_priv = obj->driver_private;
1390 1428
1429 if (obj_priv->madv != I915_MADV_WILLNEED) {
1430 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1431 drm_gem_object_unreference(obj);
1432 mutex_unlock(&dev->struct_mutex);
1433 return -EINVAL;
1434 }
1435
1436
1391 if (!obj_priv->mmap_offset) { 1437 if (!obj_priv->mmap_offset) {
1392 ret = i915_gem_create_mmap_offset(obj); 1438 ret = i915_gem_create_mmap_offset(obj);
1393 if (ret) { 1439 if (ret) {
@@ -1399,22 +1445,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1399 1445
1400 args->offset = obj_priv->mmap_offset; 1446 args->offset = obj_priv->mmap_offset;
1401 1447
1402 obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
1403
1404 /* Make sure the alignment is correct for fence regs etc */
1405 if (obj_priv->agp_mem &&
1406 (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
1407 drm_gem_object_unreference(obj);
1408 mutex_unlock(&dev->struct_mutex);
1409 return -EINVAL;
1410 }
1411
1412 /* 1448 /*
1413 * Pull it into the GTT so that we have a page list (makes the 1449 * Pull it into the GTT so that we have a page list (makes the
1414 * initial fault faster and any subsequent flushing possible). 1450 * initial fault faster and any subsequent flushing possible).
1415 */ 1451 */
1416 if (!obj_priv->agp_mem) { 1452 if (!obj_priv->agp_mem) {
1417 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); 1453 ret = i915_gem_object_bind_to_gtt(obj, 0);
1418 if (ret) { 1454 if (ret) {
1419 drm_gem_object_unreference(obj); 1455 drm_gem_object_unreference(obj);
1420 mutex_unlock(&dev->struct_mutex); 1456 mutex_unlock(&dev->struct_mutex);
@@ -1437,6 +1473,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1437 int i; 1473 int i;
1438 1474
1439 BUG_ON(obj_priv->pages_refcount == 0); 1475 BUG_ON(obj_priv->pages_refcount == 0);
1476 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
1440 1477
1441 if (--obj_priv->pages_refcount != 0) 1478 if (--obj_priv->pages_refcount != 0)
1442 return; 1479 return;
@@ -1444,13 +1481,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1444 if (obj_priv->tiling_mode != I915_TILING_NONE) 1481 if (obj_priv->tiling_mode != I915_TILING_NONE)
1445 i915_gem_object_save_bit_17_swizzle(obj); 1482 i915_gem_object_save_bit_17_swizzle(obj);
1446 1483
1447 for (i = 0; i < page_count; i++) 1484 if (obj_priv->madv == I915_MADV_DONTNEED)
1448 if (obj_priv->pages[i] != NULL) { 1485 obj_priv->dirty = 0;
1449 if (obj_priv->dirty) 1486
1450 set_page_dirty(obj_priv->pages[i]); 1487 for (i = 0; i < page_count; i++) {
1488 if (obj_priv->pages[i] == NULL)
1489 break;
1490
1491 if (obj_priv->dirty)
1492 set_page_dirty(obj_priv->pages[i]);
1493
1494 if (obj_priv->madv == I915_MADV_WILLNEED)
1451 mark_page_accessed(obj_priv->pages[i]); 1495 mark_page_accessed(obj_priv->pages[i]);
1452 page_cache_release(obj_priv->pages[i]); 1496
1453 } 1497 page_cache_release(obj_priv->pages[i]);
1498 }
1454 obj_priv->dirty = 0; 1499 obj_priv->dirty = 0;
1455 1500
1456 drm_free_large(obj_priv->pages); 1501 drm_free_large(obj_priv->pages);
@@ -1489,6 +1534,26 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1489 obj_priv->last_rendering_seqno = 0; 1534 obj_priv->last_rendering_seqno = 0;
1490} 1535}
1491 1536
1537/* Immediately discard the backing storage */
1538static void
1539i915_gem_object_truncate(struct drm_gem_object *obj)
1540{
1541 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1542 struct inode *inode;
1543
1544 inode = obj->filp->f_path.dentry->d_inode;
1545 if (inode->i_op->truncate)
1546 inode->i_op->truncate (inode);
1547
1548 obj_priv->madv = __I915_MADV_PURGED;
1549}
1550
1551static inline int
1552i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1553{
1554 return obj_priv->madv == I915_MADV_DONTNEED;
1555}
1556
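The new madv field is effectively a three-state machine: userspace toggles an object between I915_MADV_WILLNEED and I915_MADV_DONTNEED, and the kernel moves a DONTNEED object into the internal __I915_MADV_PURGED state once i915_gem_object_truncate() has dropped its backing pages; a purged object can never be revived. A minimal standalone sketch of that policy (the enum and helper below are illustrative, not the driver's actual definitions):

#include <assert.h>

enum madv { MADV_WILLNEED, MADV_DONTNEED, MADV_PURGED };

/* Userspace may flip WILLNEED/DONTNEED; nothing ever un-purges an object. */
static enum madv madv_from_user(enum madv cur, enum madv req)
{
	assert(req == MADV_WILLNEED || req == MADV_DONTNEED);
	return cur == MADV_PURGED ? MADV_PURGED : req;
}

int main(void)
{
	enum madv m = MADV_WILLNEED;
	m = madv_from_user(m, MADV_DONTNEED);	/* now purgeable */
	m = MADV_PURGED;			/* kernel truncated the pages */
	m = madv_from_user(m, MADV_WILLNEED);	/* too late: stays purged */
	assert(m == MADV_PURGED);
	return 0;
}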
1492static void 1557static void
1493i915_gem_object_move_to_inactive(struct drm_gem_object *obj) 1558i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1494{ 1559{
@@ -1577,15 +1642,24 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1577 1642
1578 if ((obj->write_domain & flush_domains) == 1643 if ((obj->write_domain & flush_domains) ==
1579 obj->write_domain) { 1644 obj->write_domain) {
1645 uint32_t old_write_domain = obj->write_domain;
1646
1580 obj->write_domain = 0; 1647 obj->write_domain = 0;
1581 i915_gem_object_move_to_active(obj, seqno); 1648 i915_gem_object_move_to_active(obj, seqno);
1649
1650 trace_i915_gem_object_change_domain(obj,
1651 obj->read_domains,
1652 old_write_domain);
1582 } 1653 }
1583 } 1654 }
1584 1655
1585 } 1656 }
1586 1657
1587 if (was_empty && !dev_priv->mm.suspended) 1658 if (!dev_priv->mm.suspended) {
1588 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1659 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1660 if (was_empty)
1661 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1662 }
1589 return seqno; 1663 return seqno;
1590} 1664}
1591 1665
@@ -1623,6 +1697,8 @@ i915_gem_retire_request(struct drm_device *dev,
1623{ 1697{
1624 drm_i915_private_t *dev_priv = dev->dev_private; 1698 drm_i915_private_t *dev_priv = dev->dev_private;
1625 1699
1700 trace_i915_gem_request_retire(dev, request->seqno);
1701
1626 /* Move any buffers on the active list that are no longer referenced 1702 /* Move any buffers on the active list that are no longer referenced
1627 * by the ringbuffer to the flushing/inactive lists as appropriate. 1703 * by the ringbuffer to the flushing/inactive lists as appropriate.
1628 */ 1704 */
@@ -1671,7 +1747,7 @@ out:
1671/** 1747/**
1672 * Returns true if seq1 is later than seq2. 1748 * Returns true if seq1 is later than seq2.
1673 */ 1749 */
1674static int 1750bool
1675i915_seqno_passed(uint32_t seq1, uint32_t seq2) 1751i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1676{ 1752{
1677 return (int32_t)(seq1 - seq2) >= 0; 1753 return (int32_t)(seq1 - seq2) >= 0;
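i915_seqno_passed() goes from static int to an exported bool above because the hangcheck code in i915_irq.c now needs it. The comparison survives wraparound of the 32-bit sequence counter: subtracting in unsigned arithmetic and reading the result as signed correctly orders any two seqnos that are less than 2^31 apart. A self-contained demonstration (the function here just mirrors the one-liner above):

#include <stdint.h>
#include <stdio.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* 0x00000002 is "after" 0xfffffffe even though it is numerically smaller. */
	printf("%d\n", seqno_passed(0x00000002u, 0xfffffffeu));	/* prints 1 */
	printf("%d\n", seqno_passed(0xfffffffeu, 0x00000002u));	/* prints 0 */
	return 0;
}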
@@ -1694,7 +1770,7 @@ i915_gem_retire_requests(struct drm_device *dev)
1694 drm_i915_private_t *dev_priv = dev->dev_private; 1770 drm_i915_private_t *dev_priv = dev->dev_private;
1695 uint32_t seqno; 1771 uint32_t seqno;
1696 1772
1697 if (!dev_priv->hw_status_page) 1773 if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
1698 return; 1774 return;
1699 1775
1700 seqno = i915_get_gem_seqno(dev); 1776 seqno = i915_get_gem_seqno(dev);
@@ -1709,7 +1785,7 @@ i915_gem_retire_requests(struct drm_device *dev)
1709 retiring_seqno = request->seqno; 1785 retiring_seqno = request->seqno;
1710 1786
1711 if (i915_seqno_passed(seqno, retiring_seqno) || 1787 if (i915_seqno_passed(seqno, retiring_seqno) ||
1712 dev_priv->mm.wedged) { 1788 atomic_read(&dev_priv->mm.wedged)) {
1713 i915_gem_retire_request(dev, request); 1789 i915_gem_retire_request(dev, request);
1714 1790
1715 list_del(&request->list); 1791 list_del(&request->list);
@@ -1718,6 +1794,12 @@ i915_gem_retire_requests(struct drm_device *dev)
1718 } else 1794 } else
1719 break; 1795 break;
1720 } 1796 }
1797
1798 if (unlikely(dev_priv->trace_irq_seqno &&

1799 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1800 i915_user_irq_put(dev);
1801 dev_priv->trace_irq_seqno = 0;
1802 }
1721} 1803}
1722 1804
1723void 1805void
@@ -1751,6 +1833,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1751 1833
1752 BUG_ON(seqno == 0); 1834 BUG_ON(seqno == 0);
1753 1835
1836 if (atomic_read(&dev_priv->mm.wedged))
1837 return -EIO;
1838
1754 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1839 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1755 if (IS_IGDNG(dev)) 1840 if (IS_IGDNG(dev))
1756 ier = I915_READ(DEIER) | I915_READ(GTIER); 1841 ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1763,16 +1848,20 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1763 i915_driver_irq_postinstall(dev); 1848 i915_driver_irq_postinstall(dev);
1764 } 1849 }
1765 1850
1851 trace_i915_gem_request_wait_begin(dev, seqno);
1852
1766 dev_priv->mm.waiting_gem_seqno = seqno; 1853 dev_priv->mm.waiting_gem_seqno = seqno;
1767 i915_user_irq_get(dev); 1854 i915_user_irq_get(dev);
1768 ret = wait_event_interruptible(dev_priv->irq_queue, 1855 ret = wait_event_interruptible(dev_priv->irq_queue,
1769 i915_seqno_passed(i915_get_gem_seqno(dev), 1856 i915_seqno_passed(i915_get_gem_seqno(dev),
1770 seqno) || 1857 seqno) ||
1771 dev_priv->mm.wedged); 1858 atomic_read(&dev_priv->mm.wedged));
1772 i915_user_irq_put(dev); 1859 i915_user_irq_put(dev);
1773 dev_priv->mm.waiting_gem_seqno = 0; 1860 dev_priv->mm.waiting_gem_seqno = 0;
1861
1862 trace_i915_gem_request_wait_end(dev, seqno);
1774 } 1863 }
1775 if (dev_priv->mm.wedged) 1864 if (atomic_read(&dev_priv->mm.wedged))
1776 ret = -EIO; 1865 ret = -EIO;
1777 1866
1778 if (ret && ret != -ERESTARTSYS) 1867 if (ret && ret != -ERESTARTSYS)
@@ -1803,6 +1892,8 @@ i915_gem_flush(struct drm_device *dev,
1803 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, 1892 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1804 invalidate_domains, flush_domains); 1893 invalidate_domains, flush_domains);
1805#endif 1894#endif
1895 trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
1896 invalidate_domains, flush_domains);
1806 1897
1807 if (flush_domains & I915_GEM_DOMAIN_CPU) 1898 if (flush_domains & I915_GEM_DOMAIN_CPU)
1808 drm_agp_chipset_flush(dev); 1899 drm_agp_chipset_flush(dev);
@@ -1915,6 +2006,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1915 return -EINVAL; 2006 return -EINVAL;
1916 } 2007 }
1917 2008
2009 /* blow away mappings if mapped through GTT */
2010 i915_gem_release_mmap(obj);
2011
2012 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2013 i915_gem_clear_fence_reg(obj);
2014
1918 /* Move the object to the CPU domain to ensure that 2015 /* Move the object to the CPU domain to ensure that
1919 * any possible CPU writes while it's not in the GTT 2016 * any possible CPU writes while it's not in the GTT
1920 * are flushed when we go to remap it. This will 2017 * are flushed when we go to remap it. This will
@@ -1928,21 +2025,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1928 return ret; 2025 return ret;
1929 } 2026 }
1930 2027
2028 BUG_ON(obj_priv->active);
2029
1931 if (obj_priv->agp_mem != NULL) { 2030 if (obj_priv->agp_mem != NULL) {
1932 drm_unbind_agp(obj_priv->agp_mem); 2031 drm_unbind_agp(obj_priv->agp_mem);
1933 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); 2032 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1934 obj_priv->agp_mem = NULL; 2033 obj_priv->agp_mem = NULL;
1935 } 2034 }
1936 2035
1937 BUG_ON(obj_priv->active);
1938
1939 /* blow away mappings if mapped through GTT */
1940 i915_gem_release_mmap(obj);
1941
1942 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1943 i915_gem_clear_fence_reg(obj);
1944
1945 i915_gem_object_put_pages(obj); 2036 i915_gem_object_put_pages(obj);
2037 BUG_ON(obj_priv->pages_refcount);
1946 2038
1947 if (obj_priv->gtt_space) { 2039 if (obj_priv->gtt_space) {
1948 atomic_dec(&dev->gtt_count); 2040 atomic_dec(&dev->gtt_count);
@@ -1956,40 +2048,113 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1956 if (!list_empty(&obj_priv->list)) 2048 if (!list_empty(&obj_priv->list))
1957 list_del_init(&obj_priv->list); 2049 list_del_init(&obj_priv->list);
1958 2050
2051 if (i915_gem_object_is_purgeable(obj_priv))
2052 i915_gem_object_truncate(obj);
2053
2054 trace_i915_gem_object_unbind(obj);
2055
2056 return 0;
2057}
2058
2059static struct drm_gem_object *
2060i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2061{
2062 drm_i915_private_t *dev_priv = dev->dev_private;
2063 struct drm_i915_gem_object *obj_priv;
2064 struct drm_gem_object *best = NULL;
2065 struct drm_gem_object *first = NULL;
2066
2067 /* Try to find the smallest clean object */
2068 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
2069 struct drm_gem_object *obj = obj_priv->obj;
2070 if (obj->size >= min_size) {
2071 if ((!obj_priv->dirty ||
2072 i915_gem_object_is_purgeable(obj_priv)) &&
2073 (!best || obj->size < best->size)) {
2074 best = obj;
2075 if (best->size == min_size)
2076 return best;
2077 }
2078 if (!first)
2079 first = obj;
2080 }
2081 }
2082
2083 return best ? best : first;
2084}
2085
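i915_gem_find_inactive_object() is a best-fit search with a fallback: prefer the smallest clean or purgeable inactive object that is at least min_size, return immediately on an exact fit, and otherwise fall back to the first large-enough object even if evicting it will cost a writeback. The same selection policy reduced to a toy over plain arrays (purely illustrative):

#include <stdio.h>

/* Index of smallest clean entry >= min_size; else first big-enough entry. */
static int find_victim(const int *size, const int *dirty, int n, int min_size)
{
	int best = -1, first = -1;

	for (int i = 0; i < n; i++) {
		if (size[i] < min_size)
			continue;
		if (!dirty[i] && (best < 0 || size[i] < size[best])) {
			best = i;
			if (size[i] == min_size)
				return i;	/* exact fit, stop early */
		}
		if (first < 0)
			first = i;
	}
	return best >= 0 ? best : first;
}

int main(void)
{
	int size[]  = { 8, 16, 4, 12 };
	int dirty[] = { 1,  0, 0,  0 };
	printf("%d\n", find_victim(size, dirty, 4, 8)); /* 3: 12 is smallest clean fit */
	return 0;
}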
2086static int
2087i915_gem_evict_everything(struct drm_device *dev)
2088{
2089 drm_i915_private_t *dev_priv = dev->dev_private;
2090 uint32_t seqno;
2091 int ret;
2092 bool lists_empty;
2093
2094 spin_lock(&dev_priv->mm.active_list_lock);
2095 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2096 list_empty(&dev_priv->mm.flushing_list) &&
2097 list_empty(&dev_priv->mm.active_list));
2098 spin_unlock(&dev_priv->mm.active_list_lock);
2099
2100 if (lists_empty)
2101 return -ENOSPC;
2102
2103 /* Flush everything (on to the inactive lists) and evict */
2104 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2105 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2106 if (seqno == 0)
2107 return -ENOMEM;
2108
2109 ret = i915_wait_request(dev, seqno);
2110 if (ret)
2111 return ret;
2112
2113 ret = i915_gem_evict_from_inactive_list(dev);
2114 if (ret)
2115 return ret;
2116
2117 spin_lock(&dev_priv->mm.active_list_lock);
2118 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2119 list_empty(&dev_priv->mm.flushing_list) &&
2120 list_empty(&dev_priv->mm.active_list));
2121 spin_unlock(&dev_priv->mm.active_list_lock);
2122 BUG_ON(!lists_empty);
2123
1959 return 0; 2124 return 0;
1960} 2125}
1961 2126
1962static int 2127static int
1963i915_gem_evict_something(struct drm_device *dev) 2128i915_gem_evict_something(struct drm_device *dev, int min_size)
1964{ 2129{
1965 drm_i915_private_t *dev_priv = dev->dev_private; 2130 drm_i915_private_t *dev_priv = dev->dev_private;
1966 struct drm_gem_object *obj; 2131 struct drm_gem_object *obj;
1967 struct drm_i915_gem_object *obj_priv; 2132 int ret;
1968 int ret = 0;
1969 2133
1970 for (;;) { 2134 for (;;) {
2135 i915_gem_retire_requests(dev);
2136
1971 /* If there's an inactive buffer available now, grab it 2137 /* If there's an inactive buffer available now, grab it
1972 * and be done. 2138 * and be done.
1973 */ 2139 */
1974 if (!list_empty(&dev_priv->mm.inactive_list)) { 2140 obj = i915_gem_find_inactive_object(dev, min_size);
1975 obj_priv = list_first_entry(&dev_priv->mm.inactive_list, 2141 if (obj) {
1976 struct drm_i915_gem_object, 2142 struct drm_i915_gem_object *obj_priv;
1977 list); 2143
1978 obj = obj_priv->obj;
1979 BUG_ON(obj_priv->pin_count != 0);
1980#if WATCH_LRU 2144#if WATCH_LRU
1981 DRM_INFO("%s: evicting %p\n", __func__, obj); 2145 DRM_INFO("%s: evicting %p\n", __func__, obj);
1982#endif 2146#endif
2147 obj_priv = obj->driver_private;
2148 BUG_ON(obj_priv->pin_count != 0);
1983 BUG_ON(obj_priv->active); 2149 BUG_ON(obj_priv->active);
1984 2150
1985 /* Wait on the rendering and unbind the buffer. */ 2151 /* Wait on the rendering and unbind the buffer. */
1986 ret = i915_gem_object_unbind(obj); 2152 return i915_gem_object_unbind(obj);
1987 break;
1988 } 2153 }
1989 2154
1990 /* If we didn't get anything, but the ring is still processing 2155 /* If we didn't get anything, but the ring is still processing
1991 * things, wait for one of those things to finish and hopefully 2156 * things, wait for the next to finish and hopefully leave us
1992 * leave us a buffer to evict. 2157 * a buffer to evict.
1993 */ 2158 */
1994 if (!list_empty(&dev_priv->mm.request_list)) { 2159 if (!list_empty(&dev_priv->mm.request_list)) {
1995 struct drm_i915_gem_request *request; 2160 struct drm_i915_gem_request *request;
@@ -2000,16 +2165,9 @@ i915_gem_evict_something(struct drm_device *dev)
2000 2165
2001 ret = i915_wait_request(dev, request->seqno); 2166 ret = i915_wait_request(dev, request->seqno);
2002 if (ret) 2167 if (ret)
2003 break; 2168 return ret;
2004 2169
2005 /* if waiting caused an object to become inactive, 2170 continue;
2006 * then loop around and wait for it. Otherwise, we
2007 * assume that waiting freed and unbound something,
2008 * so there should now be some space in the GTT
2009 */
2010 if (!list_empty(&dev_priv->mm.inactive_list))
2011 continue;
2012 break;
2013 } 2171 }
2014 2172
2015 /* If we didn't have anything on the request list but there 2173 /* If we didn't have anything on the request list but there
@@ -2018,46 +2176,44 @@ i915_gem_evict_something(struct drm_device *dev)
2018 * will get moved to inactive. 2176 * will get moved to inactive.
2019 */ 2177 */
2020 if (!list_empty(&dev_priv->mm.flushing_list)) { 2178 if (!list_empty(&dev_priv->mm.flushing_list)) {
2021 obj_priv = list_first_entry(&dev_priv->mm.flushing_list, 2179 struct drm_i915_gem_object *obj_priv;
2022 struct drm_i915_gem_object,
2023 list);
2024 obj = obj_priv->obj;
2025 2180
2026 i915_gem_flush(dev, 2181 /* Find an object that we can immediately reuse */
2027 obj->write_domain, 2182 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
2028 obj->write_domain); 2183 obj = obj_priv->obj;
2029 i915_add_request(dev, NULL, obj->write_domain); 2184 if (obj->size >= min_size)
2185 break;
2030 2186
2031 obj = NULL; 2187 obj = NULL;
2032 continue; 2188 }
2033 }
2034 2189
2035 DRM_ERROR("inactive empty %d request empty %d " 2190 if (obj != NULL) {
2036 "flushing empty %d\n", 2191 uint32_t seqno;
2037 list_empty(&dev_priv->mm.inactive_list),
2038 list_empty(&dev_priv->mm.request_list),
2039 list_empty(&dev_priv->mm.flushing_list));
2040 /* If we didn't do any of the above, there's nothing to be done
2041 * and we just can't fit it in.
2042 */
2043 return -ENOSPC;
2044 }
2045 return ret;
2046}
2047 2192
2048static int 2193 i915_gem_flush(dev,
2049i915_gem_evict_everything(struct drm_device *dev) 2194 obj->write_domain,
2050{ 2195 obj->write_domain);
2051 int ret; 2196 seqno = i915_add_request(dev, NULL, obj->write_domain);
2197 if (seqno == 0)
2198 return -ENOMEM;
2052 2199
2053 for (;;) { 2200 ret = i915_wait_request(dev, seqno);
2054 ret = i915_gem_evict_something(dev); 2201 if (ret)
2055 if (ret != 0) 2202 return ret;
2056 break; 2203
2204 continue;
2205 }
2206 }
2207
2208 /* If we didn't do any of the above, there's no single buffer
2209 * large enough to swap out for the new one, so just evict
2210 * everything and start again. (This should be rare.)
2211 */
2212 if (!list_empty(&dev_priv->mm.inactive_list))
2213 return i915_gem_evict_from_inactive_list(dev);
2214 else
2215 return i915_gem_evict_everything(dev);
2057 } 2216 }
2058 if (ret == -ENOSPC)
2059 return 0;
2060 return ret;
2061} 2217}
2062 2218
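The rewritten i915_gem_evict_something() above replaces the old ad-hoc loop with three escalating strategies per iteration: take a suitable inactive buffer if one exists, otherwise wait on the oldest outstanding request, otherwise flush a large-enough buffer from the flushing list and wait for it; only when all three come up empty does it fall back to emptying the inactive list, or everything. A sketch of that control flow with stubbed-out helpers (the stub names are invented for illustration and always report "nothing found" here):

#include <stdbool.h>

static bool inactive_victim(int min_size)   { (void)min_size; return false; }
static bool wait_oldest_request(void)       { return false; }
static bool flush_and_wait(int min_size)    { (void)min_size; return false; }
static int  evict_everything(void)          { return 0; }

/* Shape of the loop: escalate until something frees enough space. */
static int evict_something(int min_size)
{
	for (;;) {
		if (inactive_victim(min_size))
			return 0;	/* found one: unbind it and stop */
		if (wait_oldest_request())
			continue;	/* waiting may have inactivated a buffer */
		if (flush_and_wait(min_size))
			continue;	/* flushed a big-enough buffer; retry */
		return evict_everything();	/* last resort */
	}
}

int main(void)
{
	return evict_something(4096);
}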
2063int 2219int
@@ -2080,7 +2236,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2080 BUG_ON(obj_priv->pages != NULL); 2236 BUG_ON(obj_priv->pages != NULL);
2081 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); 2237 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2082 if (obj_priv->pages == NULL) { 2238 if (obj_priv->pages == NULL) {
2083 DRM_ERROR("Faled to allocate page list\n");
2084 obj_priv->pages_refcount--; 2239 obj_priv->pages_refcount--;
2085 return -ENOMEM; 2240 return -ENOMEM;
2086 } 2241 }
@@ -2091,7 +2246,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2091 page = read_mapping_page(mapping, i, NULL); 2246 page = read_mapping_page(mapping, i, NULL);
2092 if (IS_ERR(page)) { 2247 if (IS_ERR(page)) {
2093 ret = PTR_ERR(page); 2248 ret = PTR_ERR(page);
2094 DRM_ERROR("read_mapping_page failed: %d\n", ret);
2095 i915_gem_object_put_pages(obj); 2249 i915_gem_object_put_pages(obj);
2096 return ret; 2250 return ret;
2097 } 2251 }
@@ -2328,6 +2482,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2328 else 2482 else
2329 i830_write_fence_reg(reg); 2483 i830_write_fence_reg(reg);
2330 2484
2485 trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
2486
2331 return 0; 2487 return 0;
2332} 2488}
2333 2489
@@ -2410,10 +2566,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2410 drm_i915_private_t *dev_priv = dev->dev_private; 2566 drm_i915_private_t *dev_priv = dev->dev_private;
2411 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2567 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2412 struct drm_mm_node *free_space; 2568 struct drm_mm_node *free_space;
2413 int page_count, ret; 2569 bool retry_alloc = false;
2570 int ret;
2414 2571
2415 if (dev_priv->mm.suspended) 2572 if (dev_priv->mm.suspended)
2416 return -EBUSY; 2573 return -EBUSY;
2574
2575 if (obj_priv->madv != I915_MADV_WILLNEED) {
2576 DRM_ERROR("Attempting to bind a purgeable object\n");
2577 return -EINVAL;
2578 }
2579
2417 if (alignment == 0) 2580 if (alignment == 0)
2418 alignment = i915_gem_get_gtt_alignment(obj); 2581 alignment = i915_gem_get_gtt_alignment(obj);
2419 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { 2582 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
@@ -2433,30 +2596,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2433 } 2596 }
2434 } 2597 }
2435 if (obj_priv->gtt_space == NULL) { 2598 if (obj_priv->gtt_space == NULL) {
2436 bool lists_empty;
2437
2438 /* If the gtt is empty and we're still having trouble 2599 /* If the gtt is empty and we're still having trouble
2439 * fitting our object in, we're out of memory. 2600 * fitting our object in, we're out of memory.
2440 */ 2601 */
2441#if WATCH_LRU 2602#if WATCH_LRU
2442 DRM_INFO("%s: GTT full, evicting something\n", __func__); 2603 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2443#endif 2604#endif
2444 spin_lock(&dev_priv->mm.active_list_lock); 2605 ret = i915_gem_evict_something(dev, obj->size);
2445 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 2606 if (ret)
2446 list_empty(&dev_priv->mm.flushing_list) &&
2447 list_empty(&dev_priv->mm.active_list));
2448 spin_unlock(&dev_priv->mm.active_list_lock);
2449 if (lists_empty) {
2450 DRM_ERROR("GTT full, but LRU list empty\n");
2451 return -ENOSPC;
2452 }
2453
2454 ret = i915_gem_evict_something(dev);
2455 if (ret != 0) {
2456 if (ret != -ERESTARTSYS)
2457 DRM_ERROR("Failed to evict a buffer %d\n", ret);
2458 return ret; 2607 return ret;
2459 } 2608
2460 goto search_free; 2609 goto search_free;
2461 } 2610 }
2462 2611
@@ -2464,27 +2613,56 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2464 DRM_INFO("Binding object of size %zd at 0x%08x\n", 2613 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2465 obj->size, obj_priv->gtt_offset); 2614 obj->size, obj_priv->gtt_offset);
2466#endif 2615#endif
2616 if (retry_alloc) {
2617 i915_gem_object_set_page_gfp_mask(obj,
2618 i915_gem_object_get_page_gfp_mask(obj) & ~__GFP_NORETRY);
2619 }
2467 ret = i915_gem_object_get_pages(obj); 2620 ret = i915_gem_object_get_pages(obj);
2621 if (retry_alloc) {
2622 i915_gem_object_set_page_gfp_mask(obj,
2623 i915_gem_object_get_page_gfp_mask(obj) | __GFP_NORETRY);
2624 }
2468 if (ret) { 2625 if (ret) {
2469 drm_mm_put_block(obj_priv->gtt_space); 2626 drm_mm_put_block(obj_priv->gtt_space);
2470 obj_priv->gtt_space = NULL; 2627 obj_priv->gtt_space = NULL;
2628
2629 if (ret == -ENOMEM) {
2630 /* first try to clear up some space from the GTT */
2631 ret = i915_gem_evict_something(dev, obj->size);
2632 if (ret) {
2633 /* now try to shrink everyone else */
2634 if (!retry_alloc) {
2635 retry_alloc = true;
2636 goto search_free;
2637 }
2638
2639 return ret;
2640 }
2641
2642 goto search_free;
2643 }
2644
2471 return ret; 2645 return ret;
2472 } 2646 }
2473 2647
2474 page_count = obj->size / PAGE_SIZE;
2475 /* Create an AGP memory structure pointing at our pages, and bind it 2648 /* Create an AGP memory structure pointing at our pages, and bind it
2476 * into the GTT. 2649 * into the GTT.
2477 */ 2650 */
2478 obj_priv->agp_mem = drm_agp_bind_pages(dev, 2651 obj_priv->agp_mem = drm_agp_bind_pages(dev,
2479 obj_priv->pages, 2652 obj_priv->pages,
2480 page_count, 2653 obj->size >> PAGE_SHIFT,
2481 obj_priv->gtt_offset, 2654 obj_priv->gtt_offset,
2482 obj_priv->agp_type); 2655 obj_priv->agp_type);
2483 if (obj_priv->agp_mem == NULL) { 2656 if (obj_priv->agp_mem == NULL) {
2484 i915_gem_object_put_pages(obj); 2657 i915_gem_object_put_pages(obj);
2485 drm_mm_put_block(obj_priv->gtt_space); 2658 drm_mm_put_block(obj_priv->gtt_space);
2486 obj_priv->gtt_space = NULL; 2659 obj_priv->gtt_space = NULL;
2487 return -ENOMEM; 2660
2661 ret = i915_gem_evict_something(dev, obj->size);
2662 if (ret)
2663 return ret;
2664
2665 goto search_free;
2488 } 2666 }
2489 atomic_inc(&dev->gtt_count); 2667 atomic_inc(&dev->gtt_count);
2490 atomic_add(obj->size, &dev->gtt_memory); 2668 atomic_add(obj->size, &dev->gtt_memory);
@@ -2496,6 +2674,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2496 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); 2674 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2497 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); 2675 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2498 2676
2677 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2678
2499 return 0; 2679 return 0;
2500} 2680}
2501 2681
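The retry_alloc logic above gives object page allocation two speeds: normally __GFP_NORETRY stays set on the object's mapping, so shmem allocations fail fast instead of invoking the OOM killer, and only after eviction has also failed does the code clear the flag and let the allocator block and retry once more. Roughly, as a standalone sketch (both stubs are placeholders for the real allocator and evictor, wired to fail so the fallback path is exercised):

#include <stdbool.h>
#include <errno.h>

static int get_pages(bool may_block) { (void)may_block; return -ENOMEM; }
static int evict(void)               { return -ENOSPC; }

static int bind_with_retry(void)
{
	bool retry = false;

	for (;;) {
		if (get_pages(retry) == 0)
			return 0;
		if (evict() == 0)
			continue;	/* GTT space freed; try the cheap path again */
		if (!retry) {
			retry = true;	/* drop __GFP_NORETRY and try harder */
			continue;
		}
		return -ENOMEM;		/* truly out of memory */
	}
}

int main(void)
{
	return bind_with_retry() ? 1 : 0;
}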
@@ -2511,15 +2691,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
2511 if (obj_priv->pages == NULL) 2691 if (obj_priv->pages == NULL)
2512 return; 2692 return;
2513 2693
2514 /* XXX: The 865 in particular appears to be weird in how it handles 2694 trace_i915_gem_object_clflush(obj);
2515 * cache flushing. We haven't figured it out, but the
2516 * clflush+agp_chipset_flush doesn't appear to successfully get the
2517 * data visible to the PGU, while wbinvd + agp_chipset_flush does.
2518 */
2519 if (IS_I865G(obj->dev)) {
2520 wbinvd();
2521 return;
2522 }
2523 2695
2524 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); 2696 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2525} 2697}
@@ -2530,21 +2702,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2530{ 2702{
2531 struct drm_device *dev = obj->dev; 2703 struct drm_device *dev = obj->dev;
2532 uint32_t seqno; 2704 uint32_t seqno;
2705 uint32_t old_write_domain;
2533 2706
2534 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2707 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2535 return; 2708 return;
2536 2709
2537 /* Queue the GPU write cache flushing we need. */ 2710 /* Queue the GPU write cache flushing we need. */
2711 old_write_domain = obj->write_domain;
2538 i915_gem_flush(dev, 0, obj->write_domain); 2712 i915_gem_flush(dev, 0, obj->write_domain);
2539 seqno = i915_add_request(dev, NULL, obj->write_domain); 2713 seqno = i915_add_request(dev, NULL, obj->write_domain);
2540 obj->write_domain = 0; 2714 obj->write_domain = 0;
2541 i915_gem_object_move_to_active(obj, seqno); 2715 i915_gem_object_move_to_active(obj, seqno);
2716
2717 trace_i915_gem_object_change_domain(obj,
2718 obj->read_domains,
2719 old_write_domain);
2542} 2720}
2543 2721
2544/** Flushes the GTT write domain for the object if it's dirty. */ 2722/** Flushes the GTT write domain for the object if it's dirty. */
2545static void 2723static void
2546i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) 2724i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2547{ 2725{
2726 uint32_t old_write_domain;
2727
2548 if (obj->write_domain != I915_GEM_DOMAIN_GTT) 2728 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2549 return; 2729 return;
2550 2730
@@ -2552,7 +2732,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2552 * to it immediately go to main memory as far as we know, so there's 2732 * to it immediately go to main memory as far as we know, so there's
2553 * no chipset flush. It also doesn't land in render cache. 2733 * no chipset flush. It also doesn't land in render cache.
2554 */ 2734 */
2735 old_write_domain = obj->write_domain;
2555 obj->write_domain = 0; 2736 obj->write_domain = 0;
2737
2738 trace_i915_gem_object_change_domain(obj,
2739 obj->read_domains,
2740 old_write_domain);
2556} 2741}
2557 2742
2558/** Flushes the CPU write domain for the object if it's dirty. */ 2743/** Flushes the CPU write domain for the object if it's dirty. */
@@ -2560,13 +2745,19 @@ static void
2560i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) 2745i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2561{ 2746{
2562 struct drm_device *dev = obj->dev; 2747 struct drm_device *dev = obj->dev;
2748 uint32_t old_write_domain;
2563 2749
2564 if (obj->write_domain != I915_GEM_DOMAIN_CPU) 2750 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2565 return; 2751 return;
2566 2752
2567 i915_gem_clflush_object(obj); 2753 i915_gem_clflush_object(obj);
2568 drm_agp_chipset_flush(dev); 2754 drm_agp_chipset_flush(dev);
2755 old_write_domain = obj->write_domain;
2569 obj->write_domain = 0; 2756 obj->write_domain = 0;
2757
2758 trace_i915_gem_object_change_domain(obj,
2759 obj->read_domains,
2760 old_write_domain);
2570} 2761}
2571 2762
2572/** 2763/**
@@ -2579,6 +2770,7 @@ int
2579i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) 2770i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2580{ 2771{
2581 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2772 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2773 uint32_t old_write_domain, old_read_domains;
2582 int ret; 2774 int ret;
2583 2775
2584 /* Not valid to be called on unbound objects. */ 2776 /* Not valid to be called on unbound objects. */
@@ -2591,6 +2783,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2591 if (ret != 0) 2783 if (ret != 0)
2592 return ret; 2784 return ret;
2593 2785
2786 old_write_domain = obj->write_domain;
2787 old_read_domains = obj->read_domains;
2788
2594 /* If we're writing through the GTT domain, then CPU and GPU caches 2789 /* If we're writing through the GTT domain, then CPU and GPU caches
2595 * will need to be invalidated at next use. 2790 * will need to be invalidated at next use.
2596 */ 2791 */
@@ -2609,6 +2804,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2609 obj_priv->dirty = 1; 2804 obj_priv->dirty = 1;
2610 } 2805 }
2611 2806
2807 trace_i915_gem_object_change_domain(obj,
2808 old_read_domains,
2809 old_write_domain);
2810
2612 return 0; 2811 return 0;
2613} 2812}
2614 2813
@@ -2621,6 +2820,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2621static int 2820static int
2622i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) 2821i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2623{ 2822{
2823 uint32_t old_write_domain, old_read_domains;
2624 int ret; 2824 int ret;
2625 2825
2626 i915_gem_object_flush_gpu_write_domain(obj); 2826 i915_gem_object_flush_gpu_write_domain(obj);
@@ -2636,6 +2836,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2636 */ 2836 */
2637 i915_gem_object_set_to_full_cpu_read_domain(obj); 2837 i915_gem_object_set_to_full_cpu_read_domain(obj);
2638 2838
2839 old_write_domain = obj->write_domain;
2840 old_read_domains = obj->read_domains;
2841
2639 /* Flush the CPU cache if it's still invalid. */ 2842 /* Flush the CPU cache if it's still invalid. */
2640 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { 2843 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2641 i915_gem_clflush_object(obj); 2844 i915_gem_clflush_object(obj);
@@ -2656,6 +2859,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2656 obj->write_domain = I915_GEM_DOMAIN_CPU; 2859 obj->write_domain = I915_GEM_DOMAIN_CPU;
2657 } 2860 }
2658 2861
2862 trace_i915_gem_object_change_domain(obj,
2863 old_read_domains,
2864 old_write_domain);
2865
2659 return 0; 2866 return 0;
2660} 2867}
2661 2868
@@ -2777,6 +2984,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2777 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2984 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2778 uint32_t invalidate_domains = 0; 2985 uint32_t invalidate_domains = 0;
2779 uint32_t flush_domains = 0; 2986 uint32_t flush_domains = 0;
2987 uint32_t old_read_domains;
2780 2988
2781 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); 2989 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2782 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); 2990 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
@@ -2823,6 +3031,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2823 i915_gem_clflush_object(obj); 3031 i915_gem_clflush_object(obj);
2824 } 3032 }
2825 3033
3034 old_read_domains = obj->read_domains;
3035
2826 /* The actual obj->write_domain will be updated with 3036 /* The actual obj->write_domain will be updated with
2827 * pending_write_domain after we emit the accumulated flush for all 3037 * pending_write_domain after we emit the accumulated flush for all
2828 * of our domain changes in execbuffers (which clears objects' 3038 * of our domain changes in execbuffers (which clears objects'
@@ -2841,6 +3051,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2841 obj->read_domains, obj->write_domain, 3051 obj->read_domains, obj->write_domain,
2842 dev->invalidate_domains, dev->flush_domains); 3052 dev->invalidate_domains, dev->flush_domains);
2843#endif 3053#endif
3054
3055 trace_i915_gem_object_change_domain(obj,
3056 old_read_domains,
3057 obj->write_domain);
2844} 3058}
2845 3059
2846/** 3060/**
@@ -2893,6 +3107,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2893 uint64_t offset, uint64_t size) 3107 uint64_t offset, uint64_t size)
2894{ 3108{
2895 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3109 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3110 uint32_t old_read_domains;
2896 int i, ret; 3111 int i, ret;
2897 3112
2898 if (offset == 0 && size == obj->size) 3113 if (offset == 0 && size == obj->size)
@@ -2939,8 +3154,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2939 */ 3154 */
2940 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 3155 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2941 3156
3157 old_read_domains = obj->read_domains;
2942 obj->read_domains |= I915_GEM_DOMAIN_CPU; 3158 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2943 3159
3160 trace_i915_gem_object_change_domain(obj,
3161 old_read_domains,
3162 obj->write_domain);
3163
2944 return 0; 3164 return 0;
2945} 3165}
2946 3166
@@ -2984,6 +3204,21 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2984 } 3204 }
2985 target_obj_priv = target_obj->driver_private; 3205 target_obj_priv = target_obj->driver_private;
2986 3206
3207#if WATCH_RELOC
3208 DRM_INFO("%s: obj %p offset %08x target %d "
3209 "read %08x write %08x gtt %08x "
3210 "presumed %08x delta %08x\n",
3211 __func__,
3212 obj,
3213 (int) reloc->offset,
3214 (int) reloc->target_handle,
3215 (int) reloc->read_domains,
3216 (int) reloc->write_domain,
3217 (int) target_obj_priv->gtt_offset,
3218 (int) reloc->presumed_offset,
3219 reloc->delta);
3220#endif
3221
2987 /* The target buffer should have appeared before us in the 3222 /* The target buffer should have appeared before us in the
2988 * exec_object list, so it should have a GTT space bound by now. 3223 * exec_object list, so it should have a GTT space bound by now.
2989 */ 3224 */
@@ -2995,25 +3230,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2995 return -EINVAL; 3230 return -EINVAL;
2996 } 3231 }
2997 3232
2998 if (reloc->offset > obj->size - 4) { 3233 /* Validate that the target is in a valid r/w GPU domain */
2999 DRM_ERROR("Relocation beyond object bounds: "
3000 "obj %p target %d offset %d size %d.\n",
3001 obj, reloc->target_handle,
3002 (int) reloc->offset, (int) obj->size);
3003 drm_gem_object_unreference(target_obj);
3004 i915_gem_object_unpin(obj);
3005 return -EINVAL;
3006 }
3007 if (reloc->offset & 3) {
3008 DRM_ERROR("Relocation not 4-byte aligned: "
3009 "obj %p target %d offset %d.\n",
3010 obj, reloc->target_handle,
3011 (int) reloc->offset);
3012 drm_gem_object_unreference(target_obj);
3013 i915_gem_object_unpin(obj);
3014 return -EINVAL;
3015 }
3016
3017 if (reloc->write_domain & I915_GEM_DOMAIN_CPU || 3234 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3018 reloc->read_domains & I915_GEM_DOMAIN_CPU) { 3235 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3019 DRM_ERROR("reloc with read/write CPU domains: " 3236 DRM_ERROR("reloc with read/write CPU domains: "
@@ -3027,7 +3244,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3027 i915_gem_object_unpin(obj); 3244 i915_gem_object_unpin(obj);
3028 return -EINVAL; 3245 return -EINVAL;
3029 } 3246 }
3030
3031 if (reloc->write_domain && target_obj->pending_write_domain && 3247 if (reloc->write_domain && target_obj->pending_write_domain &&
3032 reloc->write_domain != target_obj->pending_write_domain) { 3248 reloc->write_domain != target_obj->pending_write_domain) {
3033 DRM_ERROR("Write domain conflict: " 3249 DRM_ERROR("Write domain conflict: "
@@ -3042,21 +3258,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3042 return -EINVAL; 3258 return -EINVAL;
3043 } 3259 }
3044 3260
3045#if WATCH_RELOC
3046 DRM_INFO("%s: obj %p offset %08x target %d "
3047 "read %08x write %08x gtt %08x "
3048 "presumed %08x delta %08x\n",
3049 __func__,
3050 obj,
3051 (int) reloc->offset,
3052 (int) reloc->target_handle,
3053 (int) reloc->read_domains,
3054 (int) reloc->write_domain,
3055 (int) target_obj_priv->gtt_offset,
3056 (int) reloc->presumed_offset,
3057 reloc->delta);
3058#endif
3059
3060 target_obj->pending_read_domains |= reloc->read_domains; 3261 target_obj->pending_read_domains |= reloc->read_domains;
3061 target_obj->pending_write_domain |= reloc->write_domain; 3262 target_obj->pending_write_domain |= reloc->write_domain;
3062 3263
@@ -3068,6 +3269,37 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3068 continue; 3269 continue;
3069 } 3270 }
3070 3271
3272 /* Check that the relocation address is valid... */
3273 if (reloc->offset > obj->size - 4) {
3274 DRM_ERROR("Relocation beyond object bounds: "
3275 "obj %p target %d offset %d size %d.\n",
3276 obj, reloc->target_handle,
3277 (int) reloc->offset, (int) obj->size);
3278 drm_gem_object_unreference(target_obj);
3279 i915_gem_object_unpin(obj);
3280 return -EINVAL;
3281 }
3282 if (reloc->offset & 3) {
3283 DRM_ERROR("Relocation not 4-byte aligned: "
3284 "obj %p target %d offset %d.\n",
3285 obj, reloc->target_handle,
3286 (int) reloc->offset);
3287 drm_gem_object_unreference(target_obj);
3288 i915_gem_object_unpin(obj);
3289 return -EINVAL;
3290 }
3291
3292 /* and points to somewhere within the target object. */
3293 if (reloc->delta >= target_obj->size) {
3294 DRM_ERROR("Relocation beyond target object bounds: "
3295 "obj %p target %d delta %d size %d.\n",
3296 obj, reloc->target_handle,
3297 (int) reloc->delta, (int) target_obj->size);
3298 drm_gem_object_unreference(target_obj);
3299 i915_gem_object_unpin(obj);
3300 return -EINVAL;
3301 }
3302
3071 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 3303 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3072 if (ret != 0) { 3304 if (ret != 0) {
3073 drm_gem_object_unreference(target_obj); 3305 drm_gem_object_unreference(target_obj);
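Moving the relocation checks down here also adds a new test on reloc->delta, so a relocation must now satisfy three conditions: it leaves room for a 4-byte write inside the source object, it is 4-byte aligned, and it points inside the target object. The combined predicate, as a small illustrative program:

#include <stdint.h>
#include <stdio.h>

static int reloc_valid(uint64_t offset, uint64_t obj_size,
		       uint64_t delta, uint64_t target_size)
{
	if (offset > obj_size - 4)	/* room for the 32-bit value */
		return 0;
	if (offset & 3)			/* 4-byte aligned */
		return 0;
	if (delta >= target_size)	/* lands inside the target */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", reloc_valid(4092, 4096, 0, 4096));	/* 1 */
	printf("%d\n", reloc_valid(2050, 4096, 0, 4096));	/* 0: unaligned */
	printf("%d\n", reloc_valid(0, 4096, 4096, 4096));	/* 0: delta out of bounds */
	return 0;
}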
@@ -3126,6 +3358,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
3126 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 3358 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3127 exec_len = (uint32_t) exec->batch_len; 3359 exec_len = (uint32_t) exec->batch_len;
3128 3360
3361 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
3362
3129 count = nbox ? nbox : 1; 3363 count = nbox ? nbox : 1;
3130 3364
3131 for (i = 0; i < count; i++) { 3365 for (i = 0; i < count; i++) {
@@ -3363,7 +3597,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3363 3597
3364 i915_verify_inactive(dev, __FILE__, __LINE__); 3598 i915_verify_inactive(dev, __FILE__, __LINE__);
3365 3599
3366 if (dev_priv->mm.wedged) { 3600 if (atomic_read(&dev_priv->mm.wedged)) {
3367 DRM_ERROR("Execbuf while wedged\n"); 3601 DRM_ERROR("Execbuf while wedged\n");
3368 mutex_unlock(&dev->struct_mutex); 3602 mutex_unlock(&dev->struct_mutex);
3369 ret = -EIO; 3603 ret = -EIO;
@@ -3421,8 +3655,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3421 3655
3422 /* error other than GTT full, or we've already tried again */ 3656 /* error other than GTT full, or we've already tried again */
3423 if (ret != -ENOSPC || pin_tries >= 1) { 3657 if (ret != -ENOSPC || pin_tries >= 1) {
3424 if (ret != -ERESTARTSYS) 3658 if (ret != -ERESTARTSYS) {
3425 DRM_ERROR("Failed to pin buffers %d\n", ret); 3659 unsigned long long total_size = 0;
3660 for (i = 0; i < args->buffer_count; i++)
3661 total_size += object_list[i]->size;
3662 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
3663 pinned+1, args->buffer_count,
3664 total_size, ret);
3665 DRM_ERROR("%d objects [%d pinned], "
3666 "%d object bytes [%d pinned], "
3667 "%d/%d gtt bytes\n",
3668 atomic_read(&dev->object_count),
3669 atomic_read(&dev->pin_count),
3670 atomic_read(&dev->object_memory),
3671 atomic_read(&dev->pin_memory),
3672 atomic_read(&dev->gtt_memory),
3673 dev->gtt_total);
3674 }
3426 goto err; 3675 goto err;
3427 } 3676 }
3428 3677
@@ -3433,7 +3682,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3433 3682
3434 /* evict everyone we can from the aperture */ 3683 /* evict everyone we can from the aperture */
3435 ret = i915_gem_evict_everything(dev); 3684 ret = i915_gem_evict_everything(dev);
3436 if (ret) 3685 if (ret && ret != -ENOSPC)
3437 goto err; 3686 goto err;
3438 } 3687 }
3439 3688
@@ -3489,8 +3738,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3489 3738
3490 for (i = 0; i < args->buffer_count; i++) { 3739 for (i = 0; i < args->buffer_count; i++) {
3491 struct drm_gem_object *obj = object_list[i]; 3740 struct drm_gem_object *obj = object_list[i];
3741 uint32_t old_write_domain = obj->write_domain;
3492 3742
3493 obj->write_domain = obj->pending_write_domain; 3743 obj->write_domain = obj->pending_write_domain;
3744 trace_i915_gem_object_change_domain(obj,
3745 obj->read_domains,
3746 old_write_domain);
3494 } 3747 }
3495 3748
3496 i915_verify_inactive(dev, __FILE__, __LINE__); 3749 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -3607,11 +3860,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3607 i915_verify_inactive(dev, __FILE__, __LINE__); 3860 i915_verify_inactive(dev, __FILE__, __LINE__);
3608 if (obj_priv->gtt_space == NULL) { 3861 if (obj_priv->gtt_space == NULL) {
3609 ret = i915_gem_object_bind_to_gtt(obj, alignment); 3862 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3610 if (ret != 0) { 3863 if (ret)
3611 if (ret != -EBUSY && ret != -ERESTARTSYS)
3612 DRM_ERROR("Failure to bind: %d\n", ret);
3613 return ret; 3864 return ret;
3614 }
3615 } 3865 }
3616 /* 3866 /*
3617 * Pre-965 chips need a fence register set up in order to 3867 * Pre-965 chips need a fence register set up in order to
@@ -3691,6 +3941,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3691 } 3941 }
3692 obj_priv = obj->driver_private; 3942 obj_priv = obj->driver_private;
3693 3943
3944 if (obj_priv->madv != I915_MADV_WILLNEED) {
3945 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3946 drm_gem_object_unreference(obj);
3947 mutex_unlock(&dev->struct_mutex);
3948 return -EINVAL;
3949 }
3950
3694 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { 3951 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3695 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", 3952 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3696 args->handle); 3953 args->handle);
@@ -3803,6 +4060,56 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3803 return i915_gem_ring_throttle(dev, file_priv); 4060 return i915_gem_ring_throttle(dev, file_priv);
3804} 4061}
3805 4062
4063int
4064i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4065 struct drm_file *file_priv)
4066{
4067 struct drm_i915_gem_madvise *args = data;
4068 struct drm_gem_object *obj;
4069 struct drm_i915_gem_object *obj_priv;
4070
4071 switch (args->madv) {
4072 case I915_MADV_DONTNEED:
4073 case I915_MADV_WILLNEED:
4074 break;
4075 default:
4076 return -EINVAL;
4077 }
4078
4079 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4080 if (obj == NULL) {
4081 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4082 args->handle);
4083 return -EBADF;
4084 }
4085
4086 mutex_lock(&dev->struct_mutex);
4087 obj_priv = obj->driver_private;
4088
4089 if (obj_priv->pin_count) {
4090 drm_gem_object_unreference(obj);
4091 mutex_unlock(&dev->struct_mutex);
4092
4093 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4094 return -EINVAL;
4095 }
4096
4097 if (obj_priv->madv != __I915_MADV_PURGED)
4098 obj_priv->madv = args->madv;
4099
4100 /* if the object is no longer bound, discard its backing storage */
4101 if (i915_gem_object_is_purgeable(obj_priv) &&
4102 obj_priv->gtt_space == NULL)
4103 i915_gem_object_truncate(obj);
4104
4105 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4106
4107 drm_gem_object_unreference(obj);
4108 mutex_unlock(&dev->struct_mutex);
4109
4110 return 0;
4111}
4112
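From userspace the new ioctl is a hint keyed on a GEM handle: I915_MADV_DONTNEED marks a buffer's backing pages reclaimable while it is unbound, I915_MADV_WILLNEED revives the hint, and retained reports whether the pages survived. A hedged usage sketch, assuming the I915_GEM_MADVISE additions from this series are present in the installed <drm/i915_drm.h> (struct and ioctl names are taken from that header; error handling trimmed):

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* must carry the I915_GEM_MADVISE additions */

/* Hint that 'handle' is purgeable; returns 1 if its pages were retained,
 * 0 if the kernel had already purged them, -1 on error. */
static int mark_purgeable(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_madvise arg = {
		.handle = handle,
		.madv = I915_MADV_DONTNEED,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MADVISE, &arg) < 0) {
		perror("DRM_IOCTL_I915_GEM_MADVISE");
		return -1;
	}
	return arg.retained;
}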
3806int i915_gem_init_object(struct drm_gem_object *obj) 4113int i915_gem_init_object(struct drm_gem_object *obj)
3807{ 4114{
3808 struct drm_i915_gem_object *obj_priv; 4115 struct drm_i915_gem_object *obj_priv;
@@ -3827,6 +4134,9 @@ int i915_gem_init_object(struct drm_gem_object *obj)
3827 obj_priv->fence_reg = I915_FENCE_REG_NONE; 4134 obj_priv->fence_reg = I915_FENCE_REG_NONE;
3828 INIT_LIST_HEAD(&obj_priv->list); 4135 INIT_LIST_HEAD(&obj_priv->list);
3829 INIT_LIST_HEAD(&obj_priv->fence_list); 4136 INIT_LIST_HEAD(&obj_priv->fence_list);
4137 obj_priv->madv = I915_MADV_WILLNEED;
4138
4139 trace_i915_gem_object_create(obj);
3830 4140
3831 return 0; 4141 return 0;
3832} 4142}
@@ -3836,6 +4146,8 @@ void i915_gem_free_object(struct drm_gem_object *obj)
3836 struct drm_device *dev = obj->dev; 4146 struct drm_device *dev = obj->dev;
3837 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4147 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3838 4148
4149 trace_i915_gem_object_destroy(obj);
4150
3839 while (obj_priv->pin_count > 0) 4151 while (obj_priv->pin_count > 0)
3840 i915_gem_object_unpin(obj); 4152 i915_gem_object_unpin(obj);
3841 4153
@@ -3844,43 +4156,35 @@ void i915_gem_free_object(struct drm_gem_object *obj)
3844 4156
3845 i915_gem_object_unbind(obj); 4157 i915_gem_object_unbind(obj);
3846 4158
3847 i915_gem_free_mmap_offset(obj); 4159 if (obj_priv->mmap_offset)
4160 i915_gem_free_mmap_offset(obj);
3848 4161
3849 kfree(obj_priv->page_cpu_valid); 4162 kfree(obj_priv->page_cpu_valid);
3850 kfree(obj_priv->bit_17); 4163 kfree(obj_priv->bit_17);
3851 kfree(obj->driver_private); 4164 kfree(obj->driver_private);
3852} 4165}
3853 4166
3854/** Unbinds all objects that are on the given buffer list. */ 4167/** Unbinds all inactive objects. */
3855static int 4168static int
3856i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) 4169i915_gem_evict_from_inactive_list(struct drm_device *dev)
3857{ 4170{
3858 struct drm_gem_object *obj; 4171 drm_i915_private_t *dev_priv = dev->dev_private;
3859 struct drm_i915_gem_object *obj_priv;
3860 int ret;
3861 4172
3862 while (!list_empty(head)) { 4173 while (!list_empty(&dev_priv->mm.inactive_list)) {
3863 obj_priv = list_first_entry(head, 4174 struct drm_gem_object *obj;
3864 struct drm_i915_gem_object, 4175 int ret;
3865 list);
3866 obj = obj_priv->obj;
3867 4176
3868 if (obj_priv->pin_count != 0) { 4177 obj = list_first_entry(&dev_priv->mm.inactive_list,
3869 DRM_ERROR("Pinned object in unbind list\n"); 4178 struct drm_i915_gem_object,
3870 mutex_unlock(&dev->struct_mutex); 4179 list)->obj;
3871 return -EINVAL;
3872 }
3873 4180
3874 ret = i915_gem_object_unbind(obj); 4181 ret = i915_gem_object_unbind(obj);
3875 if (ret != 0) { 4182 if (ret != 0) {
3876 DRM_ERROR("Error unbinding object in LeaveVT: %d\n", 4183 DRM_ERROR("Error unbinding object: %d\n", ret);
3877 ret);
3878 mutex_unlock(&dev->struct_mutex);
3879 return ret; 4184 return ret;
3880 } 4185 }
3881 } 4186 }
3882 4187
3883
3884 return 0; 4188 return 0;
3885} 4189}
3886 4190
@@ -3902,6 +4206,7 @@ i915_gem_idle(struct drm_device *dev)
3902 * We need to replace this with a semaphore, or something. 4206 * We need to replace this with a semaphore, or something.
3903 */ 4207 */
3904 dev_priv->mm.suspended = 1; 4208 dev_priv->mm.suspended = 1;
4209 del_timer(&dev_priv->hangcheck_timer);
3905 4210
3906 /* Cancel the retire work handler, wait for it to finish if running 4211 /* Cancel the retire work handler, wait for it to finish if running
3907 */ 4212 */
@@ -3931,7 +4236,7 @@ i915_gem_idle(struct drm_device *dev)
3931 if (last_seqno == cur_seqno) { 4236 if (last_seqno == cur_seqno) {
3932 if (stuck++ > 100) { 4237 if (stuck++ > 100) {
3933 DRM_ERROR("hardware wedged\n"); 4238 DRM_ERROR("hardware wedged\n");
3934 dev_priv->mm.wedged = 1; 4239 atomic_set(&dev_priv->mm.wedged, 1);
3935 DRM_WAKEUP(&dev_priv->irq_queue); 4240 DRM_WAKEUP(&dev_priv->irq_queue);
3936 break; 4241 break;
3937 } 4242 }
@@ -3944,7 +4249,7 @@ i915_gem_idle(struct drm_device *dev)
3944 i915_gem_retire_requests(dev); 4249 i915_gem_retire_requests(dev);
3945 4250
3946 spin_lock(&dev_priv->mm.active_list_lock); 4251 spin_lock(&dev_priv->mm.active_list_lock);
3947 if (!dev_priv->mm.wedged) { 4252 if (!atomic_read(&dev_priv->mm.wedged)) {
3948 /* Active and flushing should now be empty as we've 4253 /* Active and flushing should now be empty as we've
3949 * waited for a sequence higher than any pending execbuffer 4254 * waited for a sequence higher than any pending execbuffer
3950 */ 4255 */
@@ -3962,29 +4267,41 @@ i915_gem_idle(struct drm_device *dev)
3962 * the GPU domains and just stuff them onto inactive. 4267 * the GPU domains and just stuff them onto inactive.
3963 */ 4268 */
3964 while (!list_empty(&dev_priv->mm.active_list)) { 4269 while (!list_empty(&dev_priv->mm.active_list)) {
3965 struct drm_i915_gem_object *obj_priv; 4270 struct drm_gem_object *obj;
4271 uint32_t old_write_domain;
3966 4272
3967 obj_priv = list_first_entry(&dev_priv->mm.active_list, 4273 obj = list_first_entry(&dev_priv->mm.active_list,
3968 struct drm_i915_gem_object, 4274 struct drm_i915_gem_object,
3969 list); 4275 list)->obj;
3970 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; 4276 old_write_domain = obj->write_domain;
3971 i915_gem_object_move_to_inactive(obj_priv->obj); 4277 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4278 i915_gem_object_move_to_inactive(obj);
4279
4280 trace_i915_gem_object_change_domain(obj,
4281 obj->read_domains,
4282 old_write_domain);
3972 } 4283 }
3973 spin_unlock(&dev_priv->mm.active_list_lock); 4284 spin_unlock(&dev_priv->mm.active_list_lock);
3974 4285
3975 while (!list_empty(&dev_priv->mm.flushing_list)) { 4286 while (!list_empty(&dev_priv->mm.flushing_list)) {
3976 struct drm_i915_gem_object *obj_priv; 4287 struct drm_gem_object *obj;
4288 uint32_t old_write_domain;
3977 4289
3978 obj_priv = list_first_entry(&dev_priv->mm.flushing_list, 4290 obj = list_first_entry(&dev_priv->mm.flushing_list,
3979 struct drm_i915_gem_object, 4291 struct drm_i915_gem_object,
3980 list); 4292 list)->obj;
3981 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; 4293 old_write_domain = obj->write_domain;
3982 i915_gem_object_move_to_inactive(obj_priv->obj); 4294 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4295 i915_gem_object_move_to_inactive(obj);
4296
4297 trace_i915_gem_object_change_domain(obj,
4298 obj->read_domains,
4299 old_write_domain);
3983 } 4300 }
3984 4301
3985 4302
3986 /* Move all inactive buffers out of the GTT. */ 4303 /* Move all inactive buffers out of the GTT. */
3987 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); 4304 ret = i915_gem_evict_from_inactive_list(dev);
3988 WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); 4305 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
3989 if (ret) { 4306 if (ret) {
3990 mutex_unlock(&dev->struct_mutex); 4307 mutex_unlock(&dev->struct_mutex);
@@ -4206,9 +4523,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4206 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4523 if (drm_core_check_feature(dev, DRIVER_MODESET))
4207 return 0; 4524 return 0;
4208 4525
4209 if (dev_priv->mm.wedged) { 4526 if (atomic_read(&dev_priv->mm.wedged)) {
4210 DRM_ERROR("Reenabling wedged hardware, good luck\n"); 4527 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4211 dev_priv->mm.wedged = 0; 4528 atomic_set(&dev_priv->mm.wedged, 0);
4212 } 4529 }
4213 4530
4214 mutex_lock(&dev->struct_mutex); 4531 mutex_lock(&dev->struct_mutex);
@@ -4238,15 +4555,11 @@ int
4238i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 4555i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4239 struct drm_file *file_priv) 4556 struct drm_file *file_priv)
4240{ 4557{
4241 int ret;
4242
4243 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4558 if (drm_core_check_feature(dev, DRIVER_MODESET))
4244 return 0; 4559 return 0;
4245 4560
4246 ret = i915_gem_idle(dev);
4247 drm_irq_uninstall(dev); 4561 drm_irq_uninstall(dev);
4248 4562 return i915_gem_idle(dev);
4249 return ret;
4250} 4563}
4251 4564
4252void 4565void
@@ -4278,6 +4591,10 @@ i915_gem_load(struct drm_device *dev)
4278 i915_gem_retire_work_handler); 4591 i915_gem_retire_work_handler);
4279 dev_priv->mm.next_gem_seqno = 1; 4592 dev_priv->mm.next_gem_seqno = 1;
4280 4593
4594 spin_lock(&shrink_list_lock);
4595 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4596 spin_unlock(&shrink_list_lock);
4597
4281 /* Old X drivers will take 0-2 for front, back, depth buffers */ 4598 /* Old X drivers will take 0-2 for front, back, depth buffers */
4282 dev_priv->fence_reg_start = 3; 4599 dev_priv->fence_reg_start = 3;
4283 4600
@@ -4495,3 +4812,116 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4495 list_del_init(i915_file_priv->mm.request_list.next); 4812 list_del_init(i915_file_priv->mm.request_list.next);
4496 mutex_unlock(&dev->struct_mutex); 4813 mutex_unlock(&dev->struct_mutex);
4497} 4814}
4815
4816static int
4817i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
4818{
4819 drm_i915_private_t *dev_priv, *next_dev;
4820 struct drm_i915_gem_object *obj_priv, *next_obj;
4821 int cnt = 0;
4822 int would_deadlock = 1;
4823
4824 /* "fast-path" to count number of available objects */
4825 if (nr_to_scan == 0) {
4826 spin_lock(&shrink_list_lock);
4827 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
4828 struct drm_device *dev = dev_priv->dev;
4829
4830 if (mutex_trylock(&dev->struct_mutex)) {
4831 list_for_each_entry(obj_priv,
4832 &dev_priv->mm.inactive_list,
4833 list)
4834 cnt++;
4835 mutex_unlock(&dev->struct_mutex);
4836 }
4837 }
4838 spin_unlock(&shrink_list_lock);
4839
4840 return (cnt / 100) * sysctl_vfs_cache_pressure;
4841 }
4842
4843 spin_lock(&shrink_list_lock);
4844
4845 /* first scan for clean buffers */
4846 list_for_each_entry_safe(dev_priv, next_dev,
4847 &shrink_list, mm.shrink_list) {
4848 struct drm_device *dev = dev_priv->dev;
4849
4850 if (!mutex_trylock(&dev->struct_mutex))
4851 continue;
4852
4853 spin_unlock(&shrink_list_lock);
4854
4855 i915_gem_retire_requests(dev);
4856
4857 list_for_each_entry_safe(obj_priv, next_obj,
4858 &dev_priv->mm.inactive_list,
4859 list) {
4860 if (i915_gem_object_is_purgeable(obj_priv)) {
4861 i915_gem_object_unbind(obj_priv->obj);
4862 if (--nr_to_scan <= 0)
4863 break;
4864 }
4865 }
4866
4867 spin_lock(&shrink_list_lock);
4868 mutex_unlock(&dev->struct_mutex);
4869
4870 would_deadlock = 0;
4871
4872 if (nr_to_scan <= 0)
4873 break;
4874 }
4875
4876 /* second pass, evict/count anything still on the inactive list */
4877 list_for_each_entry_safe(dev_priv, next_dev,
4878 &shrink_list, mm.shrink_list) {
4879 struct drm_device *dev = dev_priv->dev;
4880
4881 if (!mutex_trylock(&dev->struct_mutex))
4882 continue;
4883
4884 spin_unlock(&shrink_list_lock);
4885
4886 list_for_each_entry_safe(obj_priv, next_obj,
4887 &dev_priv->mm.inactive_list,
4888 list) {
4889 if (nr_to_scan > 0) {
4890 i915_gem_object_unbind(obj_priv->obj);
4891 nr_to_scan--;
4892 } else
4893 cnt++;
4894 }
4895
4896 spin_lock(&shrink_list_lock);
4897 mutex_unlock(&dev->struct_mutex);
4898
4899 would_deadlock = 0;
4900 }
4901
4902 spin_unlock(&shrink_list_lock);
4903
4904 if (would_deadlock)
4905 return -1;
4906 else if (cnt > 0)
4907 return (cnt / 100) * sysctl_vfs_cache_pressure;
4908 else
4909 return 0;
4910}
4911
4912static struct shrinker shrinker = {
4913 .shrink = i915_gem_shrink,
4914 .seeks = DEFAULT_SEEKS,
4915};
4916
4917__init void
4918i915_gem_shrinker_init(void)
4919{
4920 register_shrinker(&shrinker);
4921}
4922
4923__exit void
4924i915_gem_shrinker_exit(void)
4925{
4926 unregister_shrinker(&shrinker);
4927}
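Hooking into the VM shrinker is what lets purgeable objects actually relieve memory pressure: with nr_to_scan == 0 the callback only reports how many inactive objects could be freed, scaled by sysctl_vfs_cache_pressure like the dcache/icache shrinkers, and it returns -1 when every per-device struct_mutex was contended so the core will not spin on it. The registration pattern against the shrinker API of this era (the old int (*shrink)(int, gfp_t) callback signature), as a minimal kernel-module sketch:

#include <linux/mm.h>
#include <linux/module.h>

/* nr_to_scan == 0 asks only for a count; otherwise free up to nr_to_scan. */
static int toy_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	(void)gfp_mask;
	if (nr_to_scan == 0)
		return 100;	/* pretend 100 objects are freeable */
	/* ... unbind up to nr_to_scan purgeable objects here ... */
	return 0;		/* nothing left to report */
}

static struct shrinker toy_shrinker = {
	.shrink = toy_shrink,
	.seeks = DEFAULT_SEEKS,
};

static int __init toy_init(void)
{
	register_shrinker(&toy_shrinker);
	return 0;
}

static void __exit toy_exit(void)
{
	unregister_shrinker(&toy_shrinker);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");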
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6c89f2ff2495..c3ceffa46ea0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,6 +31,7 @@
31#include "drm.h" 31#include "drm.h"
32#include "i915_drm.h" 32#include "i915_drm.h"
33#include "i915_drv.h" 33#include "i915_drv.h"
34#include "i915_trace.h"
34#include "intel_drv.h" 35#include "intel_drv.h"
35 36
36#define MAX_NOPID ((u32)~0) 37#define MAX_NOPID ((u32)~0)
@@ -279,7 +280,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
279 } 280 }
280 281
281 if (gt_iir & GT_USER_INTERRUPT) { 282 if (gt_iir & GT_USER_INTERRUPT) {
282 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); 283 u32 seqno = i915_get_gem_seqno(dev);
284 dev_priv->mm.irq_gem_seqno = seqno;
285 trace_i915_gem_request_complete(dev, seqno);
283 DRM_WAKEUP(&dev_priv->irq_queue); 286 DRM_WAKEUP(&dev_priv->irq_queue);
284 } 287 }
285 288
@@ -302,12 +305,25 @@ static void i915_error_work_func(struct work_struct *work)
302 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 305 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
303 error_work); 306 error_work);
304 struct drm_device *dev = dev_priv->dev; 307 struct drm_device *dev = dev_priv->dev;
305 char *event_string = "ERROR=1"; 308 char *error_event[] = { "ERROR=1", NULL };
306 char *envp[] = { event_string, NULL }; 309 char *reset_event[] = { "RESET=1", NULL };
310 char *reset_done_event[] = { "ERROR=0", NULL };
307 311
308 DRM_DEBUG("generating error event\n"); 312 DRM_DEBUG("generating error event\n");
309 313 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
310 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); 314
315 if (atomic_read(&dev_priv->mm.wedged)) {
316 if (IS_I965G(dev)) {
317 DRM_DEBUG("resetting chip\n");
318 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
319 if (!i965_reset(dev, GDRST_RENDER)) {
320 atomic_set(&dev_priv->mm.wedged, 0);
321 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
322 }
323 } else {
324 printk("reboot required\n");
325 }
326 }
311} 327}
312 328
313/** 329/**
@@ -372,7 +388,7 @@ out:
372 * so userspace knows something bad happened (should trigger collection 388 * so userspace knows something bad happened (should trigger collection
373 * of a ring dump etc.). 389 * of a ring dump etc.).
374 */ 390 */
375static void i915_handle_error(struct drm_device *dev) 391static void i915_handle_error(struct drm_device *dev, bool wedged)
376{ 392{
377 struct drm_i915_private *dev_priv = dev->dev_private; 393 struct drm_i915_private *dev_priv = dev->dev_private;
378 u32 eir = I915_READ(EIR); 394 u32 eir = I915_READ(EIR);
@@ -482,6 +498,16 @@ static void i915_handle_error(struct drm_device *dev)
482 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 498 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
483 } 499 }
484 500
501 if (wedged) {
502 atomic_set(&dev_priv->mm.wedged, 1);
503
504 /*
505 * Wakeup waiting processes so they don't hang
506 */
507 printk("i915: Waking up sleeping processes\n");
508 DRM_WAKEUP(&dev_priv->irq_queue);
509 }
510
485 queue_work(dev_priv->wq, &dev_priv->error_work); 511 queue_work(dev_priv->wq, &dev_priv->error_work);
486} 512}
487 513
@@ -527,7 +553,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
527 pipeb_stats = I915_READ(PIPEBSTAT); 553 pipeb_stats = I915_READ(PIPEBSTAT);
528 554
529 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 555 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
530 i915_handle_error(dev); 556 i915_handle_error(dev, false);
531 557
532 /* 558 /*
533 * Clear the PIPE(A|B)STAT regs before the IIR 559 * Clear the PIPE(A|B)STAT regs before the IIR
@@ -599,8 +625,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
599 } 625 }
600 626
601 if (iir & I915_USER_INTERRUPT) { 627 if (iir & I915_USER_INTERRUPT) {
602 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); 628 u32 seqno = i915_get_gem_seqno(dev);
629 dev_priv->mm.irq_gem_seqno = seqno;
630 trace_i915_gem_request_complete(dev, seqno);
603 DRM_WAKEUP(&dev_priv->irq_queue); 631 DRM_WAKEUP(&dev_priv->irq_queue);
632 dev_priv->hangcheck_count = 0;
633 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
604 } 634 }
605 635
606 if (pipea_stats & vblank_status) { 636 if (pipea_stats & vblank_status) {
@@ -695,6 +725,16 @@ void i915_user_irq_put(struct drm_device *dev)
695 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 725 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
696} 726}
697 727
728void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
729{
730 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
731
732 if (dev_priv->trace_irq_seqno == 0)
733 i915_user_irq_get(dev);
734
735 dev_priv->trace_irq_seqno = seqno;
736}
737
698static int i915_wait_irq(struct drm_device * dev, int irq_nr) 738static int i915_wait_irq(struct drm_device * dev, int irq_nr)
699{ 739{
700 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 740 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
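
[note] i915_trace_irq_get() above holds at most one user-interrupt reference no matter how many requests get traced: the reference is taken only when trace_irq_seqno transitions from zero, and later calls just advance the watermark. A minimal model of that take-once pattern (irq_ref_get() is a hypothetical stand-in for i915_user_irq_get()):

#include <stdint.h>

extern void irq_ref_get(void);   /* hypothetical stand-in for i915_user_irq_get() */

static uint32_t trace_irq_seqno; /* 0 means no reference is held */

static void trace_irq_get(uint32_t seqno)
{
    if (trace_irq_seqno == 0)    /* first traced request: take the reference */
        irq_ref_get();
    trace_irq_seqno = seqno;     /* later requests only bump the watermark */
}
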
@@ -880,6 +920,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
880 return -EINVAL; 920 return -EINVAL;
881} 921}
882 922
923struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
924 drm_i915_private_t *dev_priv = dev->dev_private;
925 return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
926}
927
928/**
929 * This is called when the chip hasn't reported back with completed
930 * batchbuffers in a long time. The first time this is called we simply record
931 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
932 * again, we assume the chip is wedged and try to fix it.
933 */
934void i915_hangcheck_elapsed(unsigned long data)
935{
936 struct drm_device *dev = (struct drm_device *)data;
937 drm_i915_private_t *dev_priv = dev->dev_private;
938 uint32_t acthd;
939
940 if (!IS_I965G(dev))
941 acthd = I915_READ(ACTHD);
942 else
943 acthd = I915_READ(ACTHD_I965);
944
945 /* If all work is done then ACTHD clearly hasn't advanced. */
946 if (list_empty(&dev_priv->mm.request_list) ||
947 i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
948 dev_priv->hangcheck_count = 0;
949 return;
950 }
951
952 if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
953 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
954 i915_handle_error(dev, true);
955 return;
956 }
957
 958 /* Reset timer in case the chip hangs without another request being added */
959 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
960
961 if (acthd != dev_priv->last_acthd)
962 dev_priv->hangcheck_count = 0;
963 else
964 dev_priv->hangcheck_count++;
965
966 dev_priv->last_acthd = acthd;
967}
968
883/* drm_dma.h hooks 969/* drm_dma.h hooks
884*/ 970*/
885static void igdng_irq_preinstall(struct drm_device *dev) 971static void igdng_irq_preinstall(struct drm_device *dev)
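
[note] The hangcheck scheme added above requires two consecutive timer ticks with a stalled ACTHD, while requests are still outstanding, before it declares a hang. A compact userspace model of that decision, with read_acthd() and requests_pending() as hypothetical stubs for the register read and the request-list check:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t read_acthd(void);      /* hypothetical: ACTHD register read */
extern bool requests_pending(void);    /* hypothetical: request-list check */

static uint32_t last_acthd;
static int hangcheck_count;

/* Returns true when the GPU should be treated as hung. */
bool hangcheck_tick(void)
{
    uint32_t acthd = read_acthd();

    if (!requests_pending()) {         /* all work retired: idle, not hung */
        hangcheck_count = 0;
        return false;
    }
    if (acthd == last_acthd && hangcheck_count > 0)
        return true;                   /* no progress over two ticks: hung */

    if (acthd != last_acthd)
        hangcheck_count = 0;           /* progress was made, start over */
    else
        hangcheck_count++;
    last_acthd = acthd;
    return false;                      /* caller re-arms the timer */
}
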
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index e4b4e8898e39..2d5193556d3f 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -148,6 +148,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
148 struct drm_i915_private *dev_priv = dev->dev_private; 148 struct drm_i915_private *dev_priv = dev->dev_private;
149 struct opregion_asle *asle = dev_priv->opregion.asle; 149 struct opregion_asle *asle = dev_priv->opregion.asle;
150 u32 blc_pwm_ctl, blc_pwm_ctl2; 150 u32 blc_pwm_ctl, blc_pwm_ctl2;
151 u32 max_backlight, level, shift;
151 152
152 if (!(bclp & ASLE_BCLP_VALID)) 153 if (!(bclp & ASLE_BCLP_VALID))
153 return ASLE_BACKLIGHT_FAIL; 154 return ASLE_BACKLIGHT_FAIL;
@@ -157,14 +158,25 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
157 return ASLE_BACKLIGHT_FAIL; 158 return ASLE_BACKLIGHT_FAIL;
158 159
159 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); 160 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
160 blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
161 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); 161 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
162 162
163 if (blc_pwm_ctl2 & BLM_COMBINATION_MODE) 163 if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
164 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); 164 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
165 else 165 else {
166 I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1)); 166 if (IS_IGD(dev)) {
167 167 blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
168 max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
169 BACKLIGHT_MODULATION_FREQ_SHIFT;
170 shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
171 } else {
172 blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
173 max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
174 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
175 shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
176 }
177 level = (bclp * max_backlight) / 255;
178 I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
179 }
168 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; 180 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
169 181
170 return 0; 182 return 0;
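
[note] The rewritten else-branch above rescales the ASLE-supplied brightness (bclp, 0-255) onto the hardware duty-cycle range instead of the old bclp*0x101 approximation: max_backlight is derived from the PWM modulation-frequency field, then level = bclp * max_backlight / 255 is shifted into the duty-cycle bits. A worked sketch of the non-IGD path (the FREQ mask/shift values here are illustrative placeholders, not the i915_reg.h definitions; the duty-cycle constants mirror BACKLIGHT_DUTY_CYCLE_MASK/SHIFT):

#include <stdint.h>

#define DUTY_CYCLE_MASK   0xffff      /* BACKLIGHT_DUTY_CYCLE_MASK */
#define DUTY_CYCLE_SHIFT  0           /* BACKLIGHT_DUTY_CYCLE_SHIFT */
#define FREQ_MASK         0xffff0000  /* illustrative placeholder */
#define FREQ_SHIFT        16          /* illustrative placeholder */

static uint32_t scale_backlight(uint32_t blc_pwm_ctl, uint32_t bclp)
{
    uint32_t max_backlight = ((blc_pwm_ctl & FREQ_MASK) >> FREQ_SHIFT) * 2;
    uint32_t level = (bclp * max_backlight) / 255;  /* e.g. bclp=128 -> ~50% duty */

    return (blc_pwm_ctl & ~DUTY_CYCLE_MASK) | (level << DUTY_CYCLE_SHIFT);
}
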
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e38cd21161c8..1687edf68795 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -30,6 +30,7 @@
30 * fb aperture size and the amount of pre-reserved memory. 30 * fb aperture size and the amount of pre-reserved memory.
31 */ 31 */
32#define INTEL_GMCH_CTRL 0x52 32#define INTEL_GMCH_CTRL 0x52
33#define INTEL_GMCH_VGA_DISABLE (1 << 1)
33#define INTEL_GMCH_ENABLED 0x4 34#define INTEL_GMCH_ENABLED 0x4
34#define INTEL_GMCH_MEM_MASK 0x1 35#define INTEL_GMCH_MEM_MASK 0x1
35#define INTEL_GMCH_MEM_64M 0x1 36#define INTEL_GMCH_MEM_64M 0x1
@@ -85,6 +86,10 @@
85#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) 86#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
86#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) 87#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
87#define LBB 0xf4 88#define LBB 0xf4
89#define GDRST 0xc0
90#define GDRST_FULL (0<<2)
91#define GDRST_RENDER (1<<2)
92#define GDRST_MEDIA (3<<2)
88 93
89/* VGA stuff */ 94/* VGA stuff */
90 95
@@ -343,9 +348,37 @@
343#define FBC_CTL_PLANEA (0<<0) 348#define FBC_CTL_PLANEA (0<<0)
344#define FBC_CTL_PLANEB (1<<0) 349#define FBC_CTL_PLANEB (1<<0)
345#define FBC_FENCE_OFF 0x0321b 350#define FBC_FENCE_OFF 0x0321b
351#define FBC_TAG 0x03300
346 352
347#define FBC_LL_SIZE (1536) 353#define FBC_LL_SIZE (1536)
348 354
355/* Framebuffer compression for GM45+ */
356#define DPFC_CB_BASE 0x3200
357#define DPFC_CONTROL 0x3208
358#define DPFC_CTL_EN (1<<31)
359#define DPFC_CTL_PLANEA (0<<30)
360#define DPFC_CTL_PLANEB (1<<30)
361#define DPFC_CTL_FENCE_EN (1<<29)
362#define DPFC_SR_EN (1<<10)
363#define DPFC_CTL_LIMIT_1X (0<<6)
364#define DPFC_CTL_LIMIT_2X (1<<6)
365#define DPFC_CTL_LIMIT_4X (2<<6)
366#define DPFC_RECOMP_CTL 0x320c
367#define DPFC_RECOMP_STALL_EN (1<<27)
368#define DPFC_RECOMP_STALL_WM_SHIFT (16)
369#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
370#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
371#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
372#define DPFC_STATUS 0x3210
373#define DPFC_INVAL_SEG_SHIFT (16)
374#define DPFC_INVAL_SEG_MASK (0x07ff0000)
375#define DPFC_COMP_SEG_SHIFT (0)
376#define DPFC_COMP_SEG_MASK (0x000003ff)
377#define DPFC_STATUS2 0x3214
378#define DPFC_FENCE_YOFF 0x3218
379#define DPFC_CHICKEN 0x3224
380#define DPFC_HT_MODIFY (1<<31)
381
349/* 382/*
350 * GPIO regs 383 * GPIO regs
351 */ 384 */
@@ -935,6 +968,8 @@
935#define LVDS_PORT_EN (1 << 31) 968#define LVDS_PORT_EN (1 << 31)
936/* Selects pipe B for LVDS data. Must be set on pre-965. */ 969/* Selects pipe B for LVDS data. Must be set on pre-965. */
937#define LVDS_PIPEB_SELECT (1 << 30) 970#define LVDS_PIPEB_SELECT (1 << 30)
971/* Enable border for unscaled (or aspect-scaled) display */
972#define LVDS_BORDER_ENABLE (1 << 15)
938/* 973/*
939 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per 974 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
940 * pixel. 975 * pixel.
@@ -1045,6 +1080,8 @@
1045#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) 1080#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
1046#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) 1081#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
1047 1082
1083#define BLC_HIST_CTL 0x61260
1084
1048/* TV port control */ 1085/* TV port control */
1049#define TV_CTL 0x68000 1086#define TV_CTL 0x68000
1050/** Enables the TV encoder */ 1087/** Enables the TV encoder */
@@ -1747,6 +1784,11 @@
1747#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ 1784#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
1748#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) 1785#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
1749#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) 1786#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
1787#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
1788#define PIPE_8BPC (0 << 5)
1789#define PIPE_10BPC (1 << 5)
1790#define PIPE_6BPC (2 << 5)
1791#define PIPE_12BPC (3 << 5)
1750 1792
1751#define DSPARB 0x70030 1793#define DSPARB 0x70030
1752#define DSPARB_CSTART_MASK (0x7f << 7) 1794#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -1757,17 +1799,29 @@
1757#define DSPARB_AEND_SHIFT 0 1799#define DSPARB_AEND_SHIFT 0
1758 1800
1759#define DSPFW1 0x70034 1801#define DSPFW1 0x70034
1802#define DSPFW_SR_SHIFT 23
1803#define DSPFW_CURSORB_SHIFT 16
1804#define DSPFW_PLANEB_SHIFT 8
1760#define DSPFW2 0x70038 1805#define DSPFW2 0x70038
1806#define DSPFW_CURSORA_MASK 0x00003f00
1807#define DSPFW_CURSORA_SHIFT 16
1761#define DSPFW3 0x7003c 1808#define DSPFW3 0x7003c
1809#define DSPFW_HPLL_SR_EN (1<<31)
1810#define DSPFW_CURSOR_SR_SHIFT 24
1762#define IGD_SELF_REFRESH_EN (1<<30) 1811#define IGD_SELF_REFRESH_EN (1<<30)
1763 1812
1764/* FIFO watermark sizes etc */ 1813/* FIFO watermark sizes etc */
1814#define G4X_FIFO_LINE_SIZE 64
1765#define I915_FIFO_LINE_SIZE 64 1815#define I915_FIFO_LINE_SIZE 64
1766#define I830_FIFO_LINE_SIZE 32 1816#define I830_FIFO_LINE_SIZE 32
1817
1818#define G4X_FIFO_SIZE 127
1767#define I945_FIFO_SIZE 127 /* 945 & 965 */ 1819#define I945_FIFO_SIZE 127 /* 945 & 965 */
1768#define I915_FIFO_SIZE 95 1820#define I915_FIFO_SIZE 95
1769#define I855GM_FIFO_SIZE 127 /* In cachelines */ 1821#define I855GM_FIFO_SIZE 127 /* In cachelines */
1770#define I830_FIFO_SIZE 95 1822#define I830_FIFO_SIZE 95
1823
1824#define G4X_MAX_WM 0x3f
1771#define I915_MAX_WM 0x3f 1825#define I915_MAX_WM 0x3f
1772 1826
1773#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */ 1827#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */
@@ -1997,8 +2051,15 @@
1997#define PFA_CTL_1 0x68080 2051#define PFA_CTL_1 0x68080
1998#define PFB_CTL_1 0x68880 2052#define PFB_CTL_1 0x68880
1999#define PF_ENABLE (1<<31) 2053#define PF_ENABLE (1<<31)
2054#define PF_FILTER_MASK (3<<23)
2055#define PF_FILTER_PROGRAMMED (0<<23)
2056#define PF_FILTER_MED_3x3 (1<<23)
2057#define PF_FILTER_EDGE_ENHANCE (2<<23)
2058#define PF_FILTER_EDGE_SOFTEN (3<<23)
2000#define PFA_WIN_SZ 0x68074 2059#define PFA_WIN_SZ 0x68074
2001#define PFB_WIN_SZ 0x68874 2060#define PFB_WIN_SZ 0x68874
2061#define PFA_WIN_POS 0x68070
2062#define PFB_WIN_POS 0x68870
2002 2063
2003/* legacy palette */ 2064/* legacy palette */
2004#define LGC_PALETTE_A 0x4a000 2065#define LGC_PALETTE_A 0x4a000
@@ -2114,11 +2175,11 @@
2114#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13) 2175#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
2115#define DREF_SSC_SOURCE_DISABLE (0<<11) 2176#define DREF_SSC_SOURCE_DISABLE (0<<11)
2116#define DREF_SSC_SOURCE_ENABLE (2<<11) 2177#define DREF_SSC_SOURCE_ENABLE (2<<11)
2117#define DREF_SSC_SOURCE_MASK (2<<11) 2178#define DREF_SSC_SOURCE_MASK (3<<11)
2118#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9) 2179#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
2119#define DREF_NONSPREAD_CK505_ENABLE (1<<9) 2180#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
2120#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9) 2181#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
2121#define DREF_NONSPREAD_SOURCE_MASK (2<<9) 2182#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
2122#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) 2183#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
2123#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) 2184#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
2124#define DREF_SSC4_DOWNSPREAD (0<<6) 2185#define DREF_SSC4_DOWNSPREAD (0<<6)
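
[note] The two mask corrections above (DREF_SSC_SOURCE_MASK and DREF_NONSPREAD_SOURCE_MASK widened from 2<<n to 3<<n) matter because these are two-bit fields: with a one-bit mask, a read-modify-write that means to clear the field leaves the low bit behind. A small illustration of the failure mode:

#include <stdint.h>

#define FIELD_SHIFT 11
#define BAD_MASK    (2u << FIELD_SHIFT)   /* misses bit 11 */
#define GOOD_MASK   (3u << FIELD_SHIFT)   /* covers both bits of the field */

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t val)
{
    return (reg & ~mask) | (val << FIELD_SHIFT);
}

/* With reg = 3<<11 and val = 0, BAD_MASK leaves 1<<11 set, while
 * GOOD_MASK clears the field completely:
 *   set_field(3u << 11, BAD_MASK, 0)  == 1u << 11
 *   set_field(3u << 11, GOOD_MASK, 0) == 0
 */
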
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 20d4d19f5568..992d5617e798 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -32,11 +32,15 @@
32static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) 32static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
33{ 33{
34 struct drm_i915_private *dev_priv = dev->dev_private; 34 struct drm_i915_private *dev_priv = dev->dev_private;
35 u32 dpll_reg;
35 36
36 if (pipe == PIPE_A) 37 if (IS_IGDNG(dev)) {
37 return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); 38 dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
38 else 39 } else {
39 return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); 40 dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
41 }
42
43 return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
40} 44}
41 45
42static void i915_save_palette(struct drm_device *dev, enum pipe pipe) 46static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
@@ -49,6 +53,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
49 if (!i915_pipe_enabled(dev, pipe)) 53 if (!i915_pipe_enabled(dev, pipe))
50 return; 54 return;
51 55
56 if (IS_IGDNG(dev))
57 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
58
52 if (pipe == PIPE_A) 59 if (pipe == PIPE_A)
53 array = dev_priv->save_palette_a; 60 array = dev_priv->save_palette_a;
54 else 61 else
@@ -68,6 +75,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
68 if (!i915_pipe_enabled(dev, pipe)) 75 if (!i915_pipe_enabled(dev, pipe))
69 return; 76 return;
70 77
78 if (IS_IGDNG(dev))
79 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
80
71 if (pipe == PIPE_A) 81 if (pipe == PIPE_A)
72 array = dev_priv->save_palette_a; 82 array = dev_priv->save_palette_a;
73 else 83 else
@@ -228,13 +238,20 @@ static void i915_save_modeset_reg(struct drm_device *dev)
228 238
229 if (drm_core_check_feature(dev, DRIVER_MODESET)) 239 if (drm_core_check_feature(dev, DRIVER_MODESET))
230 return; 240 return;
241
231 /* Pipe & plane A info */ 242 /* Pipe & plane A info */
232 dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 243 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
233 dev_priv->savePIPEASRC = I915_READ(PIPEASRC); 244 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
234 dev_priv->saveFPA0 = I915_READ(FPA0); 245 if (IS_IGDNG(dev)) {
235 dev_priv->saveFPA1 = I915_READ(FPA1); 246 dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
236 dev_priv->saveDPLL_A = I915_READ(DPLL_A); 247 dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
237 if (IS_I965G(dev)) 248 dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
249 } else {
250 dev_priv->saveFPA0 = I915_READ(FPA0);
251 dev_priv->saveFPA1 = I915_READ(FPA1);
252 dev_priv->saveDPLL_A = I915_READ(DPLL_A);
253 }
254 if (IS_I965G(dev) && !IS_IGDNG(dev))
238 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); 255 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
239 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); 256 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
240 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); 257 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -242,7 +259,24 @@ static void i915_save_modeset_reg(struct drm_device *dev)
242 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); 259 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
243 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); 260 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
244 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); 261 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
245 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 262 if (!IS_IGDNG(dev))
263 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
264
265 if (IS_IGDNG(dev)) {
266 dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
267 dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
268
269 dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1);
270 dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
271 dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
272
273 dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
274 dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
275 dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
276 dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A);
277 dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A);
278 dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A);
279 }
246 280
247 dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); 281 dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
248 dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); 282 dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
@@ -259,10 +293,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
259 /* Pipe & plane B info */ 293 /* Pipe & plane B info */
260 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); 294 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
261 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); 295 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
262 dev_priv->saveFPB0 = I915_READ(FPB0); 296 if (IS_IGDNG(dev)) {
263 dev_priv->saveFPB1 = I915_READ(FPB1); 297 dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
264 dev_priv->saveDPLL_B = I915_READ(DPLL_B); 298 dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
265 if (IS_I965G(dev)) 299 dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
300 } else {
301 dev_priv->saveFPB0 = I915_READ(FPB0);
302 dev_priv->saveFPB1 = I915_READ(FPB1);
303 dev_priv->saveDPLL_B = I915_READ(DPLL_B);
304 }
305 if (IS_I965G(dev) && !IS_IGDNG(dev))
266 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); 306 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
267 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); 307 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
268 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); 308 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -270,7 +310,24 @@ static void i915_save_modeset_reg(struct drm_device *dev)
270 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); 310 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
271 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); 311 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
272 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); 312 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
273 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 313 if (!IS_IGDNG(dev))
314 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
315
316 if (IS_IGDNG(dev)) {
317 dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
318 dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
319
320 dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1);
321 dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
322 dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
323
324 dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
325 dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
326 dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
327 dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B);
328 dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B);
329 dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B);
330 }
274 331
275 dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); 332 dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
276 dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); 333 dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
@@ -285,26 +342,45 @@ static void i915_save_modeset_reg(struct drm_device *dev)
285 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); 342 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
286 return; 343 return;
287} 344}
345
288static void i915_restore_modeset_reg(struct drm_device *dev) 346static void i915_restore_modeset_reg(struct drm_device *dev)
289{ 347{
290 struct drm_i915_private *dev_priv = dev->dev_private; 348 struct drm_i915_private *dev_priv = dev->dev_private;
349 int dpll_a_reg, fpa0_reg, fpa1_reg;
350 int dpll_b_reg, fpb0_reg, fpb1_reg;
291 351
292 if (drm_core_check_feature(dev, DRIVER_MODESET)) 352 if (drm_core_check_feature(dev, DRIVER_MODESET))
293 return; 353 return;
294 354
355 if (IS_IGDNG(dev)) {
356 dpll_a_reg = PCH_DPLL_A;
357 dpll_b_reg = PCH_DPLL_B;
358 fpa0_reg = PCH_FPA0;
359 fpb0_reg = PCH_FPB0;
360 fpa1_reg = PCH_FPA1;
361 fpb1_reg = PCH_FPB1;
362 } else {
363 dpll_a_reg = DPLL_A;
364 dpll_b_reg = DPLL_B;
365 fpa0_reg = FPA0;
366 fpb0_reg = FPB0;
367 fpa1_reg = FPA1;
368 fpb1_reg = FPB1;
369 }
370
295 /* Pipe & plane A info */ 371 /* Pipe & plane A info */
296 /* Prime the clock */ 372 /* Prime the clock */
297 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { 373 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
298 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & 374 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
299 ~DPLL_VCO_ENABLE); 375 ~DPLL_VCO_ENABLE);
300 DRM_UDELAY(150); 376 DRM_UDELAY(150);
301 } 377 }
302 I915_WRITE(FPA0, dev_priv->saveFPA0); 378 I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
303 I915_WRITE(FPA1, dev_priv->saveFPA1); 379 I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
304 /* Actually enable it */ 380 /* Actually enable it */
305 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); 381 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
306 DRM_UDELAY(150); 382 DRM_UDELAY(150);
307 if (IS_I965G(dev)) 383 if (IS_I965G(dev) && !IS_IGDNG(dev))
308 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); 384 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
309 DRM_UDELAY(150); 385 DRM_UDELAY(150);
310 386
@@ -315,7 +391,24 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
315 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); 391 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
316 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); 392 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
317 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); 393 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
318 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); 394 if (!IS_IGDNG(dev))
395 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
396
397 if (IS_IGDNG(dev)) {
398 I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
399 I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
400
401 I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1);
402 I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
403 I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
404
405 I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
406 I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
407 I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
408 I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
409 I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
410 I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
411 }
319 412
320 /* Restore plane info */ 413 /* Restore plane info */
321 I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); 414 I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
@@ -337,14 +430,14 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
337 430
338 /* Pipe & plane B info */ 431 /* Pipe & plane B info */
339 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { 432 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
340 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & 433 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
341 ~DPLL_VCO_ENABLE); 434 ~DPLL_VCO_ENABLE);
342 DRM_UDELAY(150); 435 DRM_UDELAY(150);
343 } 436 }
344 I915_WRITE(FPB0, dev_priv->saveFPB0); 437 I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
345 I915_WRITE(FPB1, dev_priv->saveFPB1); 438 I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
346 /* Actually enable it */ 439 /* Actually enable it */
347 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); 440 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
348 DRM_UDELAY(150); 441 DRM_UDELAY(150);
349 if (IS_I965G(dev)) 442 if (IS_I965G(dev))
350 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); 443 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
@@ -357,7 +450,24 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
357 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); 450 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
358 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); 451 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
359 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); 452 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
360 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); 453 if (!IS_IGDNG(dev))
454 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
455
456 if (IS_IGDNG(dev)) {
457 I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
458 I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
459
460 I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1);
461 I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
462 I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
463
464 I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
465 I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
466 I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
467 I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
468 I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
469 I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
470 }
361 471
362 /* Restore plane info */ 472 /* Restore plane info */
363 I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); 473 I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
@@ -379,19 +489,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
379 489
380 return; 490 return;
381} 491}
382int i915_save_state(struct drm_device *dev) 492
493void i915_save_display(struct drm_device *dev)
383{ 494{
384 struct drm_i915_private *dev_priv = dev->dev_private; 495 struct drm_i915_private *dev_priv = dev->dev_private;
385 int i;
386
387 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
388
389 /* Render Standby */
390 if (IS_I965G(dev) && IS_MOBILE(dev))
391 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
392
393 /* Hardware status page */
394 dev_priv->saveHWS = I915_READ(HWS_PGA);
395 496
396 /* Display arbitration control */ 497 /* Display arbitration control */
397 dev_priv->saveDSPARB = I915_READ(DSPARB); 498 dev_priv->saveDSPARB = I915_READ(DSPARB);
@@ -399,6 +500,7 @@ int i915_save_state(struct drm_device *dev)
399 /* This is only meaningful in non-KMS mode */ 500 /* This is only meaningful in non-KMS mode */
400 /* Don't save them in KMS mode */ 501 /* Don't save them in KMS mode */
401 i915_save_modeset_reg(dev); 502 i915_save_modeset_reg(dev);
503
402 /* Cursor state */ 504 /* Cursor state */
403 dev_priv->saveCURACNTR = I915_READ(CURACNTR); 505 dev_priv->saveCURACNTR = I915_READ(CURACNTR);
404 dev_priv->saveCURAPOS = I915_READ(CURAPOS); 506 dev_priv->saveCURAPOS = I915_READ(CURAPOS);
@@ -410,21 +512,43 @@ int i915_save_state(struct drm_device *dev)
410 dev_priv->saveCURSIZE = I915_READ(CURSIZE); 512 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
411 513
412 /* CRT state */ 514 /* CRT state */
413 dev_priv->saveADPA = I915_READ(ADPA); 515 if (IS_IGDNG(dev)) {
516 dev_priv->saveADPA = I915_READ(PCH_ADPA);
517 } else {
518 dev_priv->saveADPA = I915_READ(ADPA);
519 }
414 520
415 /* LVDS state */ 521 /* LVDS state */
416 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); 522 if (IS_IGDNG(dev)) {
417 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 523 dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
418 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 524 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
419 if (IS_I965G(dev)) 525 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
420 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 526 dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
421 if (IS_MOBILE(dev) && !IS_I830(dev)) 527 dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
422 dev_priv->saveLVDS = I915_READ(LVDS); 528 dev_priv->saveLVDS = I915_READ(PCH_LVDS);
423 if (!IS_I830(dev) && !IS_845G(dev)) 529 } else {
530 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
531 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
532 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
533 dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
534 if (IS_I965G(dev))
535 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
536 if (IS_MOBILE(dev) && !IS_I830(dev))
537 dev_priv->saveLVDS = I915_READ(LVDS);
538 }
539
540 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
424 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 541 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
425 dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); 542
426 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); 543 if (IS_IGDNG(dev)) {
427 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); 544 dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
545 dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
546 dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
547 } else {
548 dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
549 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
550 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
551 }
428 552
429 /* Display Port state */ 553 /* Display Port state */
430 if (SUPPORTS_INTEGRATED_DP(dev)) { 554 if (SUPPORTS_INTEGRATED_DP(dev)) {
@@ -443,25 +567,162 @@ int i915_save_state(struct drm_device *dev)
443 /* FIXME: save TV & SDVO state */ 567 /* FIXME: save TV & SDVO state */
444 568
445 /* FBC state */ 569 /* FBC state */
446 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); 570 if (IS_GM45(dev)) {
447 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); 571 dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
448 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 572 } else {
449 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); 573 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
450 574 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
451 /* Interrupt state */ 575 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
452 dev_priv->saveIIR = I915_READ(IIR); 576 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
453 dev_priv->saveIER = I915_READ(IER); 577 }
454 dev_priv->saveIMR = I915_READ(IMR);
455 578
456 /* VGA state */ 579 /* VGA state */
457 dev_priv->saveVGA0 = I915_READ(VGA0); 580 dev_priv->saveVGA0 = I915_READ(VGA0);
458 dev_priv->saveVGA1 = I915_READ(VGA1); 581 dev_priv->saveVGA1 = I915_READ(VGA1);
459 dev_priv->saveVGA_PD = I915_READ(VGA_PD); 582 dev_priv->saveVGA_PD = I915_READ(VGA_PD);
460 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 583 if (IS_IGDNG(dev))
584 dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
585 else
586 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
587
588 i915_save_vga(dev);
589}
590
591void i915_restore_display(struct drm_device *dev)
592{
593 struct drm_i915_private *dev_priv = dev->dev_private;
594
595 /* Display arbitration */
596 I915_WRITE(DSPARB, dev_priv->saveDSPARB);
597
598 /* Display port ratios (must be done before clock is set) */
599 if (SUPPORTS_INTEGRATED_DP(dev)) {
600 I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
601 I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
602 I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
603 I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
604 I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
605 I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
606 I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
607 I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
608 }
609
610 /* This is only meaningful in non-KMS mode */
611 /* Don't restore them in KMS mode */
612 i915_restore_modeset_reg(dev);
613
614 /* Cursor state */
615 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
616 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
617 I915_WRITE(CURABASE, dev_priv->saveCURABASE);
618 I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
619 I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
620 I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
621 if (!IS_I9XX(dev))
622 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
623
624 /* CRT state */
625 if (IS_IGDNG(dev))
626 I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
627 else
628 I915_WRITE(ADPA, dev_priv->saveADPA);
629
630 /* LVDS state */
631 if (IS_I965G(dev) && !IS_IGDNG(dev))
632 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
633
634 if (IS_IGDNG(dev)) {
635 I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
636 } else if (IS_MOBILE(dev) && !IS_I830(dev))
637 I915_WRITE(LVDS, dev_priv->saveLVDS);
638
639 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
640 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
641
642 if (IS_IGDNG(dev)) {
643 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
644 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
645 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
646 I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
647 I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
648 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
649 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
650 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
651 } else {
652 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
653 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
654 I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
655 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
656 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
657 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
658 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
659 }
660
661 /* Display Port state */
662 if (SUPPORTS_INTEGRATED_DP(dev)) {
663 I915_WRITE(DP_B, dev_priv->saveDP_B);
664 I915_WRITE(DP_C, dev_priv->saveDP_C);
665 I915_WRITE(DP_D, dev_priv->saveDP_D);
666 }
667 /* FIXME: restore TV & SDVO state */
668
669 /* FBC info */
670 if (IS_GM45(dev)) {
671 g4x_disable_fbc(dev);
672 I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
673 } else {
674 i8xx_disable_fbc(dev);
675 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
676 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
677 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
678 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
679 }
680
681 /* VGA state */
682 if (IS_IGDNG(dev))
683 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
684 else
685 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
686 I915_WRITE(VGA0, dev_priv->saveVGA0);
687 I915_WRITE(VGA1, dev_priv->saveVGA1);
688 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
689 DRM_UDELAY(150);
690
691 i915_restore_vga(dev);
692}
693
694int i915_save_state(struct drm_device *dev)
695{
696 struct drm_i915_private *dev_priv = dev->dev_private;
697 int i;
698
699 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
700
701 /* Render Standby */
702 if (IS_I965G(dev) && IS_MOBILE(dev))
703 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
704
705 /* Hardware status page */
706 dev_priv->saveHWS = I915_READ(HWS_PGA);
707
708 i915_save_display(dev);
709
710 /* Interrupt state */
711 if (IS_IGDNG(dev)) {
712 dev_priv->saveDEIER = I915_READ(DEIER);
713 dev_priv->saveDEIMR = I915_READ(DEIMR);
714 dev_priv->saveGTIER = I915_READ(GTIER);
715 dev_priv->saveGTIMR = I915_READ(GTIMR);
716 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
717 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
718 } else {
719 dev_priv->saveIER = I915_READ(IER);
720 dev_priv->saveIMR = I915_READ(IMR);
721 }
461 722
462 /* Clock gating state */ 723 /* Clock gating state */
463 dev_priv->saveD_STATE = I915_READ(D_STATE); 724 dev_priv->saveD_STATE = I915_READ(D_STATE);
464 dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); 725 dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
465 726
466 /* Cache mode state */ 727 /* Cache mode state */
467 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 728 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -489,7 +750,6 @@ int i915_save_state(struct drm_device *dev)
489 for (i = 0; i < 8; i++) 750 for (i = 0; i < 8; i++)
490 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 751 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
491 } 752 }
492 i915_save_vga(dev);
493 753
494 return 0; 754 return 0;
495} 755}
@@ -508,9 +768,6 @@ int i915_restore_state(struct drm_device *dev)
508 /* Hardware status page */ 768 /* Hardware status page */
509 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 769 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
510 770
511 /* Display arbitration */
512 I915_WRITE(DSPARB, dev_priv->saveDSPARB);
513
514 /* Fences */ 771 /* Fences */
515 if (IS_I965G(dev)) { 772 if (IS_I965G(dev)) {
516 for (i = 0; i < 16; i++) 773 for (i = 0; i < 16; i++)
@@ -522,69 +779,21 @@ int i915_restore_state(struct drm_device *dev)
522 for (i = 0; i < 8; i++) 779 for (i = 0; i < 8; i++)
523 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); 780 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
524 } 781 }
525
526 /* Display port ratios (must be done before clock is set) */
527 if (SUPPORTS_INTEGRATED_DP(dev)) {
528 I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
529 I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
530 I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
531 I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
532 I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
533 I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
534 I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
535 I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
536 }
537 /* This is only meaningful in non-KMS mode */
538 /* Don't restore them in KMS mode */
539 i915_restore_modeset_reg(dev);
540 /* Cursor state */
541 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
542 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
543 I915_WRITE(CURABASE, dev_priv->saveCURABASE);
544 I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
545 I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
546 I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
547 if (!IS_I9XX(dev))
548 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
549
550 /* CRT state */
551 I915_WRITE(ADPA, dev_priv->saveADPA);
552 782
553 /* LVDS state */ 783 i915_restore_display(dev);
554 if (IS_I965G(dev))
555 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
556 if (IS_MOBILE(dev) && !IS_I830(dev))
557 I915_WRITE(LVDS, dev_priv->saveLVDS);
558 if (!IS_I830(dev) && !IS_845G(dev))
559 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
560
561 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
562 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
563 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
564 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
565 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
566 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
567 784
568 /* Display Port state */ 785 /* Interrupt state */
569 if (SUPPORTS_INTEGRATED_DP(dev)) { 786 if (IS_IGDNG(dev)) {
570 I915_WRITE(DP_B, dev_priv->saveDP_B); 787 I915_WRITE(DEIER, dev_priv->saveDEIER);
571 I915_WRITE(DP_C, dev_priv->saveDP_C); 788 I915_WRITE(DEIMR, dev_priv->saveDEIMR);
572 I915_WRITE(DP_D, dev_priv->saveDP_D); 789 I915_WRITE(GTIER, dev_priv->saveGTIER);
790 I915_WRITE(GTIMR, dev_priv->saveGTIMR);
791 I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
792 I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
793 } else {
794 I915_WRITE (IER, dev_priv->saveIER);
795 I915_WRITE (IMR, dev_priv->saveIMR);
573 } 796 }
574 /* FIXME: restore TV & SDVO state */
575
576 /* FBC info */
577 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
578 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
579 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
580 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
581
582 /* VGA state */
583 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
584 I915_WRITE(VGA0, dev_priv->saveVGA0);
585 I915_WRITE(VGA1, dev_priv->saveVGA1);
586 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
587 DRM_UDELAY(150);
588 797
589 /* Clock gating state */ 798 /* Clock gating state */
590 I915_WRITE (D_STATE, dev_priv->saveD_STATE); 799 I915_WRITE (D_STATE, dev_priv->saveD_STATE);
@@ -603,8 +812,6 @@ int i915_restore_state(struct drm_device *dev)
603 for (i = 0; i < 3; i++) 812 for (i = 0; i < 3; i++)
604 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 813 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
605 814
606 i915_restore_vga(dev);
607
608 return 0; 815 return 0;
609} 816}
610 817
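
[note] The refactor above splits the display register handling out into i915_save_display()/i915_restore_display() so suspend/resume and other callers can share it; every I915_READ at save time has a matching I915_WRITE at restore time, in an order that brings clocks up before dependent registers. A generic sketch of that paired pattern (reg_read()/reg_write() are hypothetical stand-ins for I915_READ()/I915_WRITE()):

#include <stdint.h>

extern uint32_t reg_read(uint32_t reg);
extern void reg_write(uint32_t reg, uint32_t val);

struct saved_reg {
    uint32_t reg;   /* MMIO offset */
    uint32_t val;   /* captured at suspend */
};

static void save_regs(struct saved_reg *t, int n)
{
    for (int i = 0; i < n; i++)
        t[i].val = reg_read(t[i].reg);
}

static void restore_regs(const struct saved_reg *t, int n)
{
    for (int i = 0; i < n; i++)   /* order matters: PLLs before MD regs */
        reg_write(t[i].reg, t[i].val);
}
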
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
new file mode 100644
index 000000000000..01840d9bc38f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -0,0 +1,316 @@
1#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
2#define _I915_TRACE_H_
3
4#include <linux/stringify.h>
5#include <linux/types.h>
6#include <linux/tracepoint.h>
7
8#include <drm/drmP.h>
9
10#undef TRACE_SYSTEM
11#define TRACE_SYSTEM i915
12#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
13#define TRACE_INCLUDE_FILE i915_trace
14
15/* object tracking */
16
17TRACE_EVENT(i915_gem_object_create,
18
19 TP_PROTO(struct drm_gem_object *obj),
20
21 TP_ARGS(obj),
22
23 TP_STRUCT__entry(
24 __field(struct drm_gem_object *, obj)
25 __field(u32, size)
26 ),
27
28 TP_fast_assign(
29 __entry->obj = obj;
30 __entry->size = obj->size;
31 ),
32
33 TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
34);
35
36TRACE_EVENT(i915_gem_object_bind,
37
38 TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
39
40 TP_ARGS(obj, gtt_offset),
41
42 TP_STRUCT__entry(
43 __field(struct drm_gem_object *, obj)
44 __field(u32, gtt_offset)
45 ),
46
47 TP_fast_assign(
48 __entry->obj = obj;
49 __entry->gtt_offset = gtt_offset;
50 ),
51
52 TP_printk("obj=%p, gtt_offset=%08x",
53 __entry->obj, __entry->gtt_offset)
54);
55
56TRACE_EVENT(i915_gem_object_clflush,
57
58 TP_PROTO(struct drm_gem_object *obj),
59
60 TP_ARGS(obj),
61
62 TP_STRUCT__entry(
63 __field(struct drm_gem_object *, obj)
64 ),
65
66 TP_fast_assign(
67 __entry->obj = obj;
68 ),
69
70 TP_printk("obj=%p", __entry->obj)
71);
72
73TRACE_EVENT(i915_gem_object_change_domain,
74
75 TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
76
77 TP_ARGS(obj, old_read_domains, old_write_domain),
78
79 TP_STRUCT__entry(
80 __field(struct drm_gem_object *, obj)
81 __field(u32, read_domains)
82 __field(u32, write_domain)
83 ),
84
85 TP_fast_assign(
86 __entry->obj = obj;
87 __entry->read_domains = obj->read_domains | (old_read_domains << 16);
88 __entry->write_domain = obj->write_domain | (old_write_domain << 16);
89 ),
90
91 TP_printk("obj=%p, read=%04x, write=%04x",
92 __entry->obj,
93 __entry->read_domains, __entry->write_domain)
94);
95
96TRACE_EVENT(i915_gem_object_get_fence,
97
98 TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
99
100 TP_ARGS(obj, fence, tiling_mode),
101
102 TP_STRUCT__entry(
103 __field(struct drm_gem_object *, obj)
104 __field(int, fence)
105 __field(int, tiling_mode)
106 ),
107
108 TP_fast_assign(
109 __entry->obj = obj;
110 __entry->fence = fence;
111 __entry->tiling_mode = tiling_mode;
112 ),
113
114 TP_printk("obj=%p, fence=%d, tiling=%d",
115 __entry->obj, __entry->fence, __entry->tiling_mode)
116);
117
118TRACE_EVENT(i915_gem_object_unbind,
119
120 TP_PROTO(struct drm_gem_object *obj),
121
122 TP_ARGS(obj),
123
124 TP_STRUCT__entry(
125 __field(struct drm_gem_object *, obj)
126 ),
127
128 TP_fast_assign(
129 __entry->obj = obj;
130 ),
131
132 TP_printk("obj=%p", __entry->obj)
133);
134
135TRACE_EVENT(i915_gem_object_destroy,
136
137 TP_PROTO(struct drm_gem_object *obj),
138
139 TP_ARGS(obj),
140
141 TP_STRUCT__entry(
142 __field(struct drm_gem_object *, obj)
143 ),
144
145 TP_fast_assign(
146 __entry->obj = obj;
147 ),
148
149 TP_printk("obj=%p", __entry->obj)
150);
151
152/* batch tracing */
153
154TRACE_EVENT(i915_gem_request_submit,
155
156 TP_PROTO(struct drm_device *dev, u32 seqno),
157
158 TP_ARGS(dev, seqno),
159
160 TP_STRUCT__entry(
161 __field(u32, dev)
162 __field(u32, seqno)
163 ),
164
165 TP_fast_assign(
166 __entry->dev = dev->primary->index;
167 __entry->seqno = seqno;
168 i915_trace_irq_get(dev, seqno);
169 ),
170
171 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
172);
173
174TRACE_EVENT(i915_gem_request_flush,
175
176 TP_PROTO(struct drm_device *dev, u32 seqno,
177 u32 flush_domains, u32 invalidate_domains),
178
179 TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
180
181 TP_STRUCT__entry(
182 __field(u32, dev)
183 __field(u32, seqno)
184 __field(u32, flush_domains)
185 __field(u32, invalidate_domains)
186 ),
187
188 TP_fast_assign(
189 __entry->dev = dev->primary->index;
190 __entry->seqno = seqno;
191 __entry->flush_domains = flush_domains;
192 __entry->invalidate_domains = invalidate_domains;
193 ),
194
195 TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
196 __entry->dev, __entry->seqno,
197 __entry->flush_domains, __entry->invalidate_domains)
198);
199
200
201TRACE_EVENT(i915_gem_request_complete,
202
203 TP_PROTO(struct drm_device *dev, u32 seqno),
204
205 TP_ARGS(dev, seqno),
206
207 TP_STRUCT__entry(
208 __field(u32, dev)
209 __field(u32, seqno)
210 ),
211
212 TP_fast_assign(
213 __entry->dev = dev->primary->index;
214 __entry->seqno = seqno;
215 ),
216
217 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
218);
219
220TRACE_EVENT(i915_gem_request_retire,
221
222 TP_PROTO(struct drm_device *dev, u32 seqno),
223
224 TP_ARGS(dev, seqno),
225
226 TP_STRUCT__entry(
227 __field(u32, dev)
228 __field(u32, seqno)
229 ),
230
231 TP_fast_assign(
232 __entry->dev = dev->primary->index;
233 __entry->seqno = seqno;
234 ),
235
236 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
237);
238
239TRACE_EVENT(i915_gem_request_wait_begin,
240
241 TP_PROTO(struct drm_device *dev, u32 seqno),
242
243 TP_ARGS(dev, seqno),
244
245 TP_STRUCT__entry(
246 __field(u32, dev)
247 __field(u32, seqno)
248 ),
249
250 TP_fast_assign(
251 __entry->dev = dev->primary->index;
252 __entry->seqno = seqno;
253 ),
254
255 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
256);
257
258TRACE_EVENT(i915_gem_request_wait_end,
259
260 TP_PROTO(struct drm_device *dev, u32 seqno),
261
262 TP_ARGS(dev, seqno),
263
264 TP_STRUCT__entry(
265 __field(u32, dev)
266 __field(u32, seqno)
267 ),
268
269 TP_fast_assign(
270 __entry->dev = dev->primary->index;
271 __entry->seqno = seqno;
272 ),
273
274 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
275);
276
277TRACE_EVENT(i915_ring_wait_begin,
278
279 TP_PROTO(struct drm_device *dev),
280
281 TP_ARGS(dev),
282
283 TP_STRUCT__entry(
284 __field(u32, dev)
285 ),
286
287 TP_fast_assign(
288 __entry->dev = dev->primary->index;
289 ),
290
291 TP_printk("dev=%u", __entry->dev)
292);
293
294TRACE_EVENT(i915_ring_wait_end,
295
296 TP_PROTO(struct drm_device *dev),
297
298 TP_ARGS(dev),
299
300 TP_STRUCT__entry(
301 __field(u32, dev)
302 ),
303
304 TP_fast_assign(
305 __entry->dev = dev->primary->index;
306 ),
307
308 TP_printk("dev=%u", __entry->dev)
309);
310
311#endif /* _I915_TRACE_H_ */
312
313/* This part must be outside protection */
314#undef TRACE_INCLUDE_PATH
315#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
316#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
new file mode 100644
index 000000000000..ead876eb6ea0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -0,0 +1,11 @@
1/*
2 * Copyright © 2009 Intel Corporation
3 *
4 * Authors:
5 * Chris Wilson <chris@chris-wilson.co.uk>
6 */
7
8#include "i915_drv.h"
9
10#define CREATE_TRACE_POINTS
11#include "i915_trace.h"
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1e28c1652fd0..96cd256e60e6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -217,6 +217,9 @@ parse_general_features(struct drm_i915_private *dev_priv,
217 if (IS_I85X(dev_priv->dev)) 217 if (IS_I85X(dev_priv->dev))
218 dev_priv->lvds_ssc_freq = 218 dev_priv->lvds_ssc_freq =
219 general->ssc_freq ? 66 : 48; 219 general->ssc_freq ? 66 : 48;
220 else if (IS_IGDNG(dev_priv->dev))
221 dev_priv->lvds_ssc_freq =
222 general->ssc_freq ? 100 : 120;
220 else 223 else
221 dev_priv->lvds_ssc_freq = 224 dev_priv->lvds_ssc_freq =
222 general->ssc_freq ? 100 : 96; 225 general->ssc_freq ? 100 : 96;
@@ -348,20 +351,18 @@ parse_driver_features(struct drm_i915_private *dev_priv,
348 struct drm_device *dev = dev_priv->dev; 351 struct drm_device *dev = dev_priv->dev;
349 struct bdb_driver_features *driver; 352 struct bdb_driver_features *driver;
350 353
351 /* set default for chips without eDP */
352 if (!SUPPORTS_EDP(dev)) {
353 dev_priv->edp_support = 0;
354 return;
355 }
356
357 driver = find_section(bdb, BDB_DRIVER_FEATURES); 354 driver = find_section(bdb, BDB_DRIVER_FEATURES);
358 if (!driver) 355 if (!driver)
359 return; 356 return;
360 357
361 if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP) 358 if (driver && SUPPORTS_EDP(dev) &&
359 driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
362 dev_priv->edp_support = 1; 360 dev_priv->edp_support = 1;
361 } else {
362 dev_priv->edp_support = 0;
363 }
363 364
364 if (driver->dual_frequency) 365 if (driver && driver->dual_frequency)
365 dev_priv->render_reclock_avail = true; 366 dev_priv->render_reclock_avail = true;
366} 367}
367 368
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 88814fa2dfd2..212e22740fc1 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -179,13 +179,10 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
179{ 179{
180 struct drm_device *dev = connector->dev; 180 struct drm_device *dev = connector->dev;
181 struct drm_i915_private *dev_priv = dev->dev_private; 181 struct drm_i915_private *dev_priv = dev->dev_private;
182 u32 adpa, temp; 182 u32 adpa;
183 bool ret; 183 bool ret;
184 184
185 temp = adpa = I915_READ(PCH_ADPA); 185 adpa = I915_READ(PCH_ADPA);
186
187 adpa &= ~ADPA_DAC_ENABLE;
188 I915_WRITE(PCH_ADPA, adpa);
189 186
190 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 187 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
191 188
@@ -212,8 +209,6 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
212 else 209 else
213 ret = false; 210 ret = false;
214 211
215 /* restore origin register */
216 I915_WRITE(PCH_ADPA, temp);
217 return ret; 212 return ret;
218} 213}
219 214
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 893903962e54..3ba6546b7c7f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,8 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27#include <linux/module.h>
28#include <linux/input.h>
27#include <linux/i2c.h> 29#include <linux/i2c.h>
28#include <linux/kernel.h> 30#include <linux/kernel.h>
29#include "drmP.h" 31#include "drmP.h"
@@ -875,7 +877,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
875 refclk, best_clock); 877 refclk, best_clock);
876 878
877 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 879 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
878 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 880 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
879 LVDS_CLKB_POWER_UP) 881 LVDS_CLKB_POWER_UP)
880 clock.p2 = limit->p2.p2_fast; 882 clock.p2 = limit->p2.p2_fast;
881 else 883 else
@@ -941,6 +943,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
941 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); 943 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
942 clock.p = (clock.p1 * clock.p2); 944 clock.p = (clock.p1 * clock.p2);
943 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; 945 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
946 clock.vco = 0;
944 memcpy(best_clock, &clock, sizeof(intel_clock_t)); 947 memcpy(best_clock, &clock, sizeof(intel_clock_t));
945 return true; 948 return true;
946} 949}
@@ -952,6 +955,241 @@ intel_wait_for_vblank(struct drm_device *dev)
952 mdelay(20); 955 mdelay(20);
953} 956}
954 957
958/* Parameters have changed, update FBC info */
959static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
960{
961 struct drm_device *dev = crtc->dev;
962 struct drm_i915_private *dev_priv = dev->dev_private;
963 struct drm_framebuffer *fb = crtc->fb;
964 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
965 struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
966 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
967 int plane, i;
968 u32 fbc_ctl, fbc_ctl2;
969
970 dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
971
972 if (fb->pitch < dev_priv->cfb_pitch)
973 dev_priv->cfb_pitch = fb->pitch;
974
975 /* FBC_CTL wants 64B units */
976 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
977 dev_priv->cfb_fence = obj_priv->fence_reg;
978 dev_priv->cfb_plane = intel_crtc->plane;
979 plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
980
981 /* Clear old tags */
982 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
983 I915_WRITE(FBC_TAG + (i * 4), 0);
984
985 /* Set it up... */
986 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
987 if (obj_priv->tiling_mode != I915_TILING_NONE)
988 fbc_ctl2 |= FBC_CTL_CPU_FENCE;
989 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
990 I915_WRITE(FBC_FENCE_OFF, crtc->y);
991
992 /* enable it... */
993 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
994 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
995 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
996 if (obj_priv->tiling_mode != I915_TILING_NONE)
997 fbc_ctl |= dev_priv->cfb_fence;
998 I915_WRITE(FBC_CONTROL, fbc_ctl);
999
 1000 DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d\n",
1001 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
1002}
1003
1004void i8xx_disable_fbc(struct drm_device *dev)
1005{
1006 struct drm_i915_private *dev_priv = dev->dev_private;
1007 u32 fbc_ctl;
1008
1009 if (!I915_HAS_FBC(dev))
1010 return;
1011
1012 /* Disable compression */
1013 fbc_ctl = I915_READ(FBC_CONTROL);
1014 fbc_ctl &= ~FBC_CTL_EN;
1015 I915_WRITE(FBC_CONTROL, fbc_ctl);
1016
1017 /* Wait for compressing bit to clear */
1018 while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
1019 ; /* nothing */
1020
1021 intel_wait_for_vblank(dev);
1022
1023 DRM_DEBUG("disabled FBC\n");
1024}
1025
1026static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
1027{
1028 struct drm_device *dev = crtc->dev;
1029 struct drm_i915_private *dev_priv = dev->dev_private;
1030
1031 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1032}
1033
1034static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1035{
1036 struct drm_device *dev = crtc->dev;
1037 struct drm_i915_private *dev_priv = dev->dev_private;
1038 struct drm_framebuffer *fb = crtc->fb;
1039 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1040 struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
1041 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1042 int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
1043 DPFC_CTL_PLANEB);
1044 unsigned long stall_watermark = 200;
1045 u32 dpfc_ctl;
1046
1047 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1048 dev_priv->cfb_fence = obj_priv->fence_reg;
1049 dev_priv->cfb_plane = intel_crtc->plane;
1050
1051 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1052 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1053 dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
1054 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1055 } else {
1056 I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1057 }
1058
1059 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1060 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1061 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1062 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1063 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1064
1065 /* enable it... */
1066 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1067
1068 DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane);
1069}
1070
1071void g4x_disable_fbc(struct drm_device *dev)
1072{
1073 struct drm_i915_private *dev_priv = dev->dev_private;
1074 u32 dpfc_ctl;
1075
1076 /* Disable compression */
1077 dpfc_ctl = I915_READ(DPFC_CONTROL);
1078 dpfc_ctl &= ~DPFC_CTL_EN;
1079 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1080 intel_wait_for_vblank(dev);
1081
1082 DRM_DEBUG("disabled FBC\n");
1083}
1084
1085static bool g4x_fbc_enabled(struct drm_crtc *crtc)
1086{
1087 struct drm_device *dev = crtc->dev;
1088 struct drm_i915_private *dev_priv = dev->dev_private;
1089
1090 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1091}
1092
1093/**
1094 * intel_update_fbc - enable/disable FBC as needed
1095 * @crtc: CRTC to point the compressor at
1096 * @mode: mode in use
1097 *
1098 * Set up the framebuffer compression hardware at mode set time. We
1099 * enable it if possible:
1100 * - plane A only (on pre-965)
1101 * - no pixel multiply/line duplication
1102 * - no alpha buffer discard
1103 * - no dual wide
1104 * - framebuffer <= 2048 in width, 1536 in height
1105 *
1106 * We can't assume that any compression will take place (worst case),
1107 * so the compressed buffer has to be the same size as the uncompressed
1108 * one. It also must reside (along with the line length buffer) in
1109 * stolen memory.
1110 *
1111 * We need to enable/disable FBC on a global basis.
1112 */
1113static void intel_update_fbc(struct drm_crtc *crtc,
1114 struct drm_display_mode *mode)
1115{
1116 struct drm_device *dev = crtc->dev;
1117 struct drm_i915_private *dev_priv = dev->dev_private;
1118 struct drm_framebuffer *fb = crtc->fb;
1119 struct intel_framebuffer *intel_fb;
1120 struct drm_i915_gem_object *obj_priv;
1121 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1122 int plane = intel_crtc->plane;
1123
1124 if (!i915_powersave)
1125 return;
1126
1127 if (!dev_priv->display.fbc_enabled ||
1128 !dev_priv->display.enable_fbc ||
1129 !dev_priv->display.disable_fbc)
1130 return;
1131
1132 if (!crtc->fb)
1133 return;
1134
1135 intel_fb = to_intel_framebuffer(fb);
1136 obj_priv = intel_fb->obj->driver_private;
1137
1138 /*
1139 * If FBC is already on, we just have to verify that we can
1140 * keep it that way...
1141 * Need to disable if:
1142 * - changing FBC params (stride, fence, mode)
1143 * - new fb is too large to fit in compressed buffer
1144 * - going to an unsupported config (interlace, pixel multiply, etc.)
1145 */
1146 if (intel_fb->obj->size > dev_priv->cfb_size) {
1147 DRM_DEBUG("framebuffer too large, disabling compression\n");
1148 goto out_disable;
1149 }
1150 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1151 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
1152 DRM_DEBUG("mode incompatible with compression, disabling\n");
1153 goto out_disable;
1154 }
1155 if ((mode->hdisplay > 2048) ||
1156 (mode->vdisplay > 1536)) {
1157 DRM_DEBUG("mode too large for compression, disabling\n");
1158 goto out_disable;
1159 }
1160 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
1161 DRM_DEBUG("plane not 0, disabling compression\n");
1162 goto out_disable;
1163 }
1164 if (obj_priv->tiling_mode != I915_TILING_X) {
1165 DRM_DEBUG("framebuffer not tiled, disabling compression\n");
1166 goto out_disable;
1167 }
1168
1169 if (dev_priv->display.fbc_enabled(crtc)) {
1170 /* We can re-enable it in this case, but need to update pitch */
1171 if (fb->pitch > dev_priv->cfb_pitch)
1172 dev_priv->display.disable_fbc(dev);
1173 if (obj_priv->fence_reg != dev_priv->cfb_fence)
1174 dev_priv->display.disable_fbc(dev);
1175 if (plane != dev_priv->cfb_plane)
1176 dev_priv->display.disable_fbc(dev);
1177 }
1178
1179 if (!dev_priv->display.fbc_enabled(crtc)) {
1180 /* Now try to turn it back on if possible */
1181 dev_priv->display.enable_fbc(crtc, 500);
1182 }
1183
1184 return;
1185
1186out_disable:
1187 DRM_DEBUG("unsupported config, disabling FBC\n");
1188 /* Multiple disables should be harmless */
1189 if (dev_priv->display.fbc_enabled(crtc))
1190 dev_priv->display.disable_fbc(dev);
1191}
1192
955static int 1193static int
956intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 1194intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
957 struct drm_framebuffer *old_fb) 1195 struct drm_framebuffer *old_fb)
@@ -964,12 +1202,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
964 struct drm_i915_gem_object *obj_priv; 1202 struct drm_i915_gem_object *obj_priv;
965 struct drm_gem_object *obj; 1203 struct drm_gem_object *obj;
966 int pipe = intel_crtc->pipe; 1204 int pipe = intel_crtc->pipe;
1205 int plane = intel_crtc->plane;
967 unsigned long Start, Offset; 1206 unsigned long Start, Offset;
968 int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR); 1207 int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
969 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); 1208 int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
970 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; 1209 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
971 int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF); 1210 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
972 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 1211 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
973 u32 dspcntr, alignment; 1212 u32 dspcntr, alignment;
974 int ret; 1213 int ret;
975 1214
@@ -979,12 +1218,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
979 return 0; 1218 return 0;
980 } 1219 }
981 1220
982 switch (pipe) { 1221 switch (plane) {
983 case 0: 1222 case 0:
984 case 1: 1223 case 1:
985 break; 1224 break;
986 default: 1225 default:
987 DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); 1226 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
988 return -EINVAL; 1227 return -EINVAL;
989 } 1228 }
990 1229
@@ -1022,9 +1261,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1022 return ret; 1261 return ret;
1023 } 1262 }
1024 1263
1025 /* Pre-i965 needs to install a fence for tiled scan-out */ 1264 /* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
1026 if (!IS_I965G(dev) && 1265 * whereas 965+ only requires a fence if using framebuffer compression.
1027 obj_priv->fence_reg == I915_FENCE_REG_NONE && 1266 * For simplicity, we always install a fence as the cost is not that onerous.
1267 */
1268 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1028 obj_priv->tiling_mode != I915_TILING_NONE) { 1269 obj_priv->tiling_mode != I915_TILING_NONE) {
1029 ret = i915_gem_object_get_fence_reg(obj); 1270 ret = i915_gem_object_get_fence_reg(obj);
1030 if (ret != 0) { 1271 if (ret != 0) {
@@ -1086,6 +1327,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1086 I915_READ(dspbase); 1327 I915_READ(dspbase);
1087 } 1328 }
1088 1329
1330 if ((IS_I965G(dev) || plane == 0))
1331 intel_update_fbc(crtc, &crtc->mode);
1332
1089 intel_wait_for_vblank(dev); 1333 intel_wait_for_vblank(dev);
1090 1334
1091 if (old_fb) { 1335 if (old_fb) {
@@ -1217,6 +1461,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1217 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; 1461 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
1218 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; 1462 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
1219 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; 1463 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
1464 int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
1220 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; 1465 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
1221 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; 1466 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
1222 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; 1467 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1268,6 +1513,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1268 } 1513 }
1269 } 1514 }
1270 1515
1516 /* Enable panel fitting for LVDS */
1517 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1518 temp = I915_READ(pf_ctl_reg);
1519 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
1520
1521 /* currently full aspect */
1522 I915_WRITE(pf_win_pos, 0);
1523
1524 I915_WRITE(pf_win_size,
1525 (dev_priv->panel_fixed_mode->hdisplay << 16) |
1526 (dev_priv->panel_fixed_mode->vdisplay));
1527 }
1528
1271 /* Enable CPU pipe */ 1529 /* Enable CPU pipe */
1272 temp = I915_READ(pipeconf_reg); 1530 temp = I915_READ(pipeconf_reg);
1273 if ((temp & PIPEACONF_ENABLE) == 0) { 1531 if ((temp & PIPEACONF_ENABLE) == 0) {
@@ -1532,9 +1790,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1532 struct drm_i915_private *dev_priv = dev->dev_private; 1790 struct drm_i915_private *dev_priv = dev->dev_private;
1533 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1791 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1534 int pipe = intel_crtc->pipe; 1792 int pipe = intel_crtc->pipe;
1793 int plane = intel_crtc->plane;
1535 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 1794 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
1536 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 1795 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
1537 int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; 1796 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
1538 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; 1797 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
1539 u32 temp; 1798 u32 temp;
1540 1799
@@ -1545,6 +1804,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1545 case DRM_MODE_DPMS_ON: 1804 case DRM_MODE_DPMS_ON:
1546 case DRM_MODE_DPMS_STANDBY: 1805 case DRM_MODE_DPMS_STANDBY:
1547 case DRM_MODE_DPMS_SUSPEND: 1806 case DRM_MODE_DPMS_SUSPEND:
1807 intel_update_watermarks(dev);
1808
1548 /* Enable the DPLL */ 1809 /* Enable the DPLL */
1549 temp = I915_READ(dpll_reg); 1810 temp = I915_READ(dpll_reg);
1550 if ((temp & DPLL_VCO_ENABLE) == 0) { 1811 if ((temp & DPLL_VCO_ENABLE) == 0) {
@@ -1577,15 +1838,21 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1577 1838
1578 intel_crtc_load_lut(crtc); 1839 intel_crtc_load_lut(crtc);
1579 1840
1841 if ((IS_I965G(dev) || plane == 0))
1842 intel_update_fbc(crtc, &crtc->mode);
1843
1580 /* Give the overlay scaler a chance to enable if it's on this pipe */ 1844 /* Give the overlay scaler a chance to enable if it's on this pipe */
1581 //intel_crtc_dpms_video(crtc, true); TODO 1845 //intel_crtc_dpms_video(crtc, true); TODO
1582 intel_update_watermarks(dev);
1583 break; 1846 break;
1584 case DRM_MODE_DPMS_OFF: 1847 case DRM_MODE_DPMS_OFF:
1585 intel_update_watermarks(dev); 1848 intel_update_watermarks(dev);
1586 /* Give the overlay scaler a chance to disable if it's on this pipe */ 1849 /* Give the overlay scaler a chance to disable if it's on this pipe */
1587 //intel_crtc_dpms_video(crtc, FALSE); TODO 1850 //intel_crtc_dpms_video(crtc, FALSE); TODO
1588 1851
1852 if (dev_priv->cfb_plane == plane &&
1853 dev_priv->display.disable_fbc)
1854 dev_priv->display.disable_fbc(dev);
1855
1589 /* Disable the VGA plane that we never use */ 1856 /* Disable the VGA plane that we never use */
1590 i915_disable_vga(dev); 1857 i915_disable_vga(dev);
1591 1858
@@ -1634,15 +1901,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1634static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) 1901static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
1635{ 1902{
1636 struct drm_device *dev = crtc->dev; 1903 struct drm_device *dev = crtc->dev;
1904 struct drm_i915_private *dev_priv = dev->dev_private;
1637 struct drm_i915_master_private *master_priv; 1905 struct drm_i915_master_private *master_priv;
1638 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1906 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1639 int pipe = intel_crtc->pipe; 1907 int pipe = intel_crtc->pipe;
1640 bool enabled; 1908 bool enabled;
1641 1909
1642 if (IS_IGDNG(dev)) 1910 dev_priv->display.dpms(crtc, mode);
1643 igdng_crtc_dpms(crtc, mode);
1644 else
1645 i9xx_crtc_dpms(crtc, mode);
1646 1911
1647 intel_crtc->dpms_mode = mode; 1912 intel_crtc->dpms_mode = mode;
1648 1913
@@ -1709,56 +1974,68 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
1709 return true; 1974 return true;
1710} 1975}
1711 1976
1977static int i945_get_display_clock_speed(struct drm_device *dev)
1978{
1979 return 400000;
1980}
1981
1982static int i915_get_display_clock_speed(struct drm_device *dev)
1983{
1984 return 333000;
1985}
1712 1986
1713/** Returns the core display clock speed for i830 - i945 */ 1987static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
1714static int intel_get_core_clock_speed(struct drm_device *dev)
1715{ 1988{
1989 return 200000;
1990}
1716 1991
1717 /* Core clock values taken from the published datasheets. 1992static int i915gm_get_display_clock_speed(struct drm_device *dev)
1718 * The 830 may go up to 166 Mhz, which we should check. 1993{
1719 */ 1994 u16 gcfgc = 0;
1720 if (IS_I945G(dev))
1721 return 400000;
1722 else if (IS_I915G(dev))
1723 return 333000;
1724 else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
1725 return 200000;
1726 else if (IS_I915GM(dev)) {
1727 u16 gcfgc = 0;
1728 1995
1729 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 1996 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
1730 1997
1731 if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 1998 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
1732 return 133000; 1999 return 133000;
1733 else { 2000 else {
1734 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 2001 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
1735 case GC_DISPLAY_CLOCK_333_MHZ: 2002 case GC_DISPLAY_CLOCK_333_MHZ:
1736 return 333000; 2003 return 333000;
1737 default: 2004 default:
1738 case GC_DISPLAY_CLOCK_190_200_MHZ: 2005 case GC_DISPLAY_CLOCK_190_200_MHZ:
1739 return 190000; 2006 return 190000;
1740 }
1741 }
1742 } else if (IS_I865G(dev))
1743 return 266000;
1744 else if (IS_I855(dev)) {
1745 u16 hpllcc = 0;
1746 /* Assume that the hardware is in the high speed state. This
1747 * should be the default.
1748 */
1749 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
1750 case GC_CLOCK_133_200:
1751 case GC_CLOCK_100_200:
1752 return 200000;
1753 case GC_CLOCK_166_250:
1754 return 250000;
1755 case GC_CLOCK_100_133:
1756 return 133000;
1757 } 2007 }
1758 } else /* 852, 830 */ 2008 }
2009}
2010
2011static int i865_get_display_clock_speed(struct drm_device *dev)
2012{
2013 return 266000;
2014}
2015
2016static int i855_get_display_clock_speed(struct drm_device *dev)
2017{
2018 u16 hpllcc = 0;
2019 /* Assume that the hardware is in the high speed state. This
2020 * should be the default.
2021 */
2022 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
2023 case GC_CLOCK_133_200:
2024 case GC_CLOCK_100_200:
2025 return 200000;
2026 case GC_CLOCK_166_250:
2027 return 250000;
2028 case GC_CLOCK_100_133:
1759 return 133000; 2029 return 133000;
2030 }
2031
2032 /* Shouldn't happen */
2033 return 0;
2034}
1760 2035
1761 return 0; /* Silence gcc warning */ 2036static int i830_get_display_clock_speed(struct drm_device *dev)
2037{
2038 return 133000;
1762} 2039}
1763 2040
1764/** 2041/**
@@ -1809,7 +2086,7 @@ fdi_reduce_ratio(u32 *num, u32 *den)
1809#define LINK_N 0x80000 2086#define LINK_N 0x80000
1810 2087
1811static void 2088static void
1812igdng_compute_m_n(int bytes_per_pixel, int nlanes, 2089igdng_compute_m_n(int bits_per_pixel, int nlanes,
1813 int pixel_clock, int link_clock, 2090 int pixel_clock, int link_clock,
1814 struct fdi_m_n *m_n) 2091 struct fdi_m_n *m_n)
1815{ 2092{
@@ -1819,7 +2096,8 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes,
1819 2096
1820 temp = (u64) DATA_N * pixel_clock; 2097 temp = (u64) DATA_N * pixel_clock;
1821 temp = div_u64(temp, link_clock); 2098 temp = div_u64(temp, link_clock);
1822 m_n->gmch_m = div_u64(temp * bytes_per_pixel, nlanes); 2099 m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
2100 m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
1823 m_n->gmch_n = DATA_N; 2101 m_n->gmch_n = DATA_N;
1824 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 2102 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
1825 2103
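A sanity check on the bits-per-pixel change, assuming the common 8 bpc pipe (bpp = 24): temp * 24 / nlanes followed by the >> 3 equals the old temp * 3 / nlanes up to integer truncation, so existing 24-bit setups compute the same FDI M/N ratio, while 6/10/12 bpc panels (18/30/36 bpp, read back from the pipe config later in this patch) now contribute their real depth.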
@@ -1867,6 +2145,13 @@ static struct intel_watermark_params igd_cursor_hplloff_wm = {
1867 IGD_CURSOR_GUARD_WM, 2145 IGD_CURSOR_GUARD_WM,
1868 IGD_FIFO_LINE_SIZE 2146 IGD_FIFO_LINE_SIZE
1869}; 2147};
2148static struct intel_watermark_params g4x_wm_info = {
2149 G4X_FIFO_SIZE,
2150 G4X_MAX_WM,
2151 G4X_MAX_WM,
2152 2,
2153 G4X_FIFO_LINE_SIZE,
2154};
1870static struct intel_watermark_params i945_wm_info = { 2155static struct intel_watermark_params i945_wm_info = {
1871 I945_FIFO_SIZE, 2156 I945_FIFO_SIZE,
1872 I915_MAX_WM, 2157 I915_MAX_WM,
@@ -1921,7 +2206,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1921{ 2206{
1922 long entries_required, wm_size; 2207 long entries_required, wm_size;
1923 2208
1924 entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000; 2209 /*
2210 * Note: we need to make sure we don't overflow for various clock &
2211 * latency values.
2212 * clocks go from a few thousand to several hundred thousand.
2213 * latency is usually a few thousand
2214 */
2215 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
2216 1000;
1925 entries_required /= wm->cacheline_size; 2217 entries_required /= wm->cacheline_size;
1926 2218
1927 DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); 2219 DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required);
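A worked example of the overflow the reordered expression avoids, assuming a 200 MHz dot clock (200000 kHz), 4 bytes per pixel, and the driver's latency_ns = 5000: the old form evaluates 200000 * 4 * 5000 = 4,000,000,000, which overflows the signed 32-bit long that entries_required is on 32-bit machines; the new form evaluates (200000 / 1000) * 4 * 5000 / 1000 = 4000 bytes fetched during the latency window, i.e. 62 FIFO entries after dividing by a 64-byte cacheline.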
@@ -1986,14 +2278,13 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
1986 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { 2278 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
1987 latency = &cxsr_latency_table[i]; 2279 latency = &cxsr_latency_table[i];
1988 if (is_desktop == latency->is_desktop && 2280 if (is_desktop == latency->is_desktop &&
1989 fsb == latency->fsb_freq && mem == latency->mem_freq) 2281 fsb == latency->fsb_freq && mem == latency->mem_freq)
1990 break; 2282 return latency;
1991 } 2283 }
1992 if (i >= ARRAY_SIZE(cxsr_latency_table)) { 2284
1993 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); 2285 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
1994 return NULL; 2286
1995 } 2287 return NULL;
1996 return latency;
1997} 2288}
1998 2289
1999static void igd_disable_cxsr(struct drm_device *dev) 2290static void igd_disable_cxsr(struct drm_device *dev)
@@ -2084,32 +2375,17 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
2084 */ 2375 */
2085const static int latency_ns = 5000; 2376const static int latency_ns = 5000;
2086 2377
2087static int intel_get_fifo_size(struct drm_device *dev, int plane) 2378static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2088{ 2379{
2089 struct drm_i915_private *dev_priv = dev->dev_private; 2380 struct drm_i915_private *dev_priv = dev->dev_private;
2090 uint32_t dsparb = I915_READ(DSPARB); 2381 uint32_t dsparb = I915_READ(DSPARB);
2091 int size; 2382 int size;
2092 2383
2093 if (IS_I9XX(dev)) { 2384 if (plane == 0)
2094 if (plane == 0)
2095 size = dsparb & 0x7f;
2096 else
2097 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
2098 (dsparb & 0x7f);
2099 } else if (IS_I85X(dev)) {
2100 if (plane == 0)
2101 size = dsparb & 0x1ff;
2102 else
2103 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
2104 (dsparb & 0x1ff);
2105 size >>= 1; /* Convert to cachelines */
2106 } else if (IS_845G(dev)) {
2107 size = dsparb & 0x7f; 2385 size = dsparb & 0x7f;
2108 size >>= 2; /* Convert to cachelines */ 2386 else
2109 } else { 2387 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
2110 size = dsparb & 0x7f; 2388 (dsparb & 0x7f);
2111 size >>= 1; /* Convert to cachelines */
2112 }
2113 2389
2114 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2390 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
2115 size); 2391 size);
@@ -2117,19 +2393,127 @@ static int intel_get_fifo_size(struct drm_device *dev, int plane)
2117 return size; 2393 return size;
2118} 2394}
2119 2395
2120static void g4x_update_wm(struct drm_device *dev) 2396static int i85x_get_fifo_size(struct drm_device *dev, int plane)
2121{ 2397{
2122 struct drm_i915_private *dev_priv = dev->dev_private; 2398 struct drm_i915_private *dev_priv = dev->dev_private;
2123 u32 fw_blc_self = I915_READ(FW_BLC_SELF); 2399 uint32_t dsparb = I915_READ(DSPARB);
2400 int size;
2124 2401
2125 if (i915_powersave) 2402 if (plane == 0)
2126 fw_blc_self |= FW_BLC_SELF_EN; 2403 size = dsparb & 0x1ff;
2127 else 2404 else
2128 fw_blc_self &= ~FW_BLC_SELF_EN; 2405 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
2129 I915_WRITE(FW_BLC_SELF, fw_blc_self); 2406 (dsparb & 0x1ff);
2407 size >>= 1; /* Convert to cachelines */
2408
2409 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
2410 size);
2411
2412 return size;
2413}
2414
2415static int i845_get_fifo_size(struct drm_device *dev, int plane)
2416{
2417 struct drm_i915_private *dev_priv = dev->dev_private;
2418 uint32_t dsparb = I915_READ(DSPARB);
2419 int size;
2420
2421 size = dsparb & 0x7f;
2422 size >>= 2; /* Convert to cachelines */
2423
2424 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
2425 size);
2426
2427 return size;
2428}
2429
2430static int i830_get_fifo_size(struct drm_device *dev, int plane)
2431{
2432 struct drm_i915_private *dev_priv = dev->dev_private;
2433 uint32_t dsparb = I915_READ(DSPARB);
2434 int size;
2435
2436 size = dsparb & 0x7f;
2437 size >>= 1; /* Convert to cachelines */
2438
2439 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
2440 size);
2441
2442 return size;
2130} 2443}
2131 2444
2132static void i965_update_wm(struct drm_device *dev) 2445static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2446 int planeb_clock, int sr_hdisplay, int pixel_size)
2447{
2448 struct drm_i915_private *dev_priv = dev->dev_private;
2449 int total_size, cacheline_size;
2450 int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr;
2451 struct intel_watermark_params planea_params, planeb_params;
2452 unsigned long line_time_us;
2453 int sr_clock, sr_entries = 0, entries_required;
2454
2455 /* Create copies of the base settings for each pipe */
2456 planea_params = planeb_params = g4x_wm_info;
2457
2458 /* Grab a couple of global values before we overwrite them */
2459 total_size = planea_params.fifo_size;
2460 cacheline_size = planea_params.cacheline_size;
2461
2462 /*
2463 * Note: we need to make sure we don't overflow for various clock &
2464 * latency values.
2465 * clocks go from a few thousand to several hundred thousand.
2466 * latency is usually a few thousand
2467 */
2468 entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
2469 1000;
2470 entries_required /= G4X_FIFO_LINE_SIZE;
2471 planea_wm = entries_required + planea_params.guard_size;
2472
2473 entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
2474 1000;
2475 entries_required /= G4X_FIFO_LINE_SIZE;
2476 planeb_wm = entries_required + planeb_params.guard_size;
2477
2478 cursora_wm = cursorb_wm = 16;
2479 cursor_sr = 32;
2480
2481 DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2482
2483 /* Calc sr entries for one plane configs */
2484 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2485 /* self-refresh has much higher latency */
2486		static const int sr_latency_ns = 12000;
2487
2488 sr_clock = planea_clock ? planea_clock : planeb_clock;
2489 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
2490
2491 /* Use ns/us then divide to preserve precision */
2492 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2493 pixel_size * sr_hdisplay) / 1000;
2494 sr_entries = roundup(sr_entries / cacheline_size, 1);
2495 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2496 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2497 }
2498
2499 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
2500 planea_wm, planeb_wm, sr_entries);
2501
2502 planea_wm &= 0x3f;
2503 planeb_wm &= 0x3f;
2504
2505 I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
2506 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
2507 (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
2508 I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
2509 (cursora_wm << DSPFW_CURSORA_SHIFT));
2510 /* HPLL off in SR has some issues on G4x... disable it */
2511 I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
2512 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
2513}
2514
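Putting numbers on the self-refresh path, assuming a single-pipe 1024-wide mode at a 65 MHz dot clock with 4 bytes per pixel: line_time_us = (1024 * 1000) / 65000 = 15, so sr_entries = ((12000 / 15) + 1) * 4 * 1024 / 1000 = 3280 bytes, or 51 cachelines at 64 bytes each, which is the value shifted into the DSPFW1 SR field.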
2515static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
2516 int unused3, int unused4)
2133{ 2517{
2134 struct drm_i915_private *dev_priv = dev->dev_private; 2518 struct drm_i915_private *dev_priv = dev->dev_private;
2135 2519
@@ -2165,8 +2549,8 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2165 cacheline_size = planea_params.cacheline_size; 2549 cacheline_size = planea_params.cacheline_size;
2166 2550
2167 /* Update per-plane FIFO sizes */ 2551 /* Update per-plane FIFO sizes */
2168 planea_params.fifo_size = intel_get_fifo_size(dev, 0); 2552 planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
2169 planeb_params.fifo_size = intel_get_fifo_size(dev, 1); 2553 planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
2170 2554
2171 planea_wm = intel_calculate_wm(planea_clock, &planea_params, 2555 planea_wm = intel_calculate_wm(planea_clock, &planea_params,
2172 pixel_size, latency_ns); 2556 pixel_size, latency_ns);
@@ -2213,14 +2597,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2213 I915_WRITE(FW_BLC2, fwater_hi); 2597 I915_WRITE(FW_BLC2, fwater_hi);
2214} 2598}
2215 2599
2216static void i830_update_wm(struct drm_device *dev, int planea_clock, 2600static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
2217 int pixel_size) 2601 int unused2, int pixel_size)
2218{ 2602{
2219 struct drm_i915_private *dev_priv = dev->dev_private; 2603 struct drm_i915_private *dev_priv = dev->dev_private;
2220 uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; 2604 uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
2221 int planea_wm; 2605 int planea_wm;
2222 2606
2223 i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0); 2607 i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
2224 2608
2225 planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, 2609 planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
2226 pixel_size, latency_ns); 2610 pixel_size, latency_ns);
@@ -2264,12 +2648,16 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock,
2264 */ 2648 */
2265static void intel_update_watermarks(struct drm_device *dev) 2649static void intel_update_watermarks(struct drm_device *dev)
2266{ 2650{
2651 struct drm_i915_private *dev_priv = dev->dev_private;
2267 struct drm_crtc *crtc; 2652 struct drm_crtc *crtc;
2268 struct intel_crtc *intel_crtc; 2653 struct intel_crtc *intel_crtc;
2269 int sr_hdisplay = 0; 2654 int sr_hdisplay = 0;
2270 unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; 2655 unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
2271 int enabled = 0, pixel_size = 0; 2656 int enabled = 0, pixel_size = 0;
2272 2657
2658 if (!dev_priv->display.update_wm)
2659 return;
2660
2273 /* Get the clock config from both planes */ 2661 /* Get the clock config from both planes */
2274 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2662 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2275 intel_crtc = to_intel_crtc(crtc); 2663 intel_crtc = to_intel_crtc(crtc);
@@ -2302,15 +2690,8 @@ static void intel_update_watermarks(struct drm_device *dev)
2302 else if (IS_IGD(dev)) 2690 else if (IS_IGD(dev))
2303 igd_disable_cxsr(dev); 2691 igd_disable_cxsr(dev);
2304 2692
2305 if (IS_G4X(dev)) 2693 dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
2306 g4x_update_wm(dev); 2694 sr_hdisplay, pixel_size);
2307 else if (IS_I965G(dev))
2308 i965_update_wm(dev);
2309 else if (IS_I9XX(dev) || IS_MOBILE(dev))
2310 i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay,
2311 pixel_size);
2312 else
2313 i830_update_wm(dev, planea_clock, pixel_size);
2314} 2695}
2315 2696
2316static int intel_crtc_mode_set(struct drm_crtc *crtc, 2697static int intel_crtc_mode_set(struct drm_crtc *crtc,
@@ -2323,10 +2704,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2323 struct drm_i915_private *dev_priv = dev->dev_private; 2704 struct drm_i915_private *dev_priv = dev->dev_private;
2324 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2705 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2325 int pipe = intel_crtc->pipe; 2706 int pipe = intel_crtc->pipe;
2707 int plane = intel_crtc->plane;
2326 int fp_reg = (pipe == 0) ? FPA0 : FPB0; 2708 int fp_reg = (pipe == 0) ? FPA0 : FPB0;
2327 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 2709 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
2328 int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; 2710 int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
2329 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 2711 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
2330 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; 2712 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
2331 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; 2713 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
2332 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; 2714 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
@@ -2334,8 +2716,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2334 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; 2716 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
2335 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; 2717 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
2336 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; 2718 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
2337 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; 2719 int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
2338 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; 2720 int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
2339 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; 2721 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
2340 int refclk, num_outputs = 0; 2722 int refclk, num_outputs = 0;
2341 intel_clock_t clock, reduced_clock; 2723 intel_clock_t clock, reduced_clock;
@@ -2453,7 +2835,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2453 2835
2454 /* FDI link */ 2836 /* FDI link */
2455 if (IS_IGDNG(dev)) { 2837 if (IS_IGDNG(dev)) {
2456 int lane, link_bw; 2838 int lane, link_bw, bpp;
2457 /* eDP doesn't require FDI link, so just set DP M/N 2839 /* eDP doesn't require FDI link, so just set DP M/N
2458 according to current link config */ 2840 according to current link config */
2459 if (is_edp) { 2841 if (is_edp) {
@@ -2472,10 +2854,72 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2472 lane = 4; 2854 lane = 4;
2473 link_bw = 270000; 2855 link_bw = 270000;
2474 } 2856 }
2475 igdng_compute_m_n(3, lane, target_clock, 2857
2858 /* determine panel color depth */
2859 temp = I915_READ(pipeconf_reg);
2860
2861 switch (temp & PIPE_BPC_MASK) {
2862 case PIPE_8BPC:
2863 bpp = 24;
2864 break;
2865 case PIPE_10BPC:
2866 bpp = 30;
2867 break;
2868 case PIPE_6BPC:
2869 bpp = 18;
2870 break;
2871 case PIPE_12BPC:
2872 bpp = 36;
2873 break;
2874 default:
2875 DRM_ERROR("unknown pipe bpc value\n");
2876 bpp = 24;
2877 }
2878
2879 igdng_compute_m_n(bpp, lane, target_clock,
2476 link_bw, &m_n); 2880 link_bw, &m_n);
2477 } 2881 }
2478 2882
2883	/* Ironlake: try to set up the display ref clock before DPLL
2884	 * enabling. This is only under driver's control after
2885	 * PCH B stepping; previous chipset steppings
2886	 * ignore this setting.
2887 */
2888 if (IS_IGDNG(dev)) {
2889 temp = I915_READ(PCH_DREF_CONTROL);
2890 /* Always enable nonspread source */
2891 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
2892 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
2893 I915_WRITE(PCH_DREF_CONTROL, temp);
2894 POSTING_READ(PCH_DREF_CONTROL);
2895
2896 temp &= ~DREF_SSC_SOURCE_MASK;
2897 temp |= DREF_SSC_SOURCE_ENABLE;
2898 I915_WRITE(PCH_DREF_CONTROL, temp);
2899 POSTING_READ(PCH_DREF_CONTROL);
2900
2901 udelay(200);
2902
2903 if (is_edp) {
2904 if (dev_priv->lvds_use_ssc) {
2905 temp |= DREF_SSC1_ENABLE;
2906 I915_WRITE(PCH_DREF_CONTROL, temp);
2907 POSTING_READ(PCH_DREF_CONTROL);
2908
2909 udelay(200);
2910
2911 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
2912 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
2913 I915_WRITE(PCH_DREF_CONTROL, temp);
2914 POSTING_READ(PCH_DREF_CONTROL);
2915 } else {
2916 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
2917 I915_WRITE(PCH_DREF_CONTROL, temp);
2918 POSTING_READ(PCH_DREF_CONTROL);
2919 }
2920 }
2921 }
2922
2479 if (IS_IGD(dev)) { 2923 if (IS_IGD(dev)) {
2480 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 2924 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
2481 if (has_reduced_clock) 2925 if (has_reduced_clock)
@@ -2568,7 +3012,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2568 enable color space conversion */ 3012 enable color space conversion */
2569 if (!IS_IGDNG(dev)) { 3013 if (!IS_IGDNG(dev)) {
2570 if (pipe == 0) 3014 if (pipe == 0)
2571 dspcntr |= DISPPLANE_SEL_PIPE_A; 3015 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
2572 else 3016 else
2573 dspcntr |= DISPPLANE_SEL_PIPE_B; 3017 dspcntr |= DISPPLANE_SEL_PIPE_B;
2574 } 3018 }
@@ -2580,7 +3024,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2580 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the 3024 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
2581 * pipe == 0 check? 3025 * pipe == 0 check?
2582 */ 3026 */
2583 if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10) 3027 if (mode->clock >
3028 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
2584 pipeconf |= PIPEACONF_DOUBLE_WIDE; 3029 pipeconf |= PIPEACONF_DOUBLE_WIDE;
2585 else 3030 else
2586 pipeconf &= ~PIPEACONF_DOUBLE_WIDE; 3031 pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
@@ -2625,6 +3070,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2625 3070
2626 lvds = I915_READ(lvds_reg); 3071 lvds = I915_READ(lvds_reg);
2627 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; 3072 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
3073 /* set the corresponsding LVDS_BORDER bit */
3074 lvds |= dev_priv->lvds_border_bits;
2628 /* Set the B0-B3 data pairs corresponding to whether we're going to 3075 /* Set the B0-B3 data pairs corresponding to whether we're going to
2629 * set the DPLLs for dual-channel mode or not. 3076 * set the DPLLs for dual-channel mode or not.
2630 */ 3077 */
@@ -2652,9 +3099,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2652 udelay(150); 3099 udelay(150);
2653 3100
2654 if (IS_I965G(dev) && !IS_IGDNG(dev)) { 3101 if (IS_I965G(dev) && !IS_IGDNG(dev)) {
2655 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3102 if (is_sdvo) {
2656 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 3103 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3104 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
2657 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); 3105 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
3106 } else
3107 I915_WRITE(dpll_md_reg, 0);
2658 } else { 3108 } else {
2659 /* write it again -- the BIOS does, after all */ 3109 /* write it again -- the BIOS does, after all */
2660 I915_WRITE(dpll_reg, dpll); 3110 I915_WRITE(dpll_reg, dpll);
@@ -2734,6 +3184,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2734 /* Flush the plane changes */ 3184 /* Flush the plane changes */
2735 ret = intel_pipe_set_base(crtc, x, y, old_fb); 3185 ret = intel_pipe_set_base(crtc, x, y, old_fb);
2736 3186
3187 if ((IS_I965G(dev) || plane == 0))
3188 intel_update_fbc(crtc, &crtc->mode);
3189
2737 intel_update_watermarks(dev); 3190 intel_update_watermarks(dev);
2738 3191
2739 drm_vblank_post_modeset(dev, pipe); 3192 drm_vblank_post_modeset(dev, pipe);
@@ -2863,6 +3316,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
2863 i915_gem_object_unpin(intel_crtc->cursor_bo); 3316 i915_gem_object_unpin(intel_crtc->cursor_bo);
2864 drm_gem_object_unreference(intel_crtc->cursor_bo); 3317 drm_gem_object_unreference(intel_crtc->cursor_bo);
2865 } 3318 }
3319
2866 mutex_unlock(&dev->struct_mutex); 3320 mutex_unlock(&dev->struct_mutex);
2867 3321
2868 intel_crtc->cursor_addr = addr; 3322 intel_crtc->cursor_addr = addr;
@@ -3555,6 +4009,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
3555 intel_crtc->lut_b[i] = i; 4009 intel_crtc->lut_b[i] = i;
3556 } 4010 }
3557 4011
4012 /* Swap pipes & planes for FBC on pre-965 */
4013 intel_crtc->pipe = pipe;
4014 intel_crtc->plane = pipe;
4015 if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
4016 DRM_DEBUG("swapping pipes & planes for FBC\n");
4017 intel_crtc->plane = ((pipe == 0) ? 1 : 0);
4018 }
4019
3558 intel_crtc->cursor_addr = 0; 4020 intel_crtc->cursor_addr = 0;
3559 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; 4021 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
3560 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 4022 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -3798,7 +4260,9 @@ void intel_init_clock_gating(struct drm_device *dev)
3798 * Disable clock gating reported to work incorrectly according to the 4260 * Disable clock gating reported to work incorrectly according to the
3799 * specs, but enable as much else as we can. 4261 * specs, but enable as much else as we can.
3800 */ 4262 */
3801 if (IS_G4X(dev)) { 4263 if (IS_IGDNG(dev)) {
4264 return;
4265 } else if (IS_G4X(dev)) {
3802 uint32_t dspclk_gate; 4266 uint32_t dspclk_gate;
3803 I915_WRITE(RENCLK_GATE_D1, 0); 4267 I915_WRITE(RENCLK_GATE_D1, 0);
3804 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 4268 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
@@ -3837,6 +4301,75 @@ void intel_init_clock_gating(struct drm_device *dev)
3837 } 4301 }
3838} 4302}
3839 4303
4304/* Set up chip specific display functions */
4305static void intel_init_display(struct drm_device *dev)
4306{
4307 struct drm_i915_private *dev_priv = dev->dev_private;
4308
4309 /* We always want a DPMS function */
4310 if (IS_IGDNG(dev))
4311 dev_priv->display.dpms = igdng_crtc_dpms;
4312 else
4313 dev_priv->display.dpms = i9xx_crtc_dpms;
4314
4315 /* Only mobile has FBC, leave pointers NULL for other chips */
4316 if (IS_MOBILE(dev)) {
4317 if (IS_GM45(dev)) {
4318 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
4319 dev_priv->display.enable_fbc = g4x_enable_fbc;
4320 dev_priv->display.disable_fbc = g4x_disable_fbc;
4321 } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
4322 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
4323 dev_priv->display.enable_fbc = i8xx_enable_fbc;
4324 dev_priv->display.disable_fbc = i8xx_disable_fbc;
4325 }
4326 /* 855GM needs testing */
4327 }
4328
4329 /* Returns the core display clock speed */
4330 if (IS_I945G(dev))
4331 dev_priv->display.get_display_clock_speed =
4332 i945_get_display_clock_speed;
4333 else if (IS_I915G(dev))
4334 dev_priv->display.get_display_clock_speed =
4335 i915_get_display_clock_speed;
4336 else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
4337 dev_priv->display.get_display_clock_speed =
4338 i9xx_misc_get_display_clock_speed;
4339 else if (IS_I915GM(dev))
4340 dev_priv->display.get_display_clock_speed =
4341 i915gm_get_display_clock_speed;
4342 else if (IS_I865G(dev))
4343 dev_priv->display.get_display_clock_speed =
4344 i865_get_display_clock_speed;
4345 else if (IS_I855(dev))
4346 dev_priv->display.get_display_clock_speed =
4347 i855_get_display_clock_speed;
4348 else /* 852, 830 */
4349 dev_priv->display.get_display_clock_speed =
4350 i830_get_display_clock_speed;
4351
4352 /* For FIFO watermark updates */
4353 if (IS_IGDNG(dev))
4354 dev_priv->display.update_wm = NULL;
4355 else if (IS_G4X(dev))
4356 dev_priv->display.update_wm = g4x_update_wm;
4357 else if (IS_I965G(dev))
4358 dev_priv->display.update_wm = i965_update_wm;
4359 else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
4360 dev_priv->display.update_wm = i9xx_update_wm;
4361 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
4362 } else {
4363 if (IS_I85X(dev))
4364 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
4365 else if (IS_845G(dev))
4366 dev_priv->display.get_fifo_size = i845_get_fifo_size;
4367 else
4368 dev_priv->display.get_fifo_size = i830_get_fifo_size;
4369 dev_priv->display.update_wm = i830_update_wm;
4370 }
4371}
4372
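The dev_priv->display table filled in above is declared in i915_drv.h, which this patch also touches but which lies outside this excerpt. Inferring the shape purely from the assignments and call sites shown here, the vtable is roughly:

struct drm_i915_display_funcs {
	void (*dpms)(struct drm_crtc *crtc, int mode);
	/* FBC hooks; all three stay NULL on non-mobile parts */
	bool (*fbc_enabled)(struct drm_crtc *crtc);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/* NULL on IGDNG; intel_update_watermarks() checks before calling */
	void (*update_wm)(struct drm_device *dev, int planea_clock,
			  int planeb_clock, int sr_hdisplay,
			  int pixel_size);
};

Callers dispatch unconditionally only through the hooks that are always set (dpms, get_display_clock_speed) and NULL-check the optional ones, as intel_update_fbc() and intel_update_watermarks() do above.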
3840void intel_modeset_init(struct drm_device *dev) 4373void intel_modeset_init(struct drm_device *dev)
3841{ 4374{
3842 struct drm_i915_private *dev_priv = dev->dev_private; 4375 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3850,6 +4383,8 @@ void intel_modeset_init(struct drm_device *dev)
3850 4383
3851 dev->mode_config.funcs = (void *)&intel_mode_funcs; 4384 dev->mode_config.funcs = (void *)&intel_mode_funcs;
3852 4385
4386 intel_init_display(dev);
4387
3853 if (IS_I965G(dev)) { 4388 if (IS_I965G(dev)) {
3854 dev->mode_config.max_width = 8192; 4389 dev->mode_config.max_width = 8192;
3855 dev->mode_config.max_height = 8192; 4390 dev->mode_config.max_height = 8192;
@@ -3915,6 +4450,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
3915 4450
3916 mutex_unlock(&dev->struct_mutex); 4451 mutex_unlock(&dev->struct_mutex);
3917 4452
4453 if (dev_priv->display.disable_fbc)
4454 dev_priv->display.disable_fbc(dev);
4455
3918 drm_mode_config_cleanup(dev); 4456 drm_mode_config_cleanup(dev);
3919} 4457}
3920 4458
@@ -3928,3 +4466,20 @@ struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
3928 4466
3929 return &intel_output->enc; 4467 return &intel_output->enc;
3930} 4468}
4469
4470/*
4471 * set vga decode state - true == enable VGA decode
4472 */
4473int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
4474{
4475 struct drm_i915_private *dev_priv = dev->dev_private;
4476 u16 gmch_ctrl;
4477
4478 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
4479 if (state)
4480 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
4481 else
4482 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
4483 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
4484 return 0;
4485}
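intel_modeset_vga_set_state() is the display-side half of a VGA arbiter decode callback; the registration itself happens in i915_dma.c, outside this excerpt. A sketch of the wrapper shape the arbiter's vga_client_register() interface expects (the wrapper name is illustrative, not quoted from the patch):

/* requires <linux/vgaarb.h> for the VGA_RSRC_* flags */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	/* flip legacy VGA decode in the GMCH, then report which
	 * resources this card still decodes */
	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}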
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f2afc4af4bc9..d83447557f9b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -232,7 +232,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
232 for (try = 0; try < 5; try++) { 232 for (try = 0; try < 5; try++) {
233 /* Load the send data into the aux channel data registers */ 233 /* Load the send data into the aux channel data registers */
234 for (i = 0; i < send_bytes; i += 4) { 234 for (i = 0; i < send_bytes; i += 4) {
235 uint32_t d = pack_aux(send + i, send_bytes - i);; 235 uint32_t d = pack_aux(send + i, send_bytes - i);
236 236
237 I915_WRITE(ch_data + i, d); 237 I915_WRITE(ch_data + i, d);
238 } 238 }
@@ -400,7 +400,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
400{ 400{
401 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 401 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
402 402
403 DRM_ERROR("i2c_init %s\n", name); 403 DRM_DEBUG_KMS("i2c_init %s\n", name);
404 dp_priv->algo.running = false; 404 dp_priv->algo.running = false;
405 dp_priv->algo.address = 0; 405 dp_priv->algo.address = 0;
406 dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; 406 dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
@@ -1263,7 +1263,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1263 1263
1264 if (IS_eDP(intel_output)) { 1264 if (IS_eDP(intel_output)) {
1265 intel_output->crtc_mask = (1 << 1); 1265 intel_output->crtc_mask = (1 << 1);
1266 intel_output->clone_mask = (1 << INTEL_OUTPUT_EDP); 1266 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
1267 } else 1267 } else
1268 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1268 intel_output->crtc_mask = (1 << 0) | (1 << 1);
1269 connector->interlace_allowed = true; 1269 connector->interlace_allowed = true;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index aa96b5221358..ef61fe9507e2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -28,6 +28,7 @@
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/i2c-id.h> 29#include <linux/i2c-id.h>
30#include <linux/i2c-algo-bit.h> 30#include <linux/i2c-algo-bit.h>
31#include "i915_drv.h"
31#include "drm_crtc.h" 32#include "drm_crtc.h"
32 33
33#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
@@ -74,6 +75,7 @@
74#define INTEL_LVDS_CLONE_BIT 14 75#define INTEL_LVDS_CLONE_BIT 14
75#define INTEL_DVO_TMDS_CLONE_BIT 15 76#define INTEL_DVO_TMDS_CLONE_BIT 15
76#define INTEL_DVO_LVDS_CLONE_BIT 16 77#define INTEL_DVO_LVDS_CLONE_BIT 16
78#define INTEL_EDP_CLONE_BIT 17
77 79
78#define INTEL_DVO_CHIP_NONE 0 80#define INTEL_DVO_CHIP_NONE 0
79#define INTEL_DVO_CHIP_LVDS 1 81#define INTEL_DVO_CHIP_LVDS 1
@@ -110,8 +112,8 @@ struct intel_output {
110 112
111struct intel_crtc { 113struct intel_crtc {
112 struct drm_crtc base; 114 struct drm_crtc base;
113 int pipe; 115 enum pipe pipe;
114 int plane; 116 enum plane plane;
115 struct drm_gem_object *cursor_bo; 117 struct drm_gem_object *cursor_bo;
116 uint32_t cursor_addr; 118 uint32_t cursor_addr;
117 u8 lut_r[256], lut_g[256], lut_b[256]; 119 u8 lut_r[256], lut_g[256], lut_b[256];
@@ -180,4 +182,5 @@ extern int intel_framebuffer_create(struct drm_device *dev,
180 struct drm_mode_fb_cmd *mode_cmd, 182 struct drm_mode_fb_cmd *mode_cmd,
181 struct drm_framebuffer **fb, 183 struct drm_framebuffer **fb,
182 struct drm_gem_object *obj); 184 struct drm_gem_object *obj);
185
183#endif /* __INTEL_DRV_H__ */ 186#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index fa304e136010..663ab6de0b58 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -223,7 +223,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
223 223
224 connector = &intel_output->base; 224 connector = &intel_output->base;
225 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 225 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
226 DRM_MODE_CONNECTOR_DVID); 226 DRM_MODE_CONNECTOR_HDMIA);
227 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); 227 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
228 228
229 intel_output->type = INTEL_OUTPUT_HDMI; 229 intel_output->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index dafc0da1c256..05598ae10c4b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -27,6 +27,7 @@
27 * Jesse Barnes <jesse.barnes@intel.com> 27 * Jesse Barnes <jesse.barnes@intel.com>
28 */ 28 */
29 29
30#include <acpi/button.h>
30#include <linux/dmi.h> 31#include <linux/dmi.h>
31#include <linux/i2c.h> 32#include <linux/i2c.h>
32#include "drmP.h" 33#include "drmP.h"
@@ -295,6 +296,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
295 goto out; 296 goto out;
296 } 297 }
297 298
299 /* full screen scale for now */
300 if (IS_IGDNG(dev))
301 goto out;
302
298 /* 965+ wants fuzzy fitting */ 303 /* 965+ wants fuzzy fitting */
299 if (IS_I965G(dev)) 304 if (IS_I965G(dev))
300 pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | 305 pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
@@ -322,8 +327,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
322 * to register description and PRM. 327 * to register description and PRM.
323 * Change the value here to see the borders for debugging 328 * Change the value here to see the borders for debugging
324 */ 329 */
325 I915_WRITE(BCLRPAT_A, 0); 330 if (!IS_IGDNG(dev)) {
326 I915_WRITE(BCLRPAT_B, 0); 331 I915_WRITE(BCLRPAT_A, 0);
332 I915_WRITE(BCLRPAT_B, 0);
333 }
327 334
328 switch (lvds_priv->fitting_mode) { 335 switch (lvds_priv->fitting_mode) {
329 case DRM_MODE_SCALE_CENTER: 336 case DRM_MODE_SCALE_CENTER:
@@ -373,7 +380,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
373 adjusted_mode->crtc_vblank_start + vsync_pos; 380 adjusted_mode->crtc_vblank_start + vsync_pos;
374 /* keep the vsync width constant */ 381 /* keep the vsync width constant */
375 adjusted_mode->crtc_vsync_end = 382 adjusted_mode->crtc_vsync_end =
376 adjusted_mode->crtc_vblank_start + vsync_width; 383 adjusted_mode->crtc_vsync_start + vsync_width;
377 border = 1; 384 border = 1;
378 break; 385 break;
379 case DRM_MODE_SCALE_ASPECT: 386 case DRM_MODE_SCALE_ASPECT:
@@ -519,6 +526,14 @@ out:
519 lvds_priv->pfit_control = pfit_control; 526 lvds_priv->pfit_control = pfit_control;
520 lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; 527 lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
521 /* 528 /*
529	 * When a border is present, the LVDS_BORDER bit
530	 * should be enabled.
531 */
532 if (border)
533 dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE;
534 else
535 dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE);
536 /*
522 * XXX: It would be nice to support lower refresh rates on the 537 * XXX: It would be nice to support lower refresh rates on the
523 * panels to reduce power consumption, and perhaps match the 538 * panels to reduce power consumption, and perhaps match the
524 * user's requested refresh rate. 539 * user's requested refresh rate.
@@ -572,7 +587,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
572 * settings. 587 * settings.
573 */ 588 */
574 589
575 /* No panel fitting yet, fixme */
576 if (IS_IGDNG(dev)) 590 if (IS_IGDNG(dev))
577 return; 591 return;
578 592
@@ -585,15 +599,33 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
585 I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); 599 I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
586} 600}
587 601
602/* Some lid devices report incorrect lid status, assume they're connected */
603static const struct dmi_system_id bad_lid_status[] = {
604 {
605 .ident = "Aspire One",
606 .matches = {
607 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
608 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
609 },
610 },
611 { }
612};
613
588/** 614/**
589 * Detect the LVDS connection. 615 * Detect the LVDS connection.
590 * 616 *
591 * This always returns CONNECTOR_STATUS_CONNECTED. This connector should only have 617 * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
592 * been set up if the LVDS was actually connected anyway. 618 * connected and closed means disconnected. We also send hotplug events as
619 * needed, using lid status notification from the input layer.
593 */ 620 */
594static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 621static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
595{ 622{
596 return connector_status_connected; 623 enum drm_connector_status status = connector_status_connected;
624
625 if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
626 status = connector_status_disconnected;
627
628 return status;
597} 629}
598 630
599/** 631/**
@@ -632,6 +664,39 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
632 return 0; 664 return 0;
633} 665}
634 666
667/*
668 * Lid events. Note the use of 'modeset_on_lid':
669 * - we set it on lid close, and reset it on open
670 * - we use it as an "only once" bit (i.e. we ignore
671 * duplicate events where it was already properly
672 * set/reset)
673 * - the suspend/resume paths will also set it to
674 * zero, since they restore the mode ("lid open").
675 */
676static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
677 void *unused)
678{
679 struct drm_i915_private *dev_priv =
680 container_of(nb, struct drm_i915_private, lid_notifier);
681 struct drm_device *dev = dev_priv->dev;
682
683 if (!acpi_lid_open()) {
684 dev_priv->modeset_on_lid = 1;
685 return NOTIFY_OK;
686 }
687
688 if (!dev_priv->modeset_on_lid)
689 return NOTIFY_OK;
690
691 dev_priv->modeset_on_lid = 0;
692
693 mutex_lock(&dev->mode_config.mutex);
694 drm_helper_resume_force_mode(dev);
695 mutex_unlock(&dev->mode_config.mutex);
696
697 return NOTIFY_OK;
698}
699
635/** 700/**
636 * intel_lvds_destroy - unregister and free LVDS structures 701 * intel_lvds_destroy - unregister and free LVDS structures
637 * @connector: connector to free 702 * @connector: connector to free
@@ -641,10 +706,14 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
641 */ 706 */
642static void intel_lvds_destroy(struct drm_connector *connector) 707static void intel_lvds_destroy(struct drm_connector *connector)
643{ 708{
709 struct drm_device *dev = connector->dev;
644 struct intel_output *intel_output = to_intel_output(connector); 710 struct intel_output *intel_output = to_intel_output(connector);
711 struct drm_i915_private *dev_priv = dev->dev_private;
645 712
646 if (intel_output->ddc_bus) 713 if (intel_output->ddc_bus)
647 intel_i2c_destroy(intel_output->ddc_bus); 714 intel_i2c_destroy(intel_output->ddc_bus);
715 if (dev_priv->lid_notifier.notifier_call)
716 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
648 drm_sysfs_connector_remove(connector); 717 drm_sysfs_connector_remove(connector);
649 drm_connector_cleanup(connector); 718 drm_connector_cleanup(connector);
650 kfree(connector); 719 kfree(connector);
@@ -1011,6 +1080,11 @@ out:
1011 pwm |= PWM_PCH_ENABLE; 1080 pwm |= PWM_PCH_ENABLE;
1012 I915_WRITE(BLC_PWM_PCH_CTL1, pwm); 1081 I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
1013 } 1082 }
1083 dev_priv->lid_notifier.notifier_call = intel_lid_notify;
1084 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
1085 DRM_DEBUG("lid notifier registration failed\n");
1086 dev_priv->lid_notifier.notifier_call = NULL;
1087 }
1014 drm_sysfs_connector_add(connector); 1088 drm_sysfs_connector_add(connector);
1015 return; 1089 return;
1016 1090
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0bf28efcf2c1..083bec2e50f9 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -135,6 +135,30 @@ struct intel_sdvo_priv {
135 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; 135 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
136 struct intel_sdvo_dtd save_output_dtd[16]; 136 struct intel_sdvo_dtd save_output_dtd[16];
137 u32 save_SDVOX; 137 u32 save_SDVOX;
138	/* add the properties for the SDVO-TV */
139 struct drm_property *left_property;
140 struct drm_property *right_property;
141 struct drm_property *top_property;
142 struct drm_property *bottom_property;
143 struct drm_property *hpos_property;
144 struct drm_property *vpos_property;
145
146	/* add the properties for the SDVO-TV/LVDS */
147 struct drm_property *brightness_property;
148 struct drm_property *contrast_property;
149 struct drm_property *saturation_property;
150 struct drm_property *hue_property;
151
152	/* variables recording the current settings of the above properties */
153 u32 left_margin, right_margin, top_margin, bottom_margin;
154	/* ranges of the margins */
155 u32 max_hscan, max_vscan;
156 u32 max_hpos, cur_hpos;
157 u32 max_vpos, cur_vpos;
158 u32 cur_brightness, max_brightness;
159 u32 cur_contrast, max_contrast;
160 u32 cur_saturation, max_saturation;
161 u32 cur_hue, max_hue;
138}; 162};
139 163
140static bool 164static bool
@@ -281,6 +305,31 @@ static const struct _sdvo_cmd_name {
281 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), 305 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
282 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), 306 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
283 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), 307 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
308 /* Add the op codes for the SDVO enhancements */
309 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H),
310 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H),
311 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H),
312 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V),
313 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V),
314 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V),
315 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
316 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
317 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
318 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
319 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
320 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
321 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
322 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
323 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
324 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
325 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
326 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
327 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
328 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
329 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
330 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
331 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
332 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
284 /* HDMI op code */ 333 /* HDMI op code */
285 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), 334 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
286 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), 335 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
@@ -981,7 +1030,7 @@ static void intel_sdvo_set_tv_format(struct intel_output *output)
981 1030
982 status = intel_sdvo_read_response(output, NULL, 0); 1031 status = intel_sdvo_read_response(output, NULL, 0);
983 if (status != SDVO_CMD_STATUS_SUCCESS) 1032 if (status != SDVO_CMD_STATUS_SUCCESS)
984 DRM_DEBUG("%s: Failed to set TV format\n", 1033 DRM_DEBUG_KMS("%s: Failed to set TV format\n",
985 SDVO_NAME(sdvo_priv)); 1034 SDVO_NAME(sdvo_priv));
986} 1035}
987 1036
@@ -1792,6 +1841,45 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
1792 return 1; 1841 return 1;
1793} 1842}
1794 1843
1844static
1845void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
1846{
1847 struct intel_output *intel_output = to_intel_output(connector);
1848 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1849 struct drm_device *dev = connector->dev;
1850
1851 if (sdvo_priv->is_tv) {
1852 if (sdvo_priv->left_property)
1853 drm_property_destroy(dev, sdvo_priv->left_property);
1854 if (sdvo_priv->right_property)
1855 drm_property_destroy(dev, sdvo_priv->right_property);
1856 if (sdvo_priv->top_property)
1857 drm_property_destroy(dev, sdvo_priv->top_property);
1858 if (sdvo_priv->bottom_property)
1859 drm_property_destroy(dev, sdvo_priv->bottom_property);
1860 if (sdvo_priv->hpos_property)
1861 drm_property_destroy(dev, sdvo_priv->hpos_property);
1862 if (sdvo_priv->vpos_property)
1863 drm_property_destroy(dev, sdvo_priv->vpos_property);
1864 }
1865 if (sdvo_priv->is_tv) {
1866 if (sdvo_priv->saturation_property)
1867 drm_property_destroy(dev,
1868 sdvo_priv->saturation_property);
1869 if (sdvo_priv->contrast_property)
1870 drm_property_destroy(dev,
1871 sdvo_priv->contrast_property);
1872 if (sdvo_priv->hue_property)
1873 drm_property_destroy(dev, sdvo_priv->hue_property);
1874 }
1875 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
1876 if (sdvo_priv->brightness_property)
1877 drm_property_destroy(dev,
1878 sdvo_priv->brightness_property);
1879 }
1880 return;
1881}
1882
1795static void intel_sdvo_destroy(struct drm_connector *connector) 1883static void intel_sdvo_destroy(struct drm_connector *connector)
1796{ 1884{
1797 struct intel_output *intel_output = to_intel_output(connector); 1885 struct intel_output *intel_output = to_intel_output(connector);
@@ -1812,6 +1900,9 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1812 drm_property_destroy(connector->dev, 1900 drm_property_destroy(connector->dev,
1813 sdvo_priv->tv_format_property); 1901 sdvo_priv->tv_format_property);
1814 1902
1903 if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
1904 intel_sdvo_destroy_enhance_property(connector);
1905
1815 drm_sysfs_connector_remove(connector); 1906 drm_sysfs_connector_remove(connector);
1816 drm_connector_cleanup(connector); 1907 drm_connector_cleanup(connector);
1817 1908
@@ -1829,6 +1920,8 @@ intel_sdvo_set_property(struct drm_connector *connector,
1829 struct drm_crtc *crtc = encoder->crtc; 1920 struct drm_crtc *crtc = encoder->crtc;
1830 int ret = 0; 1921 int ret = 0;
1831 bool changed = false; 1922 bool changed = false;
1923 uint8_t cmd, status;
1924 uint16_t temp_value;
1832 1925
1833 ret = drm_connector_property_set_value(connector, property, val); 1926 ret = drm_connector_property_set_value(connector, property, val);
1834 if (ret < 0) 1927 if (ret < 0)
@@ -1845,11 +1938,102 @@ intel_sdvo_set_property(struct drm_connector *connector,
1845 1938
1846 sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; 1939 sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
1847 changed = true; 1940 changed = true;
1848 } else {
1849 ret = -EINVAL;
1850 goto out;
1851 } 1941 }
1852 1942
1943 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
1944 cmd = 0;
1945 temp_value = val;
1946 if (sdvo_priv->left_property == property) {
1947 drm_connector_property_set_value(connector,
1948 sdvo_priv->right_property, val);
1949 if (sdvo_priv->left_margin == temp_value)
1950 goto out;
1951
1952 sdvo_priv->left_margin = temp_value;
1953 sdvo_priv->right_margin = temp_value;
1954 temp_value = sdvo_priv->max_hscan -
1955 sdvo_priv->left_margin;
1956 cmd = SDVO_CMD_SET_OVERSCAN_H;
1957 } else if (sdvo_priv->right_property == property) {
1958 drm_connector_property_set_value(connector,
1959 sdvo_priv->left_property, val);
1960 if (sdvo_priv->right_margin == temp_value)
1961 goto out;
1962
1963 sdvo_priv->left_margin = temp_value;
1964 sdvo_priv->right_margin = temp_value;
1965 temp_value = sdvo_priv->max_hscan -
1966 sdvo_priv->left_margin;
1967 cmd = SDVO_CMD_SET_OVERSCAN_H;
1968 } else if (sdvo_priv->top_property == property) {
1969 drm_connector_property_set_value(connector,
1970 sdvo_priv->bottom_property, val);
1971 if (sdvo_priv->top_margin == temp_value)
1972 goto out;
1973
1974 sdvo_priv->top_margin = temp_value;
1975 sdvo_priv->bottom_margin = temp_value;
1976 temp_value = sdvo_priv->max_vscan -
1977 sdvo_priv->top_margin;
1978 cmd = SDVO_CMD_SET_OVERSCAN_V;
1979 } else if (sdvo_priv->bottom_property == property) {
1980 drm_connector_property_set_value(connector,
1981 sdvo_priv->top_property, val);
1982 if (sdvo_priv->bottom_margin == temp_value)
1983 goto out;
1984 sdvo_priv->top_margin = temp_value;
1985 sdvo_priv->bottom_margin = temp_value;
1986 temp_value = sdvo_priv->max_vscan -
1987 sdvo_priv->top_margin;
1988 cmd = SDVO_CMD_SET_OVERSCAN_V;
1989 } else if (sdvo_priv->hpos_property == property) {
1990 if (sdvo_priv->cur_hpos == temp_value)
1991 goto out;
1992
1993 cmd = SDVO_CMD_SET_POSITION_H;
1994 sdvo_priv->cur_hpos = temp_value;
1995 } else if (sdvo_priv->vpos_property == property) {
1996 if (sdvo_priv->cur_vpos == temp_value)
1997 goto out;
1998
1999 cmd = SDVO_CMD_SET_POSITION_V;
2000 sdvo_priv->cur_vpos = temp_value;
2001 } else if (sdvo_priv->saturation_property == property) {
2002 if (sdvo_priv->cur_saturation == temp_value)
2003 goto out;
2004
2005 cmd = SDVO_CMD_SET_SATURATION;
2006 sdvo_priv->cur_saturation = temp_value;
2007 } else if (sdvo_priv->contrast_property == property) {
2008 if (sdvo_priv->cur_contrast == temp_value)
2009 goto out;
2010
2011 cmd = SDVO_CMD_SET_CONTRAST;
2012 sdvo_priv->cur_contrast = temp_value;
2013 } else if (sdvo_priv->hue_property == property) {
2014 if (sdvo_priv->cur_hue == temp_value)
2015 goto out;
2016
2017 cmd = SDVO_CMD_SET_HUE;
2018 sdvo_priv->cur_hue = temp_value;
2019 } else if (sdvo_priv->brightness_property == property) {
2020 if (sdvo_priv->cur_brightness == temp_value)
2021 goto out;
2022
2023 cmd = SDVO_CMD_SET_BRIGHTNESS;
2024 sdvo_priv->cur_brightness = temp_value;
2025 }
2026 if (cmd) {
2027 intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2);
2028 status = intel_sdvo_read_response(intel_output,
2029 NULL, 0);
2030 if (status != SDVO_CMD_STATUS_SUCCESS) {
 2031				DRM_DEBUG_KMS("Incorrect SDVO command\n");
2032 return -EINVAL;
2033 }
2034 changed = true;
2035 }
2036 }
1853 if (changed && crtc) 2037 if (changed && crtc)
1854 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, 2038 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
1855 crtc->y, crtc->fb); 2039 crtc->y, crtc->fb);
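An illustrative userspace sketch, not part of this patch: the range properties created later in this file (e.g. "brightness", "hue") reach the set_property path above through libdrm. Only the drmMode* calls are real API; the helper and its name are assumptions:

	#include <stdint.h>
	#include <string.h>
	#include <xf86drmMode.h>

	/* Look up a connector property by name and set it, e.g. "brightness". */
	static int set_connector_prop(int fd, drmModeConnector *conn,
				      const char *name, uint64_t value)
	{
		int i, ret = -1;

		for (i = 0; i < conn->count_props; i++) {
			drmModePropertyRes *prop =
				drmModeGetProperty(fd, conn->props[i]);
			if (!prop)
				continue;
			if (strcmp(prop->name, name) == 0) {
				ret = drmModeConnectorSetProperty(fd,
						conn->connector_id,
						prop->prop_id, value);
				drmModeFreeProperty(prop);
				break;
			}
			drmModeFreeProperty(prop);
		}
		return ret;
	}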
@@ -2090,6 +2274,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2090 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; 2274 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
2091 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2275 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2092 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2276 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2277 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2278 (1 << INTEL_ANALOG_CLONE_BIT);
2093 } else if (flags & SDVO_OUTPUT_LVDS0) { 2279 } else if (flags & SDVO_OUTPUT_LVDS0) {
2094 2280
2095 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2281 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
@@ -2176,6 +2362,310 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
2176 2362
2177} 2363}
2178 2364
2365static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2366{
2367 struct intel_output *intel_output = to_intel_output(connector);
2368 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
2369 struct intel_sdvo_enhancements_reply sdvo_data;
2370 struct drm_device *dev = connector->dev;
2371 uint8_t status;
2372 uint16_t response, data_value[2];
2373
2374 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2375 NULL, 0);
2376 status = intel_sdvo_read_response(intel_output, &sdvo_data,
2377 sizeof(sdvo_data));
2378 if (status != SDVO_CMD_STATUS_SUCCESS) {
2379 DRM_DEBUG_KMS(" incorrect response is returned\n");
2380 return;
2381 }
2382 response = *((uint16_t *)&sdvo_data);
2383 if (!response) {
2384 DRM_DEBUG_KMS("No enhancement is supported\n");
2385 return;
2386 }
2387 if (sdvo_priv->is_tv) {
 2388		/* when horizontal overscan is supported, add the left/right
2389 * property
2390 */
2391 if (sdvo_data.overscan_h) {
2392 intel_sdvo_write_cmd(intel_output,
2393 SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
2394 status = intel_sdvo_read_response(intel_output,
2395 &data_value, 4);
2396 if (status != SDVO_CMD_STATUS_SUCCESS) {
2397 DRM_DEBUG_KMS("Incorrect SDVO max "
2398 "h_overscan\n");
2399 return;
2400 }
2401 intel_sdvo_write_cmd(intel_output,
2402 SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
2403 status = intel_sdvo_read_response(intel_output,
2404 &response, 2);
2405 if (status != SDVO_CMD_STATUS_SUCCESS) {
2406 DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
2407 return;
2408 }
2409 sdvo_priv->max_hscan = data_value[0];
2410 sdvo_priv->left_margin = data_value[0] - response;
2411 sdvo_priv->right_margin = sdvo_priv->left_margin;
2412 sdvo_priv->left_property =
2413 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2414 "left_margin", 2);
2415 sdvo_priv->left_property->values[0] = 0;
2416 sdvo_priv->left_property->values[1] = data_value[0];
2417 drm_connector_attach_property(connector,
2418 sdvo_priv->left_property,
2419 sdvo_priv->left_margin);
2420 sdvo_priv->right_property =
2421 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2422 "right_margin", 2);
2423 sdvo_priv->right_property->values[0] = 0;
2424 sdvo_priv->right_property->values[1] = data_value[0];
2425 drm_connector_attach_property(connector,
2426 sdvo_priv->right_property,
2427 sdvo_priv->right_margin);
2428 DRM_DEBUG_KMS("h_overscan: max %d, "
2429 "default %d, current %d\n",
2430 data_value[0], data_value[1], response);
2431 }
2432 if (sdvo_data.overscan_v) {
2433 intel_sdvo_write_cmd(intel_output,
2434 SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
2435 status = intel_sdvo_read_response(intel_output,
2436 &data_value, 4);
2437 if (status != SDVO_CMD_STATUS_SUCCESS) {
2438 DRM_DEBUG_KMS("Incorrect SDVO max "
2439 "v_overscan\n");
2440 return;
2441 }
2442 intel_sdvo_write_cmd(intel_output,
2443 SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
2444 status = intel_sdvo_read_response(intel_output,
2445 &response, 2);
2446 if (status != SDVO_CMD_STATUS_SUCCESS) {
2447 DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
2448 return;
2449 }
2450 sdvo_priv->max_vscan = data_value[0];
2451 sdvo_priv->top_margin = data_value[0] - response;
2452 sdvo_priv->bottom_margin = sdvo_priv->top_margin;
2453 sdvo_priv->top_property =
2454 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2455 "top_margin", 2);
2456 sdvo_priv->top_property->values[0] = 0;
2457 sdvo_priv->top_property->values[1] = data_value[0];
2458 drm_connector_attach_property(connector,
2459 sdvo_priv->top_property,
2460 sdvo_priv->top_margin);
2461 sdvo_priv->bottom_property =
2462 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2463 "bottom_margin", 2);
2464 sdvo_priv->bottom_property->values[0] = 0;
2465 sdvo_priv->bottom_property->values[1] = data_value[0];
2466 drm_connector_attach_property(connector,
2467 sdvo_priv->bottom_property,
2468 sdvo_priv->bottom_margin);
2469 DRM_DEBUG_KMS("v_overscan: max %d, "
2470 "default %d, current %d\n",
2471 data_value[0], data_value[1], response);
2472 }
2473 if (sdvo_data.position_h) {
2474 intel_sdvo_write_cmd(intel_output,
2475 SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
2476 status = intel_sdvo_read_response(intel_output,
2477 &data_value, 4);
2478 if (status != SDVO_CMD_STATUS_SUCCESS) {
2479 DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
2480 return;
2481 }
2482 intel_sdvo_write_cmd(intel_output,
2483 SDVO_CMD_GET_POSITION_H, NULL, 0);
2484 status = intel_sdvo_read_response(intel_output,
2485 &response, 2);
2486 if (status != SDVO_CMD_STATUS_SUCCESS) {
 2487				DRM_DEBUG_KMS("Incorrect SDVO get h_position\n");
2488 return;
2489 }
2490 sdvo_priv->max_hpos = data_value[0];
2491 sdvo_priv->cur_hpos = response;
2492 sdvo_priv->hpos_property =
2493 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2494 "hpos", 2);
2495 sdvo_priv->hpos_property->values[0] = 0;
2496 sdvo_priv->hpos_property->values[1] = data_value[0];
2497 drm_connector_attach_property(connector,
2498 sdvo_priv->hpos_property,
2499 sdvo_priv->cur_hpos);
2500 DRM_DEBUG_KMS("h_position: max %d, "
2501 "default %d, current %d\n",
2502 data_value[0], data_value[1], response);
2503 }
2504 if (sdvo_data.position_v) {
2505 intel_sdvo_write_cmd(intel_output,
2506 SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
2507 status = intel_sdvo_read_response(intel_output,
2508 &data_value, 4);
2509 if (status != SDVO_CMD_STATUS_SUCCESS) {
2510 DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
2511 return;
2512 }
2513 intel_sdvo_write_cmd(intel_output,
2514 SDVO_CMD_GET_POSITION_V, NULL, 0);
2515 status = intel_sdvo_read_response(intel_output,
2516 &response, 2);
2517 if (status != SDVO_CMD_STATUS_SUCCESS) {
 2518				DRM_DEBUG_KMS("Incorrect SDVO get v_position\n");
2519 return;
2520 }
2521 sdvo_priv->max_vpos = data_value[0];
2522 sdvo_priv->cur_vpos = response;
2523 sdvo_priv->vpos_property =
2524 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2525 "vpos", 2);
2526 sdvo_priv->vpos_property->values[0] = 0;
2527 sdvo_priv->vpos_property->values[1] = data_value[0];
2528 drm_connector_attach_property(connector,
2529 sdvo_priv->vpos_property,
2530 sdvo_priv->cur_vpos);
2531 DRM_DEBUG_KMS("v_position: max %d, "
2532 "default %d, current %d\n",
2533 data_value[0], data_value[1], response);
2534 }
2535 }
2536 if (sdvo_priv->is_tv) {
2537 if (sdvo_data.saturation) {
2538 intel_sdvo_write_cmd(intel_output,
2539 SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
2540 status = intel_sdvo_read_response(intel_output,
2541 &data_value, 4);
2542 if (status != SDVO_CMD_STATUS_SUCCESS) {
2543 DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
2544 return;
2545 }
2546 intel_sdvo_write_cmd(intel_output,
2547 SDVO_CMD_GET_SATURATION, NULL, 0);
2548 status = intel_sdvo_read_response(intel_output,
2549 &response, 2);
2550 if (status != SDVO_CMD_STATUS_SUCCESS) {
2551 DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
2552 return;
2553 }
2554 sdvo_priv->max_saturation = data_value[0];
2555 sdvo_priv->cur_saturation = response;
2556 sdvo_priv->saturation_property =
2557 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2558 "saturation", 2);
2559 sdvo_priv->saturation_property->values[0] = 0;
2560 sdvo_priv->saturation_property->values[1] =
2561 data_value[0];
2562 drm_connector_attach_property(connector,
2563 sdvo_priv->saturation_property,
2564 sdvo_priv->cur_saturation);
2565 DRM_DEBUG_KMS("saturation: max %d, "
2566 "default %d, current %d\n",
2567 data_value[0], data_value[1], response);
2568 }
2569 if (sdvo_data.contrast) {
2570 intel_sdvo_write_cmd(intel_output,
2571 SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
2572 status = intel_sdvo_read_response(intel_output,
2573 &data_value, 4);
2574 if (status != SDVO_CMD_STATUS_SUCCESS) {
2575 DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
2576 return;
2577 }
2578 intel_sdvo_write_cmd(intel_output,
2579 SDVO_CMD_GET_CONTRAST, NULL, 0);
2580 status = intel_sdvo_read_response(intel_output,
2581 &response, 2);
2582 if (status != SDVO_CMD_STATUS_SUCCESS) {
2583 DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
2584 return;
2585 }
2586 sdvo_priv->max_contrast = data_value[0];
2587 sdvo_priv->cur_contrast = response;
2588 sdvo_priv->contrast_property =
2589 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2590 "contrast", 2);
2591 sdvo_priv->contrast_property->values[0] = 0;
2592 sdvo_priv->contrast_property->values[1] = data_value[0];
2593 drm_connector_attach_property(connector,
2594 sdvo_priv->contrast_property,
2595 sdvo_priv->cur_contrast);
2596 DRM_DEBUG_KMS("contrast: max %d, "
2597 "default %d, current %d\n",
2598 data_value[0], data_value[1], response);
2599 }
2600 if (sdvo_data.hue) {
2601 intel_sdvo_write_cmd(intel_output,
2602 SDVO_CMD_GET_MAX_HUE, NULL, 0);
2603 status = intel_sdvo_read_response(intel_output,
2604 &data_value, 4);
2605 if (status != SDVO_CMD_STATUS_SUCCESS) {
2606 DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
2607 return;
2608 }
2609 intel_sdvo_write_cmd(intel_output,
2610 SDVO_CMD_GET_HUE, NULL, 0);
2611 status = intel_sdvo_read_response(intel_output,
2612 &response, 2);
2613 if (status != SDVO_CMD_STATUS_SUCCESS) {
2614 DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
2615 return;
2616 }
2617 sdvo_priv->max_hue = data_value[0];
2618 sdvo_priv->cur_hue = response;
2619 sdvo_priv->hue_property =
2620 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2621 "hue", 2);
2622 sdvo_priv->hue_property->values[0] = 0;
2623 sdvo_priv->hue_property->values[1] =
2624 data_value[0];
2625 drm_connector_attach_property(connector,
2626 sdvo_priv->hue_property,
2627 sdvo_priv->cur_hue);
2628 DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
2629 data_value[0], data_value[1], response);
2630 }
2631 }
2632 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
2633 if (sdvo_data.brightness) {
2634 intel_sdvo_write_cmd(intel_output,
2635 SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
2636 status = intel_sdvo_read_response(intel_output,
2637 &data_value, 4);
2638 if (status != SDVO_CMD_STATUS_SUCCESS) {
2639 DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
2640 return;
2641 }
2642 intel_sdvo_write_cmd(intel_output,
2643 SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
2644 status = intel_sdvo_read_response(intel_output,
2645 &response, 2);
2646 if (status != SDVO_CMD_STATUS_SUCCESS) {
 2647				DRM_DEBUG_KMS("Incorrect SDVO get brightness\n");
2648 return;
2649 }
2650 sdvo_priv->max_brightness = data_value[0];
2651 sdvo_priv->cur_brightness = response;
2652 sdvo_priv->brightness_property =
2653 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2654 "brightness", 2);
2655 sdvo_priv->brightness_property->values[0] = 0;
2656 sdvo_priv->brightness_property->values[1] =
2657 data_value[0];
2658 drm_connector_attach_property(connector,
2659 sdvo_priv->brightness_property,
2660 sdvo_priv->cur_brightness);
2661 DRM_DEBUG_KMS("brightness: max %d, "
2662 "default %d, current %d\n",
2663 data_value[0], data_value[1], response);
2664 }
2665 }
2666 return;
2667}
2668
2179bool intel_sdvo_init(struct drm_device *dev, int output_device) 2669bool intel_sdvo_init(struct drm_device *dev, int output_device)
2180{ 2670{
2181 struct drm_connector *connector; 2671 struct drm_connector *connector;
@@ -2264,6 +2754,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2264 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 2754 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
2265 if (sdvo_priv->is_tv) 2755 if (sdvo_priv->is_tv)
2266 intel_sdvo_tv_create_property(connector); 2756 intel_sdvo_tv_create_property(connector);
2757
2758 if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
2759 intel_sdvo_create_enhance_property(connector);
2760
2267 drm_sysfs_connector_add(connector); 2761 drm_sysfs_connector_add(connector);
2268 2762
2269 intel_sdvo_select_ddc_bus(sdvo_priv); 2763 intel_sdvo_select_ddc_bus(sdvo_priv);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index a6c686cded54..9ca917931afb 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1082,7 +1082,8 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1083 1083
1084 /* Ensure TV refresh is close to desired refresh */ 1084 /* Ensure TV refresh is close to desired refresh */
1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10) 1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
1086 < 1000)
1086 return MODE_OK; 1087 return MODE_OK;
1087 return MODE_CLOCK_RANGE; 1088 return MODE_CLOCK_RANGE;
1088} 1089}
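Worked example for the fix above: drm_mode_vrefresh() returns whole hertz while tv_mode->refresh is stored in millihertz (e.g. 59940 for a 59.94 Hz mode). With the new code a 60 Hz request gives |59940 - 60 * 1000| = 60 < 1000, so the mode is accepted; the old comparison mixed the two units, and |59940 - 60| = 59880 is never < 10, so essentially every mode was rejected.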
@@ -1760,6 +1761,7 @@ intel_tv_init(struct drm_device *dev)
1760 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1761 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
1761 tv_priv = (struct intel_tv_priv *)(intel_output + 1); 1762 tv_priv = (struct intel_tv_priv *)(intel_output + 1);
1762 intel_output->type = INTEL_OUTPUT_TVOUT; 1763 intel_output->type = INTEL_OUTPUT_TVOUT;
1764 intel_output->crtc_mask = (1 << 0) | (1 << 1);
1763 intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); 1765 intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT);
1764 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); 1766 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
1765 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); 1767 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index b710fab21cb3..a53b848e0f17 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -239,7 +239,7 @@ static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
239 MGA_WR34, 0x00000000, 239 MGA_WR34, 0x00000000,
240 MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff); 240 MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff);
241 241
242 /* Padding required to to hardware bug. 242 /* Padding required due to hardware bug.
243 */ 243 */
244 DMA_BLOCK(MGA_DMAPAD, 0xffffffff, 244 DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
245 MGA_DMAPAD, 0xffffffff, 245 MGA_DMAPAD, 0xffffffff,
@@ -317,7 +317,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
317 MGA_WR52, MGA_G400_WR_MAGIC, /* tex1 width */ 317 MGA_WR52, MGA_G400_WR_MAGIC, /* tex1 width */
318 MGA_WR60, MGA_G400_WR_MAGIC); /* tex1 height */ 318 MGA_WR60, MGA_G400_WR_MAGIC); /* tex1 height */
319 319
320 /* Padding required to to hardware bug */ 320 /* Padding required due to hardware bug */
321 DMA_BLOCK(MGA_DMAPAD, 0xffffffff, 321 DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
322 MGA_DMAPAD, 0xffffffff, 322 MGA_DMAPAD, 0xffffffff,
323 MGA_DMAPAD, 0xffffffff, 323 MGA_DMAPAD, 0xffffffff,
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5e821a313a8c..c9e93eabcf16 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1778,6 +1778,20 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
1778 rdev->mc.real_vram_size = rdev->mc.aper_size; 1778 rdev->mc.real_vram_size = rdev->mc.aper_size;
1779} 1779}
1780 1780
1781void r100_vga_set_state(struct radeon_device *rdev, bool state)
1782{
1783 uint32_t temp;
1784
1785 temp = RREG32(RADEON_CONFIG_CNTL);
1786 if (state == false) {
1787 temp &= ~(1<<8);
1788 temp |= (1<<9);
1789 } else {
1790 temp &= ~(1<<9);
1791 }
1792 WREG32(RADEON_CONFIG_CNTL, temp);
1793}
1794
1781void r100_vram_info(struct radeon_device *rdev) 1795void r100_vram_info(struct radeon_device *rdev)
1782{ 1796{
1783 r100_vram_get_type(rdev); 1797 r100_vram_get_type(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 8d6bc12192d2..278f646bc18e 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1537,6 +1537,20 @@ int r600_startup(struct radeon_device *rdev)
1537 return 0; 1537 return 0;
1538} 1538}
1539 1539
1540void r600_vga_set_state(struct radeon_device *rdev, bool state)
1541{
1542 uint32_t temp;
1543
1544 temp = RREG32(CONFIG_CNTL);
1545 if (state == false) {
1546 temp &= ~(1<<0);
1547 temp |= (1<<1);
1548 } else {
1549 temp &= ~(1<<1);
1550 }
1551 WREG32(CONFIG_CNTL, temp);
1552}
1553
1540int r600_resume(struct radeon_device *rdev) 1554int r600_resume(struct radeon_device *rdev)
1541{ 1555{
1542 int r; 1556 int r;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index b99f45d85d88..27ab428b149b 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -78,6 +78,7 @@
78#define CB_COLOR0_MASK 0x28100 78#define CB_COLOR0_MASK 0x28100
79 79
80#define CONFIG_MEMSIZE 0x5428 80#define CONFIG_MEMSIZE 0x5428
81#define CONFIG_CNTL 0x5424
81#define CP_STAT 0x8680 82#define CP_STAT 0x8680
82#define CP_COHER_BASE 0x85F8 83#define CP_COHER_BASE 0x85F8
83#define CP_DEBUG 0xC1FC 84#define CP_DEBUG 0xC1FC
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9f0bd9847884..757f5cd37744 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -596,6 +596,7 @@ struct radeon_asic {
596 void (*fini)(struct radeon_device *rdev); 596 void (*fini)(struct radeon_device *rdev);
597 int (*resume)(struct radeon_device *rdev); 597 int (*resume)(struct radeon_device *rdev);
598 int (*suspend)(struct radeon_device *rdev); 598 int (*suspend)(struct radeon_device *rdev);
599 void (*vga_set_state)(struct radeon_device *rdev, bool state);
599 int (*gpu_reset)(struct radeon_device *rdev); 600 int (*gpu_reset)(struct radeon_device *rdev);
600 void (*gart_tlb_flush)(struct radeon_device *rdev); 601 void (*gart_tlb_flush)(struct radeon_device *rdev);
601 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); 602 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
@@ -945,6 +946,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
945#define radeon_resume(rdev) (rdev)->asic->resume((rdev)) 946#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
946#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) 947#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
947#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) 948#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
949#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
948#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) 950#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
949#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) 951#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
950#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) 952#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 94991edc839f..c18fbee387d7 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -50,6 +50,7 @@ extern int r100_suspend(struct radeon_device *rdev);
50extern int r100_resume(struct radeon_device *rdev); 50extern int r100_resume(struct radeon_device *rdev);
51uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); 51uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
52void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 52void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
53void r100_vga_set_state(struct radeon_device *rdev, bool state);
53int r100_gpu_reset(struct radeon_device *rdev); 54int r100_gpu_reset(struct radeon_device *rdev);
54u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); 55u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
55void r100_pci_gart_tlb_flush(struct radeon_device *rdev); 56void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
@@ -81,6 +82,7 @@ static struct radeon_asic r100_asic = {
81 .fini = &r100_fini, 82 .fini = &r100_fini,
82 .suspend = &r100_suspend, 83 .suspend = &r100_suspend,
83 .resume = &r100_resume, 84 .resume = &r100_resume,
85 .vga_set_state = &r100_vga_set_state,
84 .gpu_reset = &r100_gpu_reset, 86 .gpu_reset = &r100_gpu_reset,
85 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 87 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
86 .gart_set_page = &r100_pci_gart_set_page, 88 .gart_set_page = &r100_pci_gart_set_page,
@@ -135,6 +137,7 @@ static struct radeon_asic r300_asic = {
135 .fini = &r300_fini, 137 .fini = &r300_fini,
136 .suspend = &r300_suspend, 138 .suspend = &r300_suspend,
137 .resume = &r300_resume, 139 .resume = &r300_resume,
140 .vga_set_state = &r100_vga_set_state,
138 .gpu_reset = &r300_gpu_reset, 141 .gpu_reset = &r300_gpu_reset,
139 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 142 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
140 .gart_set_page = &r100_pci_gart_set_page, 143 .gart_set_page = &r100_pci_gart_set_page,
@@ -173,6 +176,7 @@ static struct radeon_asic r420_asic = {
173 .fini = &r420_fini, 176 .fini = &r420_fini,
174 .suspend = &r420_suspend, 177 .suspend = &r420_suspend,
175 .resume = &r420_resume, 178 .resume = &r420_resume,
179 .vga_set_state = &r100_vga_set_state,
176 .gpu_reset = &r300_gpu_reset, 180 .gpu_reset = &r300_gpu_reset,
177 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 181 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
178 .gart_set_page = &rv370_pcie_gart_set_page, 182 .gart_set_page = &rv370_pcie_gart_set_page,
@@ -216,6 +220,7 @@ static struct radeon_asic rs400_asic = {
216 .fini = &rs400_fini, 220 .fini = &rs400_fini,
217 .suspend = &rs400_suspend, 221 .suspend = &rs400_suspend,
218 .resume = &rs400_resume, 222 .resume = &rs400_resume,
223 .vga_set_state = &r100_vga_set_state,
219 .gpu_reset = &r300_gpu_reset, 224 .gpu_reset = &r300_gpu_reset,
220 .gart_tlb_flush = &rs400_gart_tlb_flush, 225 .gart_tlb_flush = &rs400_gart_tlb_flush,
221 .gart_set_page = &rs400_gart_set_page, 226 .gart_set_page = &rs400_gart_set_page,
@@ -263,6 +268,7 @@ static struct radeon_asic rs600_asic = {
263 .fini = &rs600_fini, 268 .fini = &rs600_fini,
264 .suspend = &rs600_suspend, 269 .suspend = &rs600_suspend,
265 .resume = &rs600_resume, 270 .resume = &rs600_resume,
271 .vga_set_state = &r100_vga_set_state,
266 .gpu_reset = &r300_gpu_reset, 272 .gpu_reset = &r300_gpu_reset,
267 .gart_tlb_flush = &rs600_gart_tlb_flush, 273 .gart_tlb_flush = &rs600_gart_tlb_flush,
268 .gart_set_page = &rs600_gart_set_page, 274 .gart_set_page = &rs600_gart_set_page,
@@ -303,6 +309,7 @@ static struct radeon_asic rs690_asic = {
303 .fini = &rs690_fini, 309 .fini = &rs690_fini,
304 .suspend = &rs690_suspend, 310 .suspend = &rs690_suspend,
305 .resume = &rs690_resume, 311 .resume = &rs690_resume,
312 .vga_set_state = &r100_vga_set_state,
306 .gpu_reset = &r300_gpu_reset, 313 .gpu_reset = &r300_gpu_reset,
307 .gart_tlb_flush = &rs400_gart_tlb_flush, 314 .gart_tlb_flush = &rs400_gart_tlb_flush,
308 .gart_set_page = &rs400_gart_set_page, 315 .gart_set_page = &rs400_gart_set_page,
@@ -349,6 +356,7 @@ static struct radeon_asic rv515_asic = {
349 .fini = &rv515_fini, 356 .fini = &rv515_fini,
350 .suspend = &rv515_suspend, 357 .suspend = &rv515_suspend,
351 .resume = &rv515_resume, 358 .resume = &rv515_resume,
359 .vga_set_state = &r100_vga_set_state,
352 .gpu_reset = &rv515_gpu_reset, 360 .gpu_reset = &rv515_gpu_reset,
353 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 361 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
354 .gart_set_page = &rv370_pcie_gart_set_page, 362 .gart_set_page = &rv370_pcie_gart_set_page,
@@ -386,6 +394,7 @@ static struct radeon_asic r520_asic = {
386 .fini = &rv515_fini, 394 .fini = &rv515_fini,
387 .suspend = &rv515_suspend, 395 .suspend = &rv515_suspend,
388 .resume = &r520_resume, 396 .resume = &r520_resume,
397 .vga_set_state = &r100_vga_set_state,
389 .gpu_reset = &rv515_gpu_reset, 398 .gpu_reset = &rv515_gpu_reset,
390 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 399 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
391 .gart_set_page = &rv370_pcie_gart_set_page, 400 .gart_set_page = &rv370_pcie_gart_set_page,
@@ -419,6 +428,7 @@ int r600_init(struct radeon_device *rdev);
419void r600_fini(struct radeon_device *rdev); 428void r600_fini(struct radeon_device *rdev);
420int r600_suspend(struct radeon_device *rdev); 429int r600_suspend(struct radeon_device *rdev);
421int r600_resume(struct radeon_device *rdev); 430int r600_resume(struct radeon_device *rdev);
431void r600_vga_set_state(struct radeon_device *rdev, bool state);
422int r600_wb_init(struct radeon_device *rdev); 432int r600_wb_init(struct radeon_device *rdev);
423void r600_wb_fini(struct radeon_device *rdev); 433void r600_wb_fini(struct radeon_device *rdev);
424void r600_cp_commit(struct radeon_device *rdev); 434void r600_cp_commit(struct radeon_device *rdev);
@@ -452,6 +462,7 @@ static struct radeon_asic r600_asic = {
452 .suspend = &r600_suspend, 462 .suspend = &r600_suspend,
453 .resume = &r600_resume, 463 .resume = &r600_resume,
454 .cp_commit = &r600_cp_commit, 464 .cp_commit = &r600_cp_commit,
465 .vga_set_state = &r600_vga_set_state,
455 .gpu_reset = &r600_gpu_reset, 466 .gpu_reset = &r600_gpu_reset,
456 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 467 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
457 .gart_set_page = &rs600_gart_set_page, 468 .gart_set_page = &rs600_gart_set_page,
@@ -491,6 +502,7 @@ static struct radeon_asic rv770_asic = {
491 .resume = &rv770_resume, 502 .resume = &rv770_resume,
492 .cp_commit = &r600_cp_commit, 503 .cp_commit = &r600_cp_commit,
493 .gpu_reset = &rv770_gpu_reset, 504 .gpu_reset = &rv770_gpu_reset,
505 .vga_set_state = &r600_vga_set_state,
494 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 506 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
495 .gart_set_page = &rs600_gart_set_page, 507 .gart_set_page = &rs600_gart_set_page,
496 .ring_test = &r600_ring_test, 508 .ring_test = &r600_ring_test,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 88c19070247f..e3f9edfa40fe 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -29,6 +29,7 @@
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h> 30#include <drm/drm_crtc_helper.h>
31#include <drm/radeon_drm.h> 31#include <drm/radeon_drm.h>
32#include <linux/vgaarb.h>
32#include "radeon_reg.h" 33#include "radeon_reg.h"
33#include "radeon.h" 34#include "radeon.h"
34#include "radeon_asic.h" 35#include "radeon_asic.h"
@@ -481,6 +482,18 @@ void radeon_combios_fini(struct radeon_device *rdev)
481{ 482{
482} 483}
483 484
 485/* if we get transitioned to only one device, take VGA back */
486static unsigned int radeon_vga_set_decode(void *cookie, bool state)
487{
488 struct radeon_device *rdev = cookie;
489 radeon_vga_set_state(rdev, state);
490 if (state)
491 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
492 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
493 else
494 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
495}
496
484void radeon_agp_disable(struct radeon_device *rdev) 497void radeon_agp_disable(struct radeon_device *rdev)
485{ 498{
486 rdev->flags &= ~RADEON_IS_AGP; 499 rdev->flags &= ~RADEON_IS_AGP;
@@ -573,9 +586,15 @@ int radeon_device_init(struct radeon_device *rdev,
573 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); 586 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
574 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); 587 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
575 588
 589	/* if we have more than one VGA card, disable the radeon VGA resources */
590 /* this will fail for cards that aren't VGA class devices, just
591 * ignore it */
592 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
593
576 r = radeon_init(rdev); 594 r = radeon_init(rdev);
577 if (r) 595 if (r)
578 return r; 596 return r;
597
579 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { 598 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
580 /* Acceleration not working on AGP card try again 599 /* Acceleration not working on AGP card try again
581 * with fallback to PCI or PCIE GART 600 * with fallback to PCI or PCIE GART
@@ -600,8 +619,8 @@ void radeon_device_fini(struct radeon_device *rdev)
600{ 619{
601 DRM_INFO("radeon: finishing device.\n"); 620 DRM_INFO("radeon: finishing device.\n");
602 rdev->shutdown = true; 621 rdev->shutdown = true;
603 /* Order matter so becarefull if you rearrange anythings */
604 radeon_fini(rdev); 622 radeon_fini(rdev);
623 vga_client_register(rdev->pdev, NULL, NULL, NULL);
605 iounmap(rdev->rmmio); 624 iounmap(rdev->rmmio);
606 rdev->rmmio = NULL; 625 rdev->rmmio = NULL;
607} 626}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f489c0de6f13..1381e06d6af3 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -536,7 +536,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
536} 536}
537 537
538static struct vm_operations_struct radeon_ttm_vm_ops; 538static struct vm_operations_struct radeon_ttm_vm_ops;
539static struct vm_operations_struct *ttm_vm_ops = NULL; 539static const struct vm_operations_struct *ttm_vm_ops = NULL;
540 540
541static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 541static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
542{ 542{
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 33de7637c0c6..1c040d040338 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -228,7 +228,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
228 vma->vm_private_data = NULL; 228 vma->vm_private_data = NULL;
229} 229}
230 230
231static struct vm_operations_struct ttm_bo_vm_ops = { 231static const struct vm_operations_struct ttm_bo_vm_ops = {
232 .fault = ttm_bo_vm_fault, 232 .fault = ttm_bo_vm_fault,
233 .open = ttm_bo_vm_open, 233 .open = ttm_bo_vm_open,
234 .close = ttm_bo_vm_close 234 .close = ttm_bo_vm_close
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
new file mode 100644
index 000000000000..790e675b13eb
--- /dev/null
+++ b/drivers/gpu/vga/Kconfig
@@ -0,0 +1,10 @@
1config VGA_ARB
2 bool "VGA Arbitration" if EMBEDDED
3 default y
4 depends on PCI
5 help
6 Some "legacy" VGA devices implemented on PCI typically have the same
7 hard-decoded addresses as they did on ISA. When multiple PCI devices
 8	  are accessed at the same time, they need some kind of coordination. Please
9 see Documentation/vgaarbiter.txt for more details. Select this to
10 enable VGA arbiter.
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile
new file mode 100644
index 000000000000..7cc8c1ed645b
--- /dev/null
+++ b/drivers/gpu/vga/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_VGA_ARB) += vgaarb.o
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
new file mode 100644
index 000000000000..1ac0c93603c9
--- /dev/null
+++ b/drivers/gpu/vga/vgaarb.c
@@ -0,0 +1,1205 @@
1/*
2 * vgaarb.c
3 *
4 * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
5 * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
6 * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
7 *
8 * Implements the VGA arbitration. For details refer to
9 * Documentation/vgaarbiter.txt
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/errno.h>
16#include <linux/init.h>
17#include <linux/list.h>
18#include <linux/sched.h>
19#include <linux/wait.h>
20#include <linux/spinlock.h>
21#include <linux/poll.h>
22#include <linux/miscdevice.h>
23
24#include <linux/uaccess.h>
25
26#include <linux/vgaarb.h>
27
28static void vga_arbiter_notify_clients(void);
29/*
30 * We keep a list of all vga devices in the system to speed
31 * up the various operations of the arbiter
32 */
33struct vga_device {
34 struct list_head list;
35 struct pci_dev *pdev;
 36	unsigned int decodes;	/* what it decodes */
 37	unsigned int owns;	/* what it owns */
 38	unsigned int locks;	/* what it locks */
39 unsigned int io_lock_cnt; /* legacy IO lock count */
40 unsigned int mem_lock_cnt; /* legacy MEM lock count */
41 unsigned int io_norm_cnt; /* normal IO count */
42 unsigned int mem_norm_cnt; /* normal MEM count */
43
44 /* allow IRQ enable/disable hook */
45 void *cookie;
46 void (*irq_set_state)(void *cookie, bool enable);
47 unsigned int (*set_vga_decode)(void *cookie, bool decode);
48};
49
50static LIST_HEAD(vga_list);
51static int vga_count, vga_decode_count;
52static bool vga_arbiter_used;
53static DEFINE_SPINLOCK(vga_lock);
54static DECLARE_WAIT_QUEUE_HEAD(vga_wait_queue);
55
56
57static const char *vga_iostate_to_str(unsigned int iostate)
58{
59 /* Ignore VGA_RSRC_IO and VGA_RSRC_MEM */
60 iostate &= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
61 switch (iostate) {
62 case VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM:
63 return "io+mem";
64 case VGA_RSRC_LEGACY_IO:
65 return "io";
66 case VGA_RSRC_LEGACY_MEM:
67 return "mem";
68 }
69 return "none";
70}
71
72static int vga_str_to_iostate(char *buf, int str_size, int *io_state)
73{
74 /* we could in theory hand out locks on IO and mem
75 * separately to userspace but it can cause deadlocks */
76 if (strncmp(buf, "none", 4) == 0) {
77 *io_state = VGA_RSRC_NONE;
78 return 1;
79 }
80
 81	/* XXX We're not checking the str_size! */
82 if (strncmp(buf, "io+mem", 6) == 0)
83 goto both;
84 else if (strncmp(buf, "io", 2) == 0)
85 goto both;
86 else if (strncmp(buf, "mem", 3) == 0)
87 goto both;
88 return 0;
89both:
90 *io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
91 return 1;
92}
93
94#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 95/* this is only used as a cookie - it should not be dereferenced */
96static struct pci_dev *vga_default;
97#endif
98
99static void vga_arb_device_card_gone(struct pci_dev *pdev);
100
101/* Find somebody in our list */
102static struct vga_device *vgadev_find(struct pci_dev *pdev)
103{
104 struct vga_device *vgadev;
105
106 list_for_each_entry(vgadev, &vga_list, list)
107 if (pdev == vgadev->pdev)
108 return vgadev;
109 return NULL;
110}
111
112/* Returns the default VGA device (vgacon's babe) */
113#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
114struct pci_dev *vga_default_device(void)
115{
116 return vga_default;
117}
118#endif
119
120static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
121{
122 if (vgadev->irq_set_state)
123 vgadev->irq_set_state(vgadev->cookie, state);
124}
125
126
127/* If we don't ever use VGA arb we should avoid
128 turning off anything anywhere due to old X servers getting
129 confused about the boot device not being VGA */
130static void vga_check_first_use(void)
131{
132 /* we should inform all GPUs in the system that
 133	 * VGA arbitration has occurred and that they should try
 134	 * to disable resources if they can */
135 if (!vga_arbiter_used) {
136 vga_arbiter_used = true;
137 vga_arbiter_notify_clients();
138 }
139}
140
141static struct vga_device *__vga_tryget(struct vga_device *vgadev,
142 unsigned int rsrc)
143{
144 unsigned int wants, legacy_wants, match;
145 struct vga_device *conflict;
146 unsigned int pci_bits;
 147	/* Account for "normal" resources to lock. If we decode the legacy
148 * counterpart, we need to request it as well
149 */
150 if ((rsrc & VGA_RSRC_NORMAL_IO) &&
151 (vgadev->decodes & VGA_RSRC_LEGACY_IO))
152 rsrc |= VGA_RSRC_LEGACY_IO;
153 if ((rsrc & VGA_RSRC_NORMAL_MEM) &&
154 (vgadev->decodes & VGA_RSRC_LEGACY_MEM))
155 rsrc |= VGA_RSRC_LEGACY_MEM;
156
157 pr_devel("%s: %d\n", __func__, rsrc);
158 pr_devel("%s: owns: %d\n", __func__, vgadev->owns);
159
160 /* Check what resources we need to acquire */
161 wants = rsrc & ~vgadev->owns;
162
163 /* We already own everything, just mark locked & bye bye */
164 if (wants == 0)
165 goto lock_them;
166
167 /* We don't need to request a legacy resource, we just enable
168 * appropriate decoding and go
169 */
170 legacy_wants = wants & VGA_RSRC_LEGACY_MASK;
171 if (legacy_wants == 0)
172 goto enable_them;
173
 174	/* Ok, we don't, let's find out who we need to kick off */
175 list_for_each_entry(conflict, &vga_list, list) {
176 unsigned int lwants = legacy_wants;
177 unsigned int change_bridge = 0;
178
179 /* Don't conflict with myself */
180 if (vgadev == conflict)
181 continue;
182
183 /* Check if the architecture allows a conflict between those
184 * 2 devices or if they are on separate domains
185 */
186 if (!vga_conflicts(vgadev->pdev, conflict->pdev))
187 continue;
188
 189		/* We have a possible conflict. Before we go further, we must
190 * check if we sit on the same bus as the conflicting device.
 191		 * If we don't, then we must tie both IO and MEM resources
192 * together since there is only a single bit controlling
193 * VGA forwarding on P2P bridges
194 */
195 if (vgadev->pdev->bus != conflict->pdev->bus) {
196 change_bridge = 1;
197 lwants = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
198 }
199
200 /* Check if the guy has a lock on the resource. If he does,
201 * return the conflicting entry
202 */
203 if (conflict->locks & lwants)
204 return conflict;
205
206 /* Ok, now check if he owns the resource we want. We don't need
 207		 * to check "decodes" since it should be impossible to
 208		 * own legacy resources you don't decode unless I have a bug
209 * in this code...
210 */
211 WARN_ON(conflict->owns & ~conflict->decodes);
212 match = lwants & conflict->owns;
213 if (!match)
214 continue;
215
216 /* looks like he doesn't have a lock, we can steal
217 * them from him
218 */
219 vga_irq_set_state(conflict, false);
220
221 pci_bits = 0;
222 if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
223 pci_bits |= PCI_COMMAND_MEMORY;
224 if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
225 pci_bits |= PCI_COMMAND_IO;
226
227 pci_set_vga_state(conflict->pdev, false, pci_bits,
228 change_bridge);
229 conflict->owns &= ~lwants;
230 /* If he also owned non-legacy, that is no longer the case */
231 if (lwants & VGA_RSRC_LEGACY_MEM)
232 conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
233 if (lwants & VGA_RSRC_LEGACY_IO)
234 conflict->owns &= ~VGA_RSRC_NORMAL_IO;
235 }
236
237enable_them:
238 /* ok dude, we got it, everybody conflicting has been disabled, let's
239 * enable us. Make sure we don't mark a bit in "owns" that we don't
240 * also have in "decodes". We can lock resources we don't decode but
241 * not own them.
242 */
243 pci_bits = 0;
244 if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
245 pci_bits |= PCI_COMMAND_MEMORY;
246 if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
247 pci_bits |= PCI_COMMAND_IO;
248 pci_set_vga_state(vgadev->pdev, true, pci_bits, !!(wants & VGA_RSRC_LEGACY_MASK));
249
250 vga_irq_set_state(vgadev, true);
251 vgadev->owns |= (wants & vgadev->decodes);
252lock_them:
253 vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
254 if (rsrc & VGA_RSRC_LEGACY_IO)
255 vgadev->io_lock_cnt++;
256 if (rsrc & VGA_RSRC_LEGACY_MEM)
257 vgadev->mem_lock_cnt++;
258 if (rsrc & VGA_RSRC_NORMAL_IO)
259 vgadev->io_norm_cnt++;
260 if (rsrc & VGA_RSRC_NORMAL_MEM)
261 vgadev->mem_norm_cnt++;
262
263 return NULL;
264}
265
266static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
267{
268 unsigned int old_locks = vgadev->locks;
269
270 pr_devel("%s\n", __func__);
271
272 /* Update our counters, and account for equivalent legacy resources
273 * if we decode them
274 */
275 if ((rsrc & VGA_RSRC_NORMAL_IO) && vgadev->io_norm_cnt > 0) {
276 vgadev->io_norm_cnt--;
277 if (vgadev->decodes & VGA_RSRC_LEGACY_IO)
278 rsrc |= VGA_RSRC_LEGACY_IO;
279 }
280 if ((rsrc & VGA_RSRC_NORMAL_MEM) && vgadev->mem_norm_cnt > 0) {
281 vgadev->mem_norm_cnt--;
282 if (vgadev->decodes & VGA_RSRC_LEGACY_MEM)
283 rsrc |= VGA_RSRC_LEGACY_MEM;
284 }
285 if ((rsrc & VGA_RSRC_LEGACY_IO) && vgadev->io_lock_cnt > 0)
286 vgadev->io_lock_cnt--;
287 if ((rsrc & VGA_RSRC_LEGACY_MEM) && vgadev->mem_lock_cnt > 0)
288 vgadev->mem_lock_cnt--;
289
290 /* Just clear lock bits, we do lazy operations so we don't really
291 * have to bother about anything else at this point
292 */
293 if (vgadev->io_lock_cnt == 0)
294 vgadev->locks &= ~VGA_RSRC_LEGACY_IO;
295 if (vgadev->mem_lock_cnt == 0)
296 vgadev->locks &= ~VGA_RSRC_LEGACY_MEM;
297
 298	/* If we actually released something, kick the wait queue in
 299	 * case somebody was waiting
300 */
301 if (old_locks != vgadev->locks)
302 wake_up_all(&vga_wait_queue);
303}
304
305int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
306{
307 struct vga_device *vgadev, *conflict;
308 unsigned long flags;
309 wait_queue_t wait;
310 int rc = 0;
311
312 vga_check_first_use();
 313	/* The one who calls us should check for this, but let's be sure... */
314 if (pdev == NULL)
315 pdev = vga_default_device();
316 if (pdev == NULL)
317 return 0;
318
319 for (;;) {
320 spin_lock_irqsave(&vga_lock, flags);
321 vgadev = vgadev_find(pdev);
322 if (vgadev == NULL) {
323 spin_unlock_irqrestore(&vga_lock, flags);
324 rc = -ENODEV;
325 break;
326 }
327 conflict = __vga_tryget(vgadev, rsrc);
328 spin_unlock_irqrestore(&vga_lock, flags);
329 if (conflict == NULL)
330 break;
331
332
333 /* We have a conflict, we wait until somebody kicks the
 334		 * wait queue. Currently we have one wait queue that we
335 * kick each time some resources are released, but it would
336 * be fairly easy to have a per device one so that we only
337 * need to attach to the conflicting device
338 */
339 init_waitqueue_entry(&wait, current);
340 add_wait_queue(&vga_wait_queue, &wait);
341 set_current_state(interruptible ?
342 TASK_INTERRUPTIBLE :
343 TASK_UNINTERRUPTIBLE);
344 if (signal_pending(current)) {
345 rc = -EINTR;
346 break;
347 }
348 schedule();
349 remove_wait_queue(&vga_wait_queue, &wait);
350 set_current_state(TASK_RUNNING);
351 }
352 return rc;
353}
354EXPORT_SYMBOL(vga_get);
355
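A hedged driver-side sketch, not from this patch, of the intended bracketing with the interfaces above (vga_put() is defined just below); the helper name and the register ranges in the comment are placeholders:

	static int example_legacy_vga_access(struct pci_dev *pdev)
	{
		int rc;

		/* may sleep until conflicting owners drop their locks */
		rc = vga_get(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM, 1);
		if (rc)
			return rc;	/* -EINTR or -ENODEV */

		/* ... safe to touch 0x3b0-0x3df and 0xa0000-0xbffff here ... */

		vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
		return 0;
	}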
356int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
357{
358 struct vga_device *vgadev;
359 unsigned long flags;
360 int rc = 0;
361
362 vga_check_first_use();
363
 364	/* The one who calls us should check for this, but let's be sure... */
365 if (pdev == NULL)
366 pdev = vga_default_device();
367 if (pdev == NULL)
368 return 0;
369 spin_lock_irqsave(&vga_lock, flags);
370 vgadev = vgadev_find(pdev);
371 if (vgadev == NULL) {
372 rc = -ENODEV;
373 goto bail;
374 }
375 if (__vga_tryget(vgadev, rsrc))
376 rc = -EBUSY;
377bail:
378 spin_unlock_irqrestore(&vga_lock, flags);
379 return rc;
380}
381EXPORT_SYMBOL(vga_tryget);
382
383void vga_put(struct pci_dev *pdev, unsigned int rsrc)
384{
385 struct vga_device *vgadev;
386 unsigned long flags;
387
 388	/* The one who calls us should check for this, but let's be sure... */
389 if (pdev == NULL)
390 pdev = vga_default_device();
391 if (pdev == NULL)
392 return;
393 spin_lock_irqsave(&vga_lock, flags);
394 vgadev = vgadev_find(pdev);
395 if (vgadev == NULL)
396 goto bail;
397 __vga_put(vgadev, rsrc);
398bail:
399 spin_unlock_irqrestore(&vga_lock, flags);
400}
401EXPORT_SYMBOL(vga_put);
402
403/*
404 * Currently, we assume that the "initial" setup of the system is
405 * not sane, that is we come up with conflicting devices and let
406 * the arbiter's client decides if devices decodes or not legacy
407 * things.
408 */
409static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
410{
411 struct vga_device *vgadev;
412 unsigned long flags;
413 struct pci_bus *bus;
414 struct pci_dev *bridge;
415 u16 cmd;
416
417 /* Only deal with VGA class devices */
418 if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
419 return false;
420
421 /* Allocate structure */
422 vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL);
423 if (vgadev == NULL) {
424 pr_err("vgaarb: failed to allocate pci device\n");
 425		/* What to do on allocation failure? For now, let's
426 * just do nothing, I'm not sure there is anything saner
427 * to be done
428 */
429 return false;
430 }
431
432 memset(vgadev, 0, sizeof(*vgadev));
433
434 /* Take lock & check for duplicates */
435 spin_lock_irqsave(&vga_lock, flags);
436 if (vgadev_find(pdev) != NULL) {
437 BUG_ON(1);
438 goto fail;
439 }
440 vgadev->pdev = pdev;
441
442 /* By default, assume we decode everything */
443 vgadev->decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
444 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
445
446 /* by default mark it as decoding */
447 vga_decode_count++;
448 /* Mark that we "own" resources based on our enables, we will
449 * clear that below if the bridge isn't forwarding
450 */
451 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
452 if (cmd & PCI_COMMAND_IO)
453 vgadev->owns |= VGA_RSRC_LEGACY_IO;
454 if (cmd & PCI_COMMAND_MEMORY)
455 vgadev->owns |= VGA_RSRC_LEGACY_MEM;
456
457 /* Check if VGA cycles can get down to us */
458 bus = pdev->bus;
459 while (bus) {
460 bridge = bus->self;
461 if (bridge) {
462 u16 l;
463 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
464 &l);
465 if (!(l & PCI_BRIDGE_CTL_VGA)) {
466 vgadev->owns = 0;
467 break;
468 }
469 }
470 bus = bus->parent;
471 }
472
473 /* Deal with VGA default device. Use first enabled one
 474	 * by default if the arch doesn't have its own hook
475 */
476#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
477 if (vga_default == NULL &&
478 ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK))
479 vga_default = pci_dev_get(pdev);
480#endif
481
482 /* Add to the list */
483 list_add(&vgadev->list, &vga_list);
484 vga_count++;
485 pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
486 pci_name(pdev),
487 vga_iostate_to_str(vgadev->decodes),
488 vga_iostate_to_str(vgadev->owns),
489 vga_iostate_to_str(vgadev->locks));
490
491 spin_unlock_irqrestore(&vga_lock, flags);
492 return true;
493fail:
494 spin_unlock_irqrestore(&vga_lock, flags);
495 kfree(vgadev);
496 return false;
497}
498
499static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
500{
501 struct vga_device *vgadev;
502 unsigned long flags;
503 bool ret = true;
504
505 spin_lock_irqsave(&vga_lock, flags);
506 vgadev = vgadev_find(pdev);
507 if (vgadev == NULL) {
508 ret = false;
509 goto bail;
510 }
511
512 if (vga_default == pdev) {
513 pci_dev_put(vga_default);
514 vga_default = NULL;
515 }
516
517 if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
518 vga_decode_count--;
519
520 /* Remove entry from list */
521 list_del(&vgadev->list);
522 vga_count--;
523 /* Notify userland driver that the device is gone so it discards
 524	 * its copies of the pci_dev pointer
525 */
526 vga_arb_device_card_gone(pdev);
527
528 /* Wake up all possible waiters */
529 wake_up_all(&vga_wait_queue);
530bail:
531 spin_unlock_irqrestore(&vga_lock, flags);
532 kfree(vgadev);
533 return ret;
534}
535
536/* this is called with the lock */
537static inline void vga_update_device_decodes(struct vga_device *vgadev,
538 int new_decodes)
539{
540 int old_decodes;
541 struct vga_device *new_vgadev, *conflict;
542
543 old_decodes = vgadev->decodes;
544 vgadev->decodes = new_decodes;
545
546 pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
547 pci_name(vgadev->pdev),
548 vga_iostate_to_str(old_decodes),
549 vga_iostate_to_str(vgadev->decodes),
550 vga_iostate_to_str(vgadev->owns));
551
552
553 /* if we own the decodes we should move them along to
554 another card */
555 if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
556 /* set us to own nothing */
557 vgadev->owns &= ~old_decodes;
558 list_for_each_entry(new_vgadev, &vga_list, list) {
559 if ((new_vgadev != vgadev) &&
560 (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
561 pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
562 conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
563 if (!conflict)
564 __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
565 break;
566 }
567 }
568 }
569
570 /* change decodes counter */
571 if (old_decodes != new_decodes) {
572 if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
573 vga_decode_count++;
574 else
575 vga_decode_count--;
576 }
577}
578
579void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
580{
581 struct vga_device *vgadev;
582 unsigned long flags;
583
584 decodes &= VGA_RSRC_LEGACY_MASK;
585
586 spin_lock_irqsave(&vga_lock, flags);
587 vgadev = vgadev_find(pdev);
588 if (vgadev == NULL)
589 goto bail;
590
591 /* don't let userspace futz with kernel driver decodes */
592 if (userspace && vgadev->set_vga_decode)
593 goto bail;
594
595 /* update the device decodes + counter */
596 vga_update_device_decodes(vgadev, decodes);
597
598 /* XXX if somebody is going from "doesn't decode" to "decodes" state
 599	 * here, additional care must be taken as we may have pending
 600	 * ownership of the non-legacy region ...
601 */
602bail:
603 spin_unlock_irqrestore(&vga_lock, flags);
604}
605
606void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes)
607{
608 __vga_set_legacy_decoding(pdev, decodes, false);
609}
610EXPORT_SYMBOL(vga_set_legacy_decoding);
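/*
 * Illustrative sketch, not part of this file: a driver that has programmed
 * its device to stop decoding the legacy VGA ranges could tell the arbiter
 * about it with something like
 *
 *	vga_set_legacy_decoding(pdev, VGA_RSRC_NONE);
 *
 * after which the arbiter no longer needs to disable that device when
 * another card wants the legacy IO/MEM ranges.
 */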
611
612/* pass NULL callbacks (and cookie) to unregister the client */
613int vga_client_register(struct pci_dev *pdev, void *cookie,
614 void (*irq_set_state)(void *cookie, bool state),
615 unsigned int (*set_vga_decode)(void *cookie, bool decode))
616{
 617	int ret = -ENODEV;
618 struct vga_device *vgadev;
619 unsigned long flags;
620
621 spin_lock_irqsave(&vga_lock, flags);
622 vgadev = vgadev_find(pdev);
623 if (!vgadev)
624 goto bail;
625
626 vgadev->irq_set_state = irq_set_state;
627 vgadev->set_vga_decode = set_vga_decode;
628 vgadev->cookie = cookie;
629 ret = 0;
630
631bail:
632 spin_unlock_irqrestore(&vga_lock, flags);
633 return ret;
634
635}
636EXPORT_SYMBOL(vga_client_register);
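/*
 * Illustrative sketch of assumed usage, not from this file; the callback
 * and helper names below are made up. A GPU driver would typically register
 * once it can control its device's VGA decoding:
 *
 *	static unsigned int my_set_vga_decode(void *cookie, bool decode)
 *	{
 *		struct my_dev *mdev = cookie;
 *
 *		my_dev_program_vga_decoding(mdev, decode);
 *		return decode ? VGA_RSRC_LEGACY_MASK : VGA_RSRC_NONE;
 *	}
 *
 *	vga_client_register(pdev, mdev, NULL, my_set_vga_decode);
 *
 * and unregister with vga_client_register(pdev, NULL, NULL, NULL).
 */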
637
638/*
639 * Char driver implementation
640 *
 641 * The semantics are:
 642 *
 643 * open       : open a user instance of the arbiter. By default, it is
 644 *              attached to the default VGA device of the system.
645 *
646 * close : close user instance, release locks
647 *
 648 * read       : return a string indicating the status of the target.
 649 *              An IO state string is of the form {io,mem,io+mem,none};
 650 *              mc and ic are respectively the mem and io lock counts (for
 651 *              debugging/diagnostics only). "decodes" indicates what the
 652 *              card currently decodes, "owns" indicates what is currently
 653 *              enabled on it, and "locks" indicates what is locked by this
 654 *              card. If the card is unplugged, card_ID reads "invalid" and
 655 *              an -ENODEV error is returned for any command until a new
 656 *              card is targeted.
657 *
658 * "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
659 *
660 * write : write a command to the arbiter. List of commands is:
661 *
662 * target <card_ID> : switch target to card <card_ID> (see below)
663 * lock <io_state> : acquires locks on target ("none" is invalid io_state)
664 * trylock <io_state> : non-blocking acquire locks on target
665 * unlock <io_state> : release locks on target
666 * unlock all : release all locks on target held by this user
667 * decodes <io_state> : set the legacy decoding attributes for the card
668 *
 669 * poll       : an event is generated if something changes on any card (not just the target)
670 *
671 * card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
672 * to go back to the system default card (TODO: not implemented yet).
673 * Currently, only PCI is supported as a prefix, but the userland API may
674 * support other bus types in the future, even if the current kernel
675 * implementation doesn't.
676 *
677 * Note about locks:
678 *
 679 * The driver keeps track of which user has which locks on which card. It
 680 * supports stacking, like the kernel side does. This complicates the
 681 * implementation a bit, but makes the arbiter more tolerant of userspace
 682 * problems and able to properly clean up in all cases when a process dies.
 683 * Currently, a maximum of 16 cards can simultaneously have locks issued
 684 * from userspace for a given user (file descriptor instance) of the arbiter.
685 *
 686 * A hook inside the module is notified when devices are added to or
 687 * removed from the system, so hot-plugged VGA cards are automatically
 688 * added to and removed from the arbiter. A usage sketch follows.
689 */
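/*
 * Minimal userspace sketch (illustrative only; the card_ID is made up).
 * Locks are tracked per open file descriptor and dropped on release, so a
 * client must keep its fd open for as long as it holds a lock:
 *
 *	int fd = open("/dev/vga_arbiter", O_RDWR);
 *	write(fd, "target PCI:0000:01:00.0", 23);
 *	write(fd, "lock io+mem", 11);
 *	... poke at the legacy VGA resources ...
 *	write(fd, "unlock all", 10);
 *	close(fd);
 */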
690
691#define MAX_USER_CARDS 16
692#define PCI_INVALID_CARD ((struct pci_dev *)-1UL)
693
694/*
695 * Each user has an array of these, tracking which cards have locks
696 */
697struct vga_arb_user_card {
698 struct pci_dev *pdev;
699 unsigned int mem_cnt;
700 unsigned int io_cnt;
701};
702
703struct vga_arb_private {
704 struct list_head list;
705 struct pci_dev *target;
706 struct vga_arb_user_card cards[MAX_USER_CARDS];
707 spinlock_t lock;
708};
709
710static LIST_HEAD(vga_user_list);
711static DEFINE_SPINLOCK(vga_user_lock);
712
713
714/*
715 * This function parses a string in the format "PCI:domain:bus:dev.fn"
716 * (e.g. "PCI:0000:01:00.0") and fills in the respective values. It
717 * returns 1 on success and 0 if the string is not in this format.
718 */
719static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
720 unsigned int *bus, unsigned int *devfn)
721{
722 int n;
723 unsigned int slot, func;
724
725
726 n = sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func);
727 if (n != 4)
728 return 0;
729
730 *devfn = PCI_DEVFN(slot, func);
731
732 return 1;
733}
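/*
 * Illustrative call, with made-up values:
 *
 *	unsigned int domain, bus, devfn;
 *
 *	if (vga_pci_str_to_vars("PCI:0000:01:00.0", 16, &domain, &bus, &devfn))
 *		... here domain == 0, bus == 1, devfn == PCI_DEVFN(0, 0) ...
 */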
734
735static ssize_t vga_arb_read(struct file *file, char __user * buf,
736 size_t count, loff_t *ppos)
737{
738 struct vga_arb_private *priv = file->private_data;
739 struct vga_device *vgadev;
740 struct pci_dev *pdev;
741 unsigned long flags;
742 size_t len;
743 int rc;
744 char *lbuf;
745
746 lbuf = kmalloc(1024, GFP_KERNEL);
747 if (lbuf == NULL)
748 return -ENOMEM;
749
750 /* Shields against vga_arb_device_card_gone (pci_dev going
751 * away), and allows access to vga list
752 */
753 spin_lock_irqsave(&vga_lock, flags);
754
 755	/* Grab the current target; it may be unset or gone */
756 pdev = priv->target;
757 if (pdev == NULL || pdev == PCI_INVALID_CARD) {
758 spin_unlock_irqrestore(&vga_lock, flags);
759 len = sprintf(lbuf, "invalid");
760 goto done;
761 }
762
763 /* Find card vgadev structure */
764 vgadev = vgadev_find(pdev);
765 if (vgadev == NULL) {
 766		/* It's not in the list; that shouldn't happen. Mark the
 767		 * target as gone and report an invalid card.
 768		 */
769 if (pdev == priv->target)
770 vga_arb_device_card_gone(pdev);
771 spin_unlock_irqrestore(&vga_lock, flags);
772 len = sprintf(lbuf, "invalid");
773 goto done;
774 }
775
 776	/* Fill the buffer with the device's state */
777 len = snprintf(lbuf, 1024,
778 "count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%d:%d)\n",
779 vga_decode_count, pci_name(pdev),
780 vga_iostate_to_str(vgadev->decodes),
781 vga_iostate_to_str(vgadev->owns),
782 vga_iostate_to_str(vgadev->locks),
783 vgadev->io_lock_cnt, vgadev->mem_lock_cnt);
784
785 spin_unlock_irqrestore(&vga_lock, flags);
786done:
787
788 /* Copy that to user */
789 if (len > count)
790 len = count;
791 rc = copy_to_user(buf, lbuf, len);
792 kfree(lbuf);
793 if (rc)
794 return -EFAULT;
795 return len;
796}
797
798/*
799 * TODO: to avoid parsing inside the kernel and to improve speed, we may
800 * want to consider using an ioctl here
801 */
802static ssize_t vga_arb_write(struct file *file, const char __user * buf,
803 size_t count, loff_t *ppos)
804{
805 struct vga_arb_private *priv = file->private_data;
806 struct vga_arb_user_card *uc = NULL;
807 struct pci_dev *pdev;
808
809 unsigned int io_state;
810
811 char *kbuf, *curr_pos;
812 size_t remaining = count;
813
814 int ret_val;
815 int i;
816
817
818 kbuf = kmalloc(count + 1, GFP_KERNEL);
819 if (!kbuf)
820 return -ENOMEM;
821
822 if (copy_from_user(kbuf, buf, count)) {
823 kfree(kbuf);
824 return -EFAULT;
825 }
826 curr_pos = kbuf;
827 kbuf[count] = '\0'; /* Just to make sure... */
828
829 if (strncmp(curr_pos, "lock ", 5) == 0) {
830 curr_pos += 5;
831 remaining -= 5;
832
833 pr_devel("client 0x%p called 'lock'\n", priv);
834
835 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
836 ret_val = -EPROTO;
837 goto done;
838 }
839 if (io_state == VGA_RSRC_NONE) {
840 ret_val = -EPROTO;
841 goto done;
842 }
843
844 pdev = priv->target;
845 if (priv->target == NULL) {
846 ret_val = -ENODEV;
847 goto done;
848 }
849
850 vga_get_uninterruptible(pdev, io_state);
851
 852		/* Update the client's lock counts... */
853 for (i = 0; i < MAX_USER_CARDS; i++) {
854 if (priv->cards[i].pdev == pdev) {
855 if (io_state & VGA_RSRC_LEGACY_IO)
856 priv->cards[i].io_cnt++;
857 if (io_state & VGA_RSRC_LEGACY_MEM)
858 priv->cards[i].mem_cnt++;
859 break;
860 }
861 }
862
863 ret_val = count;
864 goto done;
865 } else if (strncmp(curr_pos, "unlock ", 7) == 0) {
866 curr_pos += 7;
867 remaining -= 7;
868
869 pr_devel("client 0x%p called 'unlock'\n", priv);
870
871 if (strncmp(curr_pos, "all", 3) == 0)
872 io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
873 else {
874 if (!vga_str_to_iostate
875 (curr_pos, remaining, &io_state)) {
876 ret_val = -EPROTO;
877 goto done;
878 }
879 /* TODO: Add this?
880 if (io_state == VGA_RSRC_NONE) {
881 ret_val = -EPROTO;
882 goto done;
883 }
884 */
885 }
886
887 pdev = priv->target;
888 if (priv->target == NULL) {
889 ret_val = -ENODEV;
890 goto done;
891 }
892 for (i = 0; i < MAX_USER_CARDS; i++) {
893 if (priv->cards[i].pdev == pdev)
894 uc = &priv->cards[i];
895 }
896
 897		/* NB: an early "return -EINVAL" here would leak kbuf, so
 898		 * all error paths must go through "done" */
 899		if (!uc ||
 900		    ((io_state & VGA_RSRC_LEGACY_IO) && uc->io_cnt == 0) ||
 901		    ((io_state & VGA_RSRC_LEGACY_MEM) && uc->mem_cnt == 0)) {
 902			ret_val = -EINVAL;
 903			goto done;
 904		}
 905
906 vga_put(pdev, io_state);
907
908 if (io_state & VGA_RSRC_LEGACY_IO)
909 uc->io_cnt--;
910 if (io_state & VGA_RSRC_LEGACY_MEM)
911 uc->mem_cnt--;
912
913 ret_val = count;
914 goto done;
915 } else if (strncmp(curr_pos, "trylock ", 8) == 0) {
916 curr_pos += 8;
917 remaining -= 8;
918
919 pr_devel("client 0x%p called 'trylock'\n", priv);
920
921 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
922 ret_val = -EPROTO;
923 goto done;
924 }
925 /* TODO: Add this?
926 if (io_state == VGA_RSRC_NONE) {
927 ret_val = -EPROTO;
928 goto done;
929 }
930 */
931
932 pdev = priv->target;
933 if (priv->target == NULL) {
934 ret_val = -ENODEV;
935 goto done;
936 }
937
938 if (vga_tryget(pdev, io_state)) {
 939			/* Update the client's lock counts... */
940 for (i = 0; i < MAX_USER_CARDS; i++) {
941 if (priv->cards[i].pdev == pdev) {
942 if (io_state & VGA_RSRC_LEGACY_IO)
943 priv->cards[i].io_cnt++;
944 if (io_state & VGA_RSRC_LEGACY_MEM)
945 priv->cards[i].mem_cnt++;
946 break;
947 }
948 }
949 ret_val = count;
950 goto done;
951 } else {
952 ret_val = -EBUSY;
953 goto done;
954 }
955
956 } else if (strncmp(curr_pos, "target ", 7) == 0) {
957 unsigned int domain, bus, devfn;
958 struct vga_device *vgadev;
959
960 curr_pos += 7;
961 remaining -= 7;
962 pr_devel("client 0x%p called 'target'\n", priv);
963 /* if target is default */
 964		if (!strncmp(curr_pos, "default", 7)) /* not the __user buf! */
965 pdev = pci_dev_get(vga_default_device());
966 else {
967 if (!vga_pci_str_to_vars(curr_pos, remaining,
968 &domain, &bus, &devfn)) {
969 ret_val = -EPROTO;
970 goto done;
971 }
972
 973			pdev = pci_get_bus_and_slot(bus, devfn); /* XXX domain is parsed but ignored */
974 if (!pdev) {
975 pr_info("vgaarb: invalid PCI address!\n");
976 ret_val = -ENODEV;
977 goto done;
978 }
979 }
980
981 vgadev = vgadev_find(pdev);
982 if (vgadev == NULL) {
 983			pr_info("vgaarb: this PCI device is not a VGA device\n");
984 pci_dev_put(pdev);
985 ret_val = -ENODEV;
986 goto done;
987 }
988
989 priv->target = pdev;
990 for (i = 0; i < MAX_USER_CARDS; i++) {
991 if (priv->cards[i].pdev == pdev)
992 break;
993 if (priv->cards[i].pdev == NULL) {
994 priv->cards[i].pdev = pdev;
995 priv->cards[i].io_cnt = 0;
996 priv->cards[i].mem_cnt = 0;
997 break;
998 }
999 }
1000 if (i == MAX_USER_CARDS) {
1001			pr_err("vgaarb: maximum number of user cards reached!\n");
1002 pci_dev_put(pdev);
1003 /* XXX: which value to return? */
1004 ret_val = -ENOMEM;
1005 goto done;
1006 }
1007
1008 ret_val = count;
1009 pci_dev_put(pdev);
1010 goto done;
1011
1012
1013 } else if (strncmp(curr_pos, "decodes ", 8) == 0) {
1014 curr_pos += 8;
1015 remaining -= 8;
1016 pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv);
1017
1018 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
1019 ret_val = -EPROTO;
1020 goto done;
1021 }
1022 pdev = priv->target;
1023 if (priv->target == NULL) {
1024 ret_val = -ENODEV;
1025 goto done;
1026 }
1027
1028 __vga_set_legacy_decoding(pdev, io_state, true);
1029 ret_val = count;
1030 goto done;
1031 }
1032 /* If we got here, the message written is not part of the protocol! */
1033 kfree(kbuf);
1034 return -EPROTO;
1035
1036done:
1037 kfree(kbuf);
1038 return ret_val;
1039}
1040
1041static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
1042{
1043 struct vga_arb_private *priv = file->private_data;
1044
1045 pr_devel("%s\n", __func__);
1046
1047 if (priv == NULL)
1048 return -ENODEV;
1049 poll_wait(file, &vga_wait_queue, wait);
1050 return POLLIN;
1051}
1052
1053static int vga_arb_open(struct inode *inode, struct file *file)
1054{
1055 struct vga_arb_private *priv;
1056 unsigned long flags;
1057
1058 pr_devel("%s\n", __func__);
1059
1060	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1061	if (priv == NULL)
1062		return -ENOMEM;
1063
1064 spin_lock_init(&priv->lock);
1065 file->private_data = priv;
1066
1067 spin_lock_irqsave(&vga_user_lock, flags);
1068 list_add(&priv->list, &vga_user_list);
1069 spin_unlock_irqrestore(&vga_user_lock, flags);
1070
1071	/* Set up the client's list of locks */
1072 priv->target = vga_default_device(); /* Maybe this is still null! */
1073 priv->cards[0].pdev = priv->target;
1074 priv->cards[0].io_cnt = 0;
1075 priv->cards[0].mem_cnt = 0;
1076
1077
1078 return 0;
1079}
1080
1081static int vga_arb_release(struct inode *inode, struct file *file)
1082{
1083 struct vga_arb_private *priv = file->private_data;
1084 struct vga_arb_user_card *uc;
1085 unsigned long flags;
1086 int i;
1087
1088 pr_devel("%s\n", __func__);
1089
1090 if (priv == NULL)
1091 return -ENODEV;
1092
1093 spin_lock_irqsave(&vga_user_lock, flags);
1094 list_del(&priv->list);
1095 for (i = 0; i < MAX_USER_CARDS; i++) {
1096 uc = &priv->cards[i];
1097 if (uc->pdev == NULL)
1098 continue;
1099 pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n",
1100 uc->io_cnt, uc->mem_cnt);
1101 while (uc->io_cnt--)
1102 vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
1103 while (uc->mem_cnt--)
1104 vga_put(uc->pdev, VGA_RSRC_LEGACY_MEM);
1105 }
1106 spin_unlock_irqrestore(&vga_user_lock, flags);
1107
1108 kfree(priv);
1109
1110 return 0;
1111}
1112
1113static void vga_arb_device_card_gone(struct pci_dev *pdev)
1114{
1115}
1116
1117/*
1118 * Call back any registered clients to let them know there has been
1119 * a change in VGA cards.
1120 */
1121static void vga_arbiter_notify_clients(void)
1122{
1123 struct vga_device *vgadev;
1124 unsigned long flags;
1125 uint32_t new_decodes;
1126 bool new_state;
1127
1128 if (!vga_arbiter_used)
1129 return;
1130
1131 spin_lock_irqsave(&vga_lock, flags);
1132 list_for_each_entry(vgadev, &vga_list, list) {
1133 if (vga_count > 1)
1134 new_state = false;
1135 else
1136 new_state = true;
1137 if (vgadev->set_vga_decode) {
1138 new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state);
1139 vga_update_device_decodes(vgadev, new_decodes);
1140 }
1141 }
1142 spin_unlock_irqrestore(&vga_lock, flags);
1143}
1144
1145static int pci_notify(struct notifier_block *nb, unsigned long action,
1146 void *data)
1147{
1148 struct device *dev = data;
1149 struct pci_dev *pdev = to_pci_dev(dev);
1150 bool notify = false;
1151
1152 pr_devel("%s\n", __func__);
1153
1154	/* For now we're only interested in devices being added and removed.
1155	 * This hasn't been tested here, so someone needs to double-check the
1156	 * cases of hot-pluggable VGA cards. */
1157 if (action == BUS_NOTIFY_ADD_DEVICE)
1158 notify = vga_arbiter_add_pci_device(pdev);
1159 else if (action == BUS_NOTIFY_DEL_DEVICE)
1160 notify = vga_arbiter_del_pci_device(pdev);
1161
1162 if (notify)
1163 vga_arbiter_notify_clients();
1164 return 0;
1165}
1166
1167static struct notifier_block pci_notifier = {
1168 .notifier_call = pci_notify,
1169};
1170
1171static const struct file_operations vga_arb_device_fops = {
1172 .read = vga_arb_read,
1173 .write = vga_arb_write,
1174 .poll = vga_arb_fpoll,
1175 .open = vga_arb_open,
1176 .release = vga_arb_release,
1177};
1178
1179static struct miscdevice vga_arb_device = {
1180 MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
1181};
1182
1183static int __init vga_arb_device_init(void)
1184{
1185 int rc;
1186 struct pci_dev *pdev;
1187
1188 rc = misc_register(&vga_arb_device);
1189 if (rc < 0)
1190 pr_err("vgaarb: error %d registering device\n", rc);
1191
1192 bus_register_notifier(&pci_bus_type, &pci_notifier);
1193
1194	/* By default we add all PCI devices of the VGA class to the
1195	 * arbiter */
1196 pdev = NULL;
1197 while ((pdev =
1198 pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
1199 PCI_ANY_ID, pdev)) != NULL)
1200 vga_arbiter_add_pci_device(pdev);
1201
1202 pr_info("vgaarb: loaded\n");
1203 return rc;
1204}
1205subsys_initcall(vga_arb_device_init);