author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-24 13:30:41 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-24 13:30:41 -0400
commit		94e0fb086fc5663c38bbc0fe86d698be8314f82f (patch)
tree		1c3be6c71ec3511aa2a4eb6dfa25f35677464ebb
parent		b7f21bb2e23b4fec16b448a34889f467465be659 (diff)
parent		c715089f49844260f1eeae8e3b55af9468ba1325 (diff)
Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel
* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (57 commits)
  drm/i915: Handle ERESTARTSYS during page fault
  drm/i915: Warn before mmaping a purgeable buffer.
  drm/i915: Track purged state.
  drm/i915: Remove eviction debug spam
  drm/i915: Immediately discard any backing storage for uneeded objects
  drm/i915: Do not mis-classify clean objects as purgeable
  drm/i915: Whitespace correction for madv
  drm/i915: BUG_ON page refleak during unbind
  drm/i915: Search harder for a reusable object
  drm/i915: Clean up evict from list.
  drm/i915: Add tracepoints
  drm/i915: framebuffer compression for GM45+
  drm/i915: split display functions by chip type
  drm/i915: Skip the sanity checks if the current relocation is valid
  drm/i915: Check that the relocation points to within the target
  drm/i915: correct FBC update when pipe base update occurs
  drm/i915: blacklist Acer AspireOne lid status
  ACPI: make ACPI button funcs no-ops if not built in
  drm/i915: prevent FIFO calculation overflows on 32 bits with high dotclocks
  drm/i915: intel_display.c handle latency variable efficiently
  ...

Fix up trivial conflicts in drivers/gpu/drm/i915/{i915_dma.c|i915_drv.h}
-rw-r--r--	arch/x86/mm/pageattr.c                     |    1
-rw-r--r--	drivers/acpi/button.c                      |   45
-rw-r--r--	drivers/char/agp/intel-agp.c               |   37
-rw-r--r--	drivers/gpu/drm/Kconfig                    |    1
-rw-r--r--	drivers/gpu/drm/drm_gem.c                  |   13
-rw-r--r--	drivers/gpu/drm/i915/Makefile              |    1
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c        |    6
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c            |  194
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c            |  133
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h            |   77
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c            |  876
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c            |   92
-rw-r--r--	drivers/gpu/drm/i915/i915_opregion.c       |   22
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h            |   34
-rw-r--r--	drivers/gpu/drm/i915/i915_suspend.c        |  170
-rw-r--r--	drivers/gpu/drm/i915/i915_trace.h          |  315
-rw-r--r--	drivers/gpu/drm/i915/i915_trace_points.c   |   11
-rw-r--r--	drivers/gpu/drm/i915/intel_bios.c          |    3
-rw-r--r--	drivers/gpu/drm/i915/intel_crt.c           |    9
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c       |  616
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h           |    5
-rw-r--r--	drivers/gpu/drm/i915/intel_lvds.c          |   63
-rw-r--r--	drivers/gpu/drm/i915/intel_sdvo.c          |  502
-rw-r--r--	include/acpi/button.h                      |   25
-rw-r--r--	include/drm/drm_pciids.h                   |    1
-rw-r--r--	include/drm/i915_drm.h                     |   19
26 files changed, 2811 insertions(+), 460 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 24952fdc7e40..dd38bfbefd1f 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -144,6 +144,7 @@ void clflush_cache_range(void *vaddr, unsigned int size)
 
 	mb();
 }
+EXPORT_SYMBOL_GPL(clflush_cache_range);
 
 static void __cpa_flush_all(void *arg)
 {
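The export above exists so that modules can flush a CPU-cached range before a non-snooping GPU reads it; the intel-agp change below becomes the first modular user. A minimal sketch of the call pattern, assuming kernel-module context (the wrapper name is illustrative):

	#include <asm/cacheflush.h>

	/* Sketch: push dirty cache lines for 'buf' out to memory so a GPU
	 * that does not snoop the CPU caches sees the data.
	 * clflush_cache_range() is the symbol exported by the hunk above. */
	static void flush_for_gpu(void *buf, unsigned int len)
	{
		clflush_cache_range(buf, len);
	}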
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index d295bdccc09c..9335b87c5174 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -115,6 +115,9 @@ static const struct file_operations acpi_button_state_fops = {
 	.release = single_release,
 };
 
+static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
+static struct acpi_device *lid_device;
+
 /* --------------------------------------------------------------------------
                               FS Interface (/proc)
    -------------------------------------------------------------------------- */
@@ -231,11 +234,38 @@ static int acpi_button_remove_fs(struct acpi_device *device)
 /* --------------------------------------------------------------------------
                                 Driver Interface
    -------------------------------------------------------------------------- */
+int acpi_lid_notifier_register(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&acpi_lid_notifier, nb);
+}
+EXPORT_SYMBOL(acpi_lid_notifier_register);
+
+int acpi_lid_notifier_unregister(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb);
+}
+EXPORT_SYMBOL(acpi_lid_notifier_unregister);
+
+int acpi_lid_open(void)
+{
+	acpi_status status;
+	unsigned long long state;
+
+	status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
+				       &state);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	return !!state;
+}
+EXPORT_SYMBOL(acpi_lid_open);
+
 static int acpi_lid_send_state(struct acpi_device *device)
 {
 	struct acpi_button *button = acpi_driver_data(device);
 	unsigned long long state;
 	acpi_status status;
+	int ret;
 
 	status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state);
 	if (ACPI_FAILURE(status))
@@ -244,7 +274,12 @@ static int acpi_lid_send_state(struct acpi_device *device)
 	/* input layer checks if event is redundant */
 	input_report_switch(button->input, SW_LID, !state);
 	input_sync(button->input);
-	return 0;
+
+	ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
+	if (ret == NOTIFY_DONE)
+		ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
+						   device);
+	return ret;
 }
 
 static void acpi_button_notify(struct acpi_device *device, u32 event)
@@ -366,8 +401,14 @@ static int acpi_button_add(struct acpi_device *device)
 	error = input_register_device(input);
 	if (error)
 		goto err_remove_fs;
-	if (button->type == ACPI_BUTTON_TYPE_LID)
+	if (button->type == ACPI_BUTTON_TYPE_LID) {
 		acpi_lid_send_state(device);
+		/*
+		 * This assumes there's only one lid device, or if there are
+		 * more we only care about the last one...
+		 */
+		lid_device = device;
+	}
 
 	if (device->wakeup.flags.valid) {
 		/* Button's GPE is run-wake GPE */
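The new notifier chain and acpi_lid_open() give drivers a way to track and query lid state; the i915 LVDS code in this merge registers such a notifier. A hedged sketch of a consumer (the names here are illustrative, not from the patch):

	static int lid_event(struct notifier_block *nb, unsigned long state,
			     void *data)
	{
		/* state carries the raw _LID value: nonzero means open */
		pr_info("lid %s\n", state ? "opened" : "closed");
		return NOTIFY_OK;
	}

	static struct notifier_block lid_nb = { .notifier_call = lid_event };

	static int lid_watch_start(void)
	{
		int open = acpi_lid_open();	/* 1 open, 0 closed, -ENODEV */

		if (open < 0)
			return open;
		return acpi_lid_notifier_register(&lid_nb);
	}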
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 1540e693d91e..4068467ce7b9 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -46,6 +46,8 @@
 #define PCI_DEVICE_ID_INTEL_Q35_IG          0x29B2
 #define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0
 #define PCI_DEVICE_ID_INTEL_Q33_IG          0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB          0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
 #define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
 #define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
 #define PCI_DEVICE_ID_INTEL_IGD_E_HB        0x2E00
@@ -91,6 +93,7 @@
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
@@ -804,23 +807,39 @@ static void intel_i830_setup_flush(void)
 	if (!intel_private.i8xx_page)
 		return;
 
-	/* make page uncached */
-	map_page_into_agp(intel_private.i8xx_page);
-
 	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
 	if (!intel_private.i8xx_flush_page)
 		intel_i830_fini_flush();
 }
 
+static void
+do_wbinvd(void *null)
+{
+	wbinvd();
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, it's 64 octwords, or 1KB. So, to get those previous things in
+ * that buffer out, we just fill 1KB and clflush it out, on the assumption
+ * that it'll push whatever was in there out. It appears to work.
+ */
 static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
 {
 	unsigned int *pg = intel_private.i8xx_flush_page;
-	int i;
 
-	for (i = 0; i < 256; i += 2)
-		*(pg + i) = i;
+	memset(pg, 0, 1024);
 
-	wmb();
+	if (cpu_has_clflush) {
+		clflush_cache_range(pg, 1024);
+	} else {
+		if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
+			printk(KERN_ERR "Timed out waiting for cache flush.\n");
+	}
 }
 
 /* The intel i830 automatically initializes the agp aperture during POST.
@@ -1341,6 +1360,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
 	case PCI_DEVICE_ID_INTEL_Q45_HB:
 	case PCI_DEVICE_ID_INTEL_G45_HB:
 	case PCI_DEVICE_ID_INTEL_G41_HB:
+	case PCI_DEVICE_ID_INTEL_B43_HB:
 	case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
 	case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
 	case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
@@ -2335,6 +2355,8 @@ static const struct intel_driver_description {
2335 "Q45/Q43", NULL, &intel_i965_driver }, 2355 "Q45/Q43", NULL, &intel_i965_driver },
2336 { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, 2356 { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
2337 "G45/G43", NULL, &intel_i965_driver }, 2357 "G45/G43", NULL, &intel_i965_driver },
2358 { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
2359 "B43", NULL, &intel_i965_driver },
2338 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, 2360 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
2339 "G41", NULL, &intel_i965_driver }, 2361 "G41", NULL, &intel_i965_driver },
2340 { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0, 2362 { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
@@ -2535,6 +2557,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_Q45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G41_HB),
+	ID(PCI_DEVICE_ID_INTEL_B43_HB),
 	ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
 	ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
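The 1KB figure in the chipset-flush comment above comes from the 865's documented write-buffer depth: 64 octwords at 16 bytes each. A one-line check of that arithmetic, as a self-contained C snippet:

	#include <assert.h>

	int main(void)
	{
		unsigned int octword = 16;	/* bytes per octword */

		/* 64 octwords = 1024 bytes, the size both the memset()
		 * and the clflush_cache_range() above operate on. */
		assert(64 * octword == 1024);
		return 0;
	}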
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e4d971c8b9d0..f831ea159291 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -102,6 +102,7 @@ config DRM_I915
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
+	select ACPI_BUTTON if ACPI
 	help
 	  Choose this option if you have a system that has Intel 830M, 845G,
 	  852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 230c9ffdd5e9..80391995bdec 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,6 +142,19 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 	if (IS_ERR(obj->filp))
 		goto free;
 
+	/* Basically we want to disable the OOM killer and handle ENOMEM
+	 * ourselves by sacrificing pages from cached buffers.
+	 * XXX shmem_file_[gs]et_gfp_mask()
+	 */
+	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
+			     GFP_HIGHUSER |
+			     __GFP_COLD |
+			     __GFP_FS |
+			     __GFP_RECLAIMABLE |
+			     __GFP_NORETRY |
+			     __GFP_NOWARN |
+			     __GFP_NOMEMALLOC);
+
 	kref_init(&obj->refcount);
 	kref_init(&obj->handlecount);
 	obj->size = size;
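The mask set above makes shmem page allocations for GEM objects fail fast with ENOMEM (no retries, no OOM kill) so the driver can evict buffers and try again; the i915_gem.c changes later in this diff do exactly that by briefly clearing __GFP_NORETRY. A hedged sketch of that retry pattern, where fault_in_pages() stands in for the real page-getter:

	/* Sketch: retry an allocation harder after making room, by briefly
	 * dropping __GFP_NORETRY from the mapping's allocation mask and
	 * then restoring the fail-fast policy. */
	static int get_pages_harder(struct address_space *mapping)
	{
		gfp_t gfp = mapping_gfp_mask(mapping);
		int ret;

		mapping_set_gfp_mask(mapping, gfp & ~__GFP_NORETRY);
		ret = fault_in_pages(mapping);		/* hypothetical helper */
		mapping_set_gfp_mask(mapping, gfp);	/* restore policy */
		return ret;
	}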
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5269dfa5f620..fa7b9be096bc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  i915_gem.o \
 	  i915_gem_debug.o \
 	  i915_gem_tiling.o \
+	  i915_trace_points.o \
 	  intel_display.o \
 	  intel_crt.o \
 	  intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1e3bdcee863c..f8ce9a3a420d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,11 +96,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	{
 		struct drm_gem_object *obj = obj_priv->obj;
 
-		seq_printf(m, "    %p: %s %08x %08x %d",
+		seq_printf(m, "    %p: %s %8zd %08x %08x %d %s",
 			   obj,
 			   get_pin_flag(obj_priv),
+			   obj->size,
 			   obj->read_domains, obj->write_domain,
-			   obj_priv->last_rendering_seqno);
+			   obj_priv->last_rendering_seqno,
+			   obj_priv->dirty ? "dirty" : "");
 
 		if (obj->name)
 			seq_printf(m, " (name: %d)", obj->name);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 5a49a1867b35..45d507ebd3ff 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -33,6 +33,7 @@
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 #include <linux/vgaarb.h>
 
 /* Really want an OS-independent resettable timer.  Would like to have
@@ -50,14 +51,18 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 	int i;
 
+	trace_i915_ring_wait_begin (dev);
+
 	for (i = 0; i < 100000; i++) {
 		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 		acthd = I915_READ(acthd_reg);
 		ring->space = ring->head - (ring->tail + 8);
 		if (ring->space < 0)
 			ring->space += ring->Size;
-		if (ring->space >= n)
+		if (ring->space >= n) {
+			trace_i915_ring_wait_end (dev);
 			return 0;
+		}
 
 		if (dev->primary->master) {
 			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -77,6 +82,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 
 	}
 
+	trace_i915_ring_wait_end (dev);
 	return -EBUSY;
 }
 
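The space calculation being traced here treats the ring as a circular buffer: free space is head minus tail (with an 8-byte gap so the two never collide), wrapped by the ring size. The same arithmetic as a standalone sketch:

	/* Sketch: free bytes in a circular ring of 'size' bytes, mirroring
	 * the computation in i915_wait_ring() above; the 8-byte slack keeps
	 * the tail from ever catching the head exactly. */
	static int ring_space(unsigned int head, unsigned int tail,
			      unsigned int size)
	{
		int space = head - (tail + 8);

		if (space < 0)
			space += size;	/* head has wrapped around */
		return space;
	}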
@@ -922,7 +928,8 @@ static int i915_get_bridge_dev(struct drm_device *dev)
  * how much was set aside so we can use it for our own purposes.
  */
 static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
-			  uint32_t *preallocated_size)
+			  uint32_t *preallocated_size,
+			  uint32_t *start)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 tmp = 0;
@@ -1009,10 +1016,159 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
 		return -1;
 	}
 	*preallocated_size = stolen - overhead;
+	*start = overhead;
 
 	return 0;
 }
 
+#define PTE_ADDRESS_MASK		0xfffff000
+#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
+#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
+#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
+#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
+#define PTE_MAPPING_TYPE_MASK		(3 << 1)
+#define PTE_VALID			(1 << 0)
+
+/**
+ * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * @dev: drm device
+ * @gtt_addr: address to translate
+ *
+ * Some chip functions require allocations from stolen space but need the
+ * physical address of the memory in question.  We use this routine
+ * to get a physical address suitable for register programming from a given
+ * GTT address.
+ */
+static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+				      unsigned long gtt_addr)
+{
+	unsigned long *gtt;
+	unsigned long entry, phys;
+	int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+	int gtt_offset, gtt_size;
+
+	if (IS_I965G(dev)) {
+		if (IS_G4X(dev) || IS_IGDNG(dev)) {
+			gtt_offset = 2*1024*1024;
+			gtt_size = 2*1024*1024;
+		} else {
+			gtt_offset = 512*1024;
+			gtt_size = 512*1024;
+		}
+	} else {
+		gtt_bar = 3;
+		gtt_offset = 0;
+		gtt_size = pci_resource_len(dev->pdev, gtt_bar);
+	}
+
+	gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
+			 gtt_size);
+	if (!gtt) {
+		DRM_ERROR("ioremap of GTT failed\n");
+		return 0;
+	}
+
+	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
+
+	DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+
+	/* Mask out these reserved bits on this hardware. */
+	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
+	    IS_I945G(dev) || IS_I945GM(dev)) {
+		entry &= ~PTE_ADDRESS_MASK_HIGH;
+	}
+
+	/* If it's not a mapping type we know, then bail. */
+	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
+	    (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
+		iounmap(gtt);
+		return 0;
+	}
+
+	if (!(entry & PTE_VALID)) {
+		DRM_ERROR("bad GTT entry in stolen space\n");
+		iounmap(gtt);
+		return 0;
+	}
+
+	iounmap(gtt);
+
+	phys =(entry & PTE_ADDRESS_MASK) |
+		((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
+
+	DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+
+	return phys;
+}
+
+static void i915_warn_stolen(struct drm_device *dev)
+{
+	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
+	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+}
+
+static void i915_setup_compression(struct drm_device *dev, int size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mm_node *compressed_fb, *compressed_llb;
+	unsigned long cfb_base, ll_base;
+
+	/* Leave 1M for line length buffer & misc. */
+	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+	if (!compressed_fb) {
+		i915_warn_stolen(dev);
+		return;
+	}
+
+	compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+	if (!compressed_fb) {
+		i915_warn_stolen(dev);
+		return;
+	}
+
+	cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
+	if (!cfb_base) {
+		DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+		drm_mm_put_block(compressed_fb);
+	}
+
+	if (!IS_GM45(dev)) {
+		compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+						    4096, 0);
+		if (!compressed_llb) {
+			i915_warn_stolen(dev);
+			return;
+		}
+
+		compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
+		if (!compressed_llb) {
+			i915_warn_stolen(dev);
+			return;
+		}
+
+		ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
+		if (!ll_base) {
+			DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+			drm_mm_put_block(compressed_fb);
+			drm_mm_put_block(compressed_llb);
+		}
+	}
+
+	dev_priv->cfb_size = size;
+
+	if (IS_GM45(dev)) {
+		g4x_disable_fbc(dev);
+		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+	} else {
+		i8xx_disable_fbc(dev);
+		I915_WRITE(FBC_CFB_BASE, cfb_base);
+		I915_WRITE(FBC_LL_BASE, ll_base);
+	}
+
+	DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+		  ll_base, size >> 20);
+}
+
 /* true = enable decode, false = disable decoder */
 static unsigned int i915_vga_set_decode(void *cookie, bool state)
 {
@@ -1027,6 +1183,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
 }
 
 static int i915_load_modeset_init(struct drm_device *dev,
+				  unsigned long prealloc_start,
 				  unsigned long prealloc_size,
 				  unsigned long agp_size)
 {
@@ -1047,6 +1204,10 @@ static int i915_load_modeset_init(struct drm_device *dev,
 
 	/* Basic memrange allocator for stolen space (aka vram) */
 	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
+	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+
+	/* We're off and running w/KMS */
+	dev_priv->mm.suspended = 0;
 
 	/* Let GEM Manage from end of prealloc space to end of aperture.
 	 *
@@ -1059,10 +1220,25 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	 */
 	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
 
+	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_init_ringbuffer(dev);
+	mutex_unlock(&dev->struct_mutex);
 	if (ret)
 		goto out;
 
+	/* Try to set up FBC with a reasonable compressed buffer size */
+	if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) &&
+	    i915_powersave) {
+		int cfb_size;
+
+		/* Try to get an 8M buffer... */
+		if (prealloc_size > (9*1024*1024))
+			cfb_size = 8*1024*1024;
+		else /* fall back to 7/8 of the stolen space */
+			cfb_size = prealloc_size * 7 / 8;
+		i915_setup_compression(dev, cfb_size);
+	}
+
 	/* Allow hardware batchbuffers unless told otherwise.
 	 */
 	dev_priv->allow_batchbuffer = 1;
@@ -1180,7 +1356,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	resource_size_t base, size;
 	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
-	uint32_t agp_size, prealloc_size;
+	uint32_t agp_size, prealloc_size, prealloc_start;
 
 	/* i915 has 4 more counters */
 	dev->counters += 4;
@@ -1234,7 +1410,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1234 "performance may suffer.\n"); 1410 "performance may suffer.\n");
1235 } 1411 }
1236 1412
1237 ret = i915_probe_agp(dev, &agp_size, &prealloc_size); 1413 ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
1238 if (ret) 1414 if (ret)
1239 goto out_iomapfree; 1415 goto out_iomapfree;
1240 1416
@@ -1300,8 +1476,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		return ret;
 	}
 
+	/* Start out suspended */
+	dev_priv->mm.suspended = 1;
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+		ret = i915_load_modeset_init(dev, prealloc_start,
+					     prealloc_size, agp_size);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
 			goto out_workqueue_free;
@@ -1313,6 +1493,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (!IS_IGDNG(dev))
 		intel_opregion_init(dev, 0);
 
+	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+		    (unsigned long) dev);
 	return 0;
 
 out_workqueue_free:
@@ -1333,6 +1515,7 @@ int i915_driver_unload(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	destroy_workqueue(dev_priv->wq);
+	del_timer_sync(&dev_priv->hangcheck_timer);
 
 	io_mapping_free(dev_priv->mm.gtt_mapping);
 	if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -1472,6 +1655,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
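The DRM_I915_GEM_MADVISE ioctl registered above lets user space mark a buffer's backing pages as purgeable; the i915_drm.h hunk in this merge defines the argument struct. A hedged user-space sketch, assuming the I915_MADV_* values and ioctl macro from that header:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Sketch: advise the kernel that a cached buffer may be discarded
	 * under memory pressure.  On a later WILLNEED call, retained == 0
	 * means the backing pages were already purged. */
	static int gem_madvise(int fd, uint32_t handle, uint32_t madv)
	{
		struct drm_i915_gem_madvise arg = {
			.handle = handle,
			.madv = madv,	/* I915_MADV_WILLNEED or _DONTNEED */
		};

		if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg) < 0)
			return -1;
		return arg.retained;	/* 1 = backing pages still present */
	}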
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index dbe568c9327b..b93814c0d3e2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -89,6 +89,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 
+	dev_priv->suspended = 1;
+
 	return 0;
 }
 
@@ -97,8 +99,6 @@ static int i915_resume(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 
-	pci_set_power_state(dev->pdev, PCI_D0);
-	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev))
 		return -1;
 	pci_set_master(dev->pdev);
@@ -124,9 +124,135 @@ static int i915_resume(struct drm_device *dev)
 		drm_helper_resume_force_mode(dev);
 	}
 
+	dev_priv->suspended = 0;
+
 	return ret;
 }
 
+/**
+ * i965_reset - reset chip after a hang
+ * @dev: drm device to reset
+ * @flags: reset domains
+ *
+ * Reset the chip.  Useful if a hang is detected. Returns zero on successful
+ * reset or otherwise an error code.
+ *
+ * Procedure is fairly simple:
+ *   - reset the chip using the reset reg
+ *   - re-init context state
+ *   - re-init hardware status page
+ *   - re-init ring buffer
+ *   - re-init interrupt state
+ *   - re-init display
+ */
+int i965_reset(struct drm_device *dev, u8 flags)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long timeout;
+	u8 gdrst;
+	/*
+	 * We really should only reset the display subsystem if we actually
+	 * need to
+	 */
+	bool need_display = true;
+
+	mutex_lock(&dev->struct_mutex);
+
+	/*
+	 * Clear request list
+	 */
+	i915_gem_retire_requests(dev);
+
+	if (need_display)
+		i915_save_display(dev);
+
+	if (IS_I965G(dev) || IS_G4X(dev)) {
+		/*
+		 * Set the domains we want to reset, then the reset bit (bit 0).
+		 * Clear the reset bit after a while and wait for hardware status
+		 * bit (bit 1) to be set
+		 */
+		pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+		pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
+		udelay(50);
+		pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
+
+		/* ...we don't want to loop forever though, 500ms should be plenty */
+		timeout = jiffies + msecs_to_jiffies(500);
+		do {
+			udelay(100);
+			pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+		} while ((gdrst & 0x1) && time_after(timeout, jiffies));
+
+		if (gdrst & 0x1) {
+			WARN(true, "i915: Failed to reset chip\n");
+			mutex_unlock(&dev->struct_mutex);
+			return -EIO;
+		}
+	} else {
+		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+		return -ENODEV;
+	}
+
+	/* Ok, now get things going again... */
+
+	/*
+	 * Everything depends on having the GTT running, so we need to start
+	 * there.  Fortunately we don't need to do this unless we reset the
+	 * chip at a PCI level.
+	 *
+	 * Next we need to restore the context, but we don't use those
+	 * yet either...
+	 *
+	 * Ring buffer needs to be re-initialized in the KMS case, or if X
+	 * was running at the time of the reset (i.e. we weren't VT
+	 * switched away).
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
+	    !dev_priv->mm.suspended) {
+		drm_i915_ring_buffer_t *ring = &dev_priv->ring;
+		struct drm_gem_object *obj = ring->ring_obj;
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+		dev_priv->mm.suspended = 0;
+
+		/* Stop the ring if it's running. */
+		I915_WRITE(PRB0_CTL, 0);
+		I915_WRITE(PRB0_TAIL, 0);
+		I915_WRITE(PRB0_HEAD, 0);
+
+		/* Initialize the ring. */
+		I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+		I915_WRITE(PRB0_CTL,
+			   ((obj->size - 4096) & RING_NR_PAGES) |
+			   RING_NO_REPORT |
+			   RING_VALID);
+		if (!drm_core_check_feature(dev, DRIVER_MODESET))
+			i915_kernel_lost_context(dev);
+		else {
+			ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+			ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+			ring->space = ring->head - (ring->tail + 8);
+			if (ring->space < 0)
+				ring->space += ring->Size;
+		}
+
+		mutex_unlock(&dev->struct_mutex);
+		drm_irq_uninstall(dev);
+		drm_irq_install(dev);
+		mutex_lock(&dev->struct_mutex);
+	}
+
+	/*
+	 * Display needs restore too...
+	 */
+	if (need_display)
+		i915_restore_display(dev);
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -234,6 +360,8 @@ static int __init i915_init(void)
 {
 	driver.num_ioctls = i915_max_ioctl;
 
+	i915_gem_shrinker_init();
+
 	/*
 	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
 	 * explicitly disabled with the module pararmeter.
@@ -260,6 +388,7 @@ static int __init i915_init(void)
 
 static void __exit i915_exit(void)
 {
+	i915_gem_shrinker_exit();
 	drm_exit(&driver);
 }
 
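i965_reset() above is driven by the hangcheck timer that i915_dma.c now arms: every DRM_I915_HANGCHECK_PERIOD jiffies the handler samples the ring's ACTHD register and, if it has not moved for several periods, declares a hang (the real handler, i915_hangcheck_elapsed(), lives in i915_irq.c elsewhere in this merge). A simplified sketch of the detection idea, with the threshold as an assumption:

	/* Sketch of hangcheck: if ACTHD stops advancing across consecutive
	 * timer periods, assume the GPU hung.  HANGCHECK_THRESHOLD is
	 * illustrative, not taken from the patch. */
	#define HANGCHECK_THRESHOLD 3

	static void hangcheck_sample(drm_i915_private_t *dev_priv,
				     uint32_t acthd)
	{
		if (acthd != dev_priv->last_acthd)
			dev_priv->hangcheck_count = 0;	/* still progressing */
		else if (++dev_priv->hangcheck_count >= HANGCHECK_THRESHOLD)
			;	/* wedge the GPU and schedule i965_reset() */

		dev_priv->last_acthd = acthd;
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies + DRM_I915_HANGCHECK_PERIOD);
	}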
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a0632f8e76ac..b24b2d145b75 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -48,6 +48,11 @@ enum pipe {
 	PIPE_B,
 };
 
+enum plane {
+	PLANE_A = 0,
+	PLANE_B,
+};
+
 #define I915_NUM_PIPE	2
 
 /* Interface history:
@@ -148,6 +153,23 @@ struct drm_i915_error_state {
 	struct timeval time;
 };
 
+struct drm_i915_display_funcs {
+	void (*dpms)(struct drm_crtc *crtc, int mode);
+	bool (*fbc_enabled)(struct drm_crtc *crtc);
+	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+	void (*disable_fbc)(struct drm_device *dev);
+	int (*get_display_clock_speed)(struct drm_device *dev);
+	int (*get_fifo_size)(struct drm_device *dev, int plane);
+	void (*update_wm)(struct drm_device *dev, int planea_clock,
+			  int planeb_clock, int sr_hdisplay, int pixel_size);
+	/* clock updates for mode set */
+	/* cursor updates */
+	/* render clock increase/decrease */
+	/* display clock increase/decrease */
+	/* pll clock increase/decrease */
+	/* clock gating init */
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
@@ -198,10 +220,21 @@ typedef struct drm_i915_private {
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int vblank_pipe;
 
+	/* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
+	struct timer_list hangcheck_timer;
+	int hangcheck_count;
+	uint32_t last_acthd;
+
 	bool cursor_needs_physical;
 
 	struct drm_mm vram;
 
+	unsigned long cfb_size;
+	unsigned long cfb_pitch;
+	int cfb_fence;
+	int cfb_plane;
+
 	int irq_enabled;
 
 	struct intel_opregion opregion;
@@ -222,6 +255,8 @@ typedef struct drm_i915_private {
 	unsigned int edp_support:1;
 	int lvds_ssc_freq;
 
+	struct notifier_block lid_notifier;
+
 	int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
@@ -234,7 +269,11 @@ typedef struct drm_i915_private {
 	struct work_struct error_work;
 	struct workqueue_struct *wq;
 
+	/* Display functions */
+	struct drm_i915_display_funcs display;
+
 	/* Register state */
+	bool suspended;
 	u8 saveLBB;
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
@@ -350,6 +389,15 @@ typedef struct drm_i915_private {
 	int gtt_mtrr;
 
 	/**
+	 * Membership on list of all loaded devices, used to evict
+	 * inactive buffers under memory pressure.
+	 *
+	 * Modifications should only be done whilst holding the
+	 * shrink_list_lock spinlock.
+	 */
+	struct list_head shrink_list;
+
+	/**
 	 * List of objects currently involved in rendering from the
 	 * ringbuffer.
 	 *
@@ -432,7 +480,7 @@ typedef struct drm_i915_private {
 	 * It prevents command submission from occuring and makes
 	 * every pending request fail
 	 */
-	int wedged;
+	atomic_t wedged;
 
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
@@ -491,10 +539,7 @@ struct drm_i915_gem_object {
 	 * This is the same as gtt_space->start
 	 */
 	uint32_t gtt_offset;
-	/**
-	 * Required alignment for the object
-	 */
-	uint32_t gtt_alignment;
+
 	/**
 	 * Fake offset for use by mmap(2)
 	 */
@@ -541,6 +586,11 @@ struct drm_i915_gem_object {
 	 * in an execbuffer object list.
 	 */
 	int in_execbuffer;
+
+	/**
+	 * Advice: are the backing pages purgeable?
+	 */
+	int madv;
 };
 
 /**
@@ -585,6 +635,8 @@ extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
 
+extern void i915_save_display(struct drm_device *dev);
+extern void i915_restore_display(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
@@ -604,8 +656,10 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 extern int i915_emit_box(struct drm_device *dev,
 			 struct drm_clip_rect *boxes,
 			 int i, int DR1, int DR4);
+extern int i965_reset(struct drm_device *dev, u8 flags);
 
 /* i915_irq.c */
+void i915_hangcheck_elapsed(unsigned long data);
 extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -676,6 +730,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
+int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv);
 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
@@ -695,6 +751,7 @@ int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 uint32_t i915_get_gem_seqno(struct drm_device *dev);
+bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
 void i915_gem_retire_requests(struct drm_device *dev);
@@ -720,6 +777,9 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 
+void i915_gem_shrinker_init(void);
+void i915_gem_shrinker_exit(void);
+
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
725void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); 785void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
@@ -767,6 +827,8 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
767extern void intel_modeset_init(struct drm_device *dev); 827extern void intel_modeset_init(struct drm_device *dev);
768extern void intel_modeset_cleanup(struct drm_device *dev); 828extern void intel_modeset_cleanup(struct drm_device *dev);
769extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 829extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
830extern void i8xx_disable_fbc(struct drm_device *dev);
831extern void g4x_disable_fbc(struct drm_device *dev);
770 832
771/** 833/**
772 * Lock test for when it's just for synchronization of ring access. 834 * Lock test for when it's just for synchronization of ring access.
@@ -864,6 +926,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 	 (dev)->pci_device == 0x2E12 || \
 	 (dev)->pci_device == 0x2E22 || \
 	 (dev)->pci_device == 0x2E32 || \
+	 (dev)->pci_device == 0x2E42 || \
 	 (dev)->pci_device == 0x0042 || \
 	 (dev)->pci_device == 0x0046)
 
@@ -876,6 +939,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 	 (dev)->pci_device == 0x2E12 || \
 	 (dev)->pci_device == 0x2E22 || \
 	 (dev)->pci_device == 0x2E32 || \
+	 (dev)->pci_device == 0x2E42 || \
 	 IS_GM45(dev))
 
 #define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
@@ -909,12 +973,13 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IGDNG(dev))
 #define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IGDNG(dev))
 #define SUPPORTS_EDP(dev)		(IS_IGDNG_M(dev))
-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
+#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
 /* dsparb controlled by hw only */
 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
 
 #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
 #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev)))
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
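The drm_i915_display_funcs table added above is the mechanism behind the "split display functions by chip type" commit: each chip family fills in its own implementations once at init, and callers go through dev_priv->display instead of chains of IS_*() checks. A sketch of a call site, with the watermark parameter values as placeholders:

	/* Sketch: call through the per-chip vtable; intel_display.c fills
	 * in these pointers at init time.  Argument values here are
	 * placeholders, not from the patch. */
	static void update_watermarks(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;

		if (dev_priv->display.update_wm)
			dev_priv->display.update_wm(dev, 0 /* planea_clock */,
						    0 /* planeb_clock */,
						    0 /* sr_hdisplay */,
						    4 /* pixel_size */);
	}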
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c67317112f4a..40727d4c2919 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,6 +29,7 @@
29#include "drm.h" 29#include "drm.h"
30#include "i915_drm.h" 30#include "i915_drm.h"
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include "i915_trace.h"
32#include "intel_drv.h" 33#include "intel_drv.h"
33#include <linux/swap.h> 34#include <linux/swap.h>
34#include <linux/pci.h> 35#include <linux/pci.h>
@@ -48,11 +49,15 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					   unsigned alignment);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file_priv);
 
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end)
 {
@@ -316,6 +321,45 @@ fail_unlock:
 	return ret;
 }
 
+static inline gfp_t
+i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
+{
+	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
+}
+
+static inline void
+i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
+{
+	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+}
+
+static int
+i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+{
+	int ret;
+
+	ret = i915_gem_object_get_pages(obj);
+
+	/* If we've insufficient memory to map in the pages, attempt
+	 * to make some space by throwing out some old buffers.
+	 */
+	if (ret == -ENOMEM) {
+		struct drm_device *dev = obj->dev;
+		gfp_t gfp;
+
+		ret = i915_gem_evict_something(dev, obj->size);
+		if (ret)
+			return ret;
+
+		gfp = i915_gem_object_get_page_gfp_mask(obj);
+		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
+		ret = i915_gem_object_get_pages(obj);
+		i915_gem_object_set_page_gfp_mask (obj, gfp);
+	}
+
+	return ret;
+}
+
 /**
  * This is the fallback shmem pread path, which allocates temporary storage
  * in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -367,8 +411,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret != 0)
+	ret = i915_gem_object_get_pages_or_evict(obj);
+	if (ret)
 		goto fail_unlock;
 
 	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
@@ -842,8 +886,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret != 0)
+	ret = i915_gem_object_get_pages_or_evict(obj);
+	if (ret)
 		goto fail_unlock;
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
@@ -1155,28 +1199,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	/* Now bind it into the GTT if needed */
 	mutex_lock(&dev->struct_mutex);
 	if (!obj_priv->gtt_space) {
-		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return VM_FAULT_SIGBUS;
-		}
-
-		ret = i915_gem_object_set_to_gtt_domain(obj, write);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return VM_FAULT_SIGBUS;
-		}
+		ret = i915_gem_object_bind_to_gtt(obj, 0);
+		if (ret)
+			goto unlock;
 
 		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, write);
+		if (ret)
+			goto unlock;
 	}
 
 	/* Need a new fence register? */
 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
 		ret = i915_gem_object_get_fence_reg(obj);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return VM_FAULT_SIGBUS;
-		}
+		if (ret)
+			goto unlock;
 	}
 
 	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
@@ -1184,18 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	/* Finally, remap it using the new GTT offset */
 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+unlock:
 	mutex_unlock(&dev->struct_mutex);
 
 	switch (ret) {
+	case 0:
+	case -ERESTARTSYS:
+		return VM_FAULT_NOPAGE;
 	case -ENOMEM:
 	case -EAGAIN:
 		return VM_FAULT_OOM;
-	case -EFAULT:
-	case -EINVAL:
-		return VM_FAULT_SIGBUS;
 	default:
-		return VM_FAULT_NOPAGE;
+		return VM_FAULT_SIGBUS;
 	}
 }
 
@@ -1388,6 +1426,14 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
 	obj_priv = obj->driver_private;
 
+	if (obj_priv->madv != I915_MADV_WILLNEED) {
+		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+
 	if (!obj_priv->mmap_offset) {
 		ret = i915_gem_create_mmap_offset(obj);
 		if (ret) {
@@ -1399,22 +1445,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
 	args->offset = obj_priv->mmap_offset;
 
-	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
-
-	/* Make sure the alignment is correct for fence regs etc */
-	if (obj_priv->agp_mem &&
-	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-
 	/*
 	 * Pull it into the GTT so that we have a page list (makes the
 	 * initial fault faster and any subsequent flushing possible).
 	 */
 	if (!obj_priv->agp_mem) {
-		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+		ret = i915_gem_object_bind_to_gtt(obj, 0);
 		if (ret) {
 			drm_gem_object_unreference(obj);
 			mutex_unlock(&dev->struct_mutex);
@@ -1437,6 +1473,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 	int i;
 
 	BUG_ON(obj_priv->pages_refcount == 0);
+	BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
 
 	if (--obj_priv->pages_refcount != 0)
 		return;
@@ -1444,13 +1481,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 	if (obj_priv->tiling_mode != I915_TILING_NONE)
 		i915_gem_object_save_bit_17_swizzle(obj);
 
-	for (i = 0; i < page_count; i++)
-		if (obj_priv->pages[i] != NULL) {
-			if (obj_priv->dirty)
-				set_page_dirty(obj_priv->pages[i]);
-			mark_page_accessed(obj_priv->pages[i]);
-			page_cache_release(obj_priv->pages[i]);
-		}
+	if (obj_priv->madv == I915_MADV_DONTNEED)
+		obj_priv->dirty = 0;
+
+	for (i = 0; i < page_count; i++) {
+		if (obj_priv->pages[i] == NULL)
+			break;
+
+		if (obj_priv->dirty)
+			set_page_dirty(obj_priv->pages[i]);
+
+		if (obj_priv->madv == I915_MADV_WILLNEED)
+			mark_page_accessed(obj_priv->pages[i]);
+
+		page_cache_release(obj_priv->pages[i]);
+	}
 	obj_priv->dirty = 0;
 
 	drm_free_large(obj_priv->pages);
@@ -1489,6 +1534,26 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1489 obj_priv->last_rendering_seqno = 0; 1534 obj_priv->last_rendering_seqno = 0;
1490} 1535}
1491 1536
1537/* Immediately discard the backing storage */
1538static void
1539i915_gem_object_truncate(struct drm_gem_object *obj)
1540{
1541 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1542 struct inode *inode;
1543
1544 inode = obj->filp->f_path.dentry->d_inode;
1545 if (inode->i_op->truncate)
1546 inode->i_op->truncate (inode);
1547
1548 obj_priv->madv = __I915_MADV_PURGED;
1549}
1550
1551static inline int
1552i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1553{
1554 return obj_priv->madv == I915_MADV_DONTNEED;
1555}
1556
1492static void 1557static void
1493i915_gem_object_move_to_inactive(struct drm_gem_object *obj) 1558i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1494{ 1559{
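
i915_gem_object_truncate() discards backing storage by calling the truncate inode operation on the object's shmem file, after which the object is permanently marked __I915_MADV_PURGED. A user-space analogue of the idea, with a plain temp file standing in for the shmem inode:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	FILE *f = tmpfile();            /* stand-in for the object's shmem file */
	char buf[4096];

	if (!f)
		return 1;

	memset(buf, 0xaa, sizeof(buf)); /* pretend the GPU wrote a page */
	fwrite(buf, 1, sizeof(buf), f);
	fflush(f);

	/* The purge: truncating to zero lets the kernel free every backing
	 * page at once, which is what __I915_MADV_PURGED implies: the
	 * contents are gone for good. */
	if (ftruncate(fileno(f), 0) != 0)
		return 1;

	fclose(f);
	return 0;
}
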
@@ -1577,15 +1642,24 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1577 1642
1578 if ((obj->write_domain & flush_domains) == 1643 if ((obj->write_domain & flush_domains) ==
1579 obj->write_domain) { 1644 obj->write_domain) {
1645 uint32_t old_write_domain = obj->write_domain;
1646
1580 obj->write_domain = 0; 1647 obj->write_domain = 0;
1581 i915_gem_object_move_to_active(obj, seqno); 1648 i915_gem_object_move_to_active(obj, seqno);
1649
1650 trace_i915_gem_object_change_domain(obj,
1651 obj->read_domains,
1652 old_write_domain);
1582 } 1653 }
1583 } 1654 }
1584 1655
1585 } 1656 }
1586 1657
1587 if (was_empty && !dev_priv->mm.suspended) 1658 if (!dev_priv->mm.suspended) {
1588 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1659 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1660 if (was_empty)
1661 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1662 }
1589 return seqno; 1663 return seqno;
1590} 1664}
1591 1665
@@ -1623,6 +1697,8 @@ i915_gem_retire_request(struct drm_device *dev,
1623{ 1697{
1624 drm_i915_private_t *dev_priv = dev->dev_private; 1698 drm_i915_private_t *dev_priv = dev->dev_private;
1625 1699
1700 trace_i915_gem_request_retire(dev, request->seqno);
1701
1626 /* Move any buffers on the active list that are no longer referenced 1702 /* Move any buffers on the active list that are no longer referenced
1627 * by the ringbuffer to the flushing/inactive lists as appropriate. 1703 * by the ringbuffer to the flushing/inactive lists as appropriate.
1628 */ 1704 */
@@ -1671,7 +1747,7 @@ out:
1671/** 1747/**
1672 * Returns true if seq1 is later than seq2. 1748 * Returns true if seq1 is later than seq2.
1673 */ 1749 */
1674static int 1750bool
1675i915_seqno_passed(uint32_t seq1, uint32_t seq2) 1751i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1676{ 1752{
1677 return (int32_t)(seq1 - seq2) >= 0; 1753 return (int32_t)(seq1 - seq2) >= 0;
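
Exporting i915_seqno_passed() lets the hangcheck code reuse the wraparound-safe comparison. Casting the unsigned difference to a signed 32-bit value keeps the test correct even after the sequence counter wraps past UINT32_MAX, as long as the two values are less than 2^31 apart. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe comparison: true if a was issued at or after b. */
static int seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	/* Near the wrap point: 5 was issued after 0xfffffffb. */
	printf("%d\n", seqno_passed(5, 0xfffffffbu));   /* 1 */
	printf("%d\n", seqno_passed(0xfffffffbu, 5));   /* 0 */
	printf("%d\n", seqno_passed(100, 100));         /* 1: equal passes */
	return 0;
}
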
@@ -1709,7 +1785,7 @@ i915_gem_retire_requests(struct drm_device *dev)
1709 retiring_seqno = request->seqno; 1785 retiring_seqno = request->seqno;
1710 1786
1711 if (i915_seqno_passed(seqno, retiring_seqno) || 1787 if (i915_seqno_passed(seqno, retiring_seqno) ||
1712 dev_priv->mm.wedged) { 1788 atomic_read(&dev_priv->mm.wedged)) {
1713 i915_gem_retire_request(dev, request); 1789 i915_gem_retire_request(dev, request);
1714 1790
1715 list_del(&request->list); 1791 list_del(&request->list);
@@ -1751,6 +1827,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1751 1827
1752 BUG_ON(seqno == 0); 1828 BUG_ON(seqno == 0);
1753 1829
1830 if (atomic_read(&dev_priv->mm.wedged))
1831 return -EIO;
1832
1754 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1833 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1755 if (IS_IGDNG(dev)) 1834 if (IS_IGDNG(dev))
1756 ier = I915_READ(DEIER) | I915_READ(GTIER); 1835 ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1763,16 +1842,20 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1763 i915_driver_irq_postinstall(dev); 1842 i915_driver_irq_postinstall(dev);
1764 } 1843 }
1765 1844
1845 trace_i915_gem_request_wait_begin(dev, seqno);
1846
1766 dev_priv->mm.waiting_gem_seqno = seqno; 1847 dev_priv->mm.waiting_gem_seqno = seqno;
1767 i915_user_irq_get(dev); 1848 i915_user_irq_get(dev);
1768 ret = wait_event_interruptible(dev_priv->irq_queue, 1849 ret = wait_event_interruptible(dev_priv->irq_queue,
1769 i915_seqno_passed(i915_get_gem_seqno(dev), 1850 i915_seqno_passed(i915_get_gem_seqno(dev),
1770 seqno) || 1851 seqno) ||
1771 dev_priv->mm.wedged); 1852 atomic_read(&dev_priv->mm.wedged));
1772 i915_user_irq_put(dev); 1853 i915_user_irq_put(dev);
1773 dev_priv->mm.waiting_gem_seqno = 0; 1854 dev_priv->mm.waiting_gem_seqno = 0;
1855
1856 trace_i915_gem_request_wait_end(dev, seqno);
1774 } 1857 }
1775 if (dev_priv->mm.wedged) 1858 if (atomic_read(&dev_priv->mm.wedged))
1776 ret = -EIO; 1859 ret = -EIO;
1777 1860
1778 if (ret && ret != -ERESTARTSYS) 1861 if (ret && ret != -ERESTARTSYS)
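
These hunks convert mm.wedged from a plain int into an atomic_t because the flag is now set from the error/hangcheck path and polled by waiters with no lock in common. A minimal user-space sketch of the same pattern using C11 atomics; the helper names are invented for the demo:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* One context (error handling) writes the flag, another (waiters)
 * polls it; routing every access through an atomic avoids torn or
 * stale reads without requiring a shared mutex. */
static atomic_int wedged = 0;

static void mark_wedged(void)   { atomic_store(&wedged, 1); }
static bool gpu_is_wedged(void) { return atomic_load(&wedged) != 0; }

int main(void)
{
	mark_wedged();          /* error path declares the hang */
	if (gpu_is_wedged())
		puts("bail out with -EIO before sleeping on the seqno");
	return 0;
}
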
@@ -1803,6 +1886,8 @@ i915_gem_flush(struct drm_device *dev,
1803 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, 1886 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1804 invalidate_domains, flush_domains); 1887 invalidate_domains, flush_domains);
1805#endif 1888#endif
1889 trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
1890 invalidate_domains, flush_domains);
1806 1891
1807 if (flush_domains & I915_GEM_DOMAIN_CPU) 1892 if (flush_domains & I915_GEM_DOMAIN_CPU)
1808 drm_agp_chipset_flush(dev); 1893 drm_agp_chipset_flush(dev);
@@ -1915,6 +2000,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1915 return -EINVAL; 2000 return -EINVAL;
1916 } 2001 }
1917 2002
2003 /* blow away mappings if mapped through GTT */
2004 i915_gem_release_mmap(obj);
2005
2006 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2007 i915_gem_clear_fence_reg(obj);
2008
1918 /* Move the object to the CPU domain to ensure that 2009 /* Move the object to the CPU domain to ensure that
1919 * any possible CPU writes while it's not in the GTT 2010 * any possible CPU writes while it's not in the GTT
1920 * are flushed when we go to remap it. This will 2011 * are flushed when we go to remap it. This will
@@ -1928,21 +2019,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1928 return ret; 2019 return ret;
1929 } 2020 }
1930 2021
2022 BUG_ON(obj_priv->active);
2023
1931 if (obj_priv->agp_mem != NULL) { 2024 if (obj_priv->agp_mem != NULL) {
1932 drm_unbind_agp(obj_priv->agp_mem); 2025 drm_unbind_agp(obj_priv->agp_mem);
1933 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); 2026 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1934 obj_priv->agp_mem = NULL; 2027 obj_priv->agp_mem = NULL;
1935 } 2028 }
1936 2029
1937 BUG_ON(obj_priv->active);
1938
1939 /* blow away mappings if mapped through GTT */
1940 i915_gem_release_mmap(obj);
1941
1942 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1943 i915_gem_clear_fence_reg(obj);
1944
1945 i915_gem_object_put_pages(obj); 2030 i915_gem_object_put_pages(obj);
2031 BUG_ON(obj_priv->pages_refcount);
1946 2032
1947 if (obj_priv->gtt_space) { 2033 if (obj_priv->gtt_space) {
1948 atomic_dec(&dev->gtt_count); 2034 atomic_dec(&dev->gtt_count);
@@ -1956,40 +2042,113 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1956 if (!list_empty(&obj_priv->list)) 2042 if (!list_empty(&obj_priv->list))
1957 list_del_init(&obj_priv->list); 2043 list_del_init(&obj_priv->list);
1958 2044
2045 if (i915_gem_object_is_purgeable(obj_priv))
2046 i915_gem_object_truncate(obj);
2047
2048 trace_i915_gem_object_unbind(obj);
2049
1959 return 0; 2050 return 0;
1960} 2051}
1961 2052
2053static struct drm_gem_object *
2054i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2055{
2056 drm_i915_private_t *dev_priv = dev->dev_private;
2057 struct drm_i915_gem_object *obj_priv;
2058 struct drm_gem_object *best = NULL;
2059 struct drm_gem_object *first = NULL;
2060
2061 /* Try to find the smallest clean object */
2062 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
2063 struct drm_gem_object *obj = obj_priv->obj;
2064 if (obj->size >= min_size) {
2065 if ((!obj_priv->dirty ||
2066 i915_gem_object_is_purgeable(obj_priv)) &&
2067 (!best || obj->size < best->size)) {
2068 best = obj;
2069 if (best->size == min_size)
2070 return best;
2071 }
2072 if (!first)
2073 first = obj;
2074 }
2075 }
2076
2077 return best ? best : first;
2078}
2079
1962static int 2080static int
1963i915_gem_evict_something(struct drm_device *dev) 2081i915_gem_evict_everything(struct drm_device *dev)
2082{
2083 drm_i915_private_t *dev_priv = dev->dev_private;
2084 uint32_t seqno;
2085 int ret;
2086 bool lists_empty;
2087
2088 spin_lock(&dev_priv->mm.active_list_lock);
2089 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2090 list_empty(&dev_priv->mm.flushing_list) &&
2091 list_empty(&dev_priv->mm.active_list));
2092 spin_unlock(&dev_priv->mm.active_list_lock);
2093
2094 if (lists_empty)
2095 return -ENOSPC;
2096
2097 /* Flush everything (on to the inactive lists) and evict */
2098 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2099 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2100 if (seqno == 0)
2101 return -ENOMEM;
2102
2103 ret = i915_wait_request(dev, seqno);
2104 if (ret)
2105 return ret;
2106
2107 ret = i915_gem_evict_from_inactive_list(dev);
2108 if (ret)
2109 return ret;
2110
2111 spin_lock(&dev_priv->mm.active_list_lock);
2112 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2113 list_empty(&dev_priv->mm.flushing_list) &&
2114 list_empty(&dev_priv->mm.active_list));
2115 spin_unlock(&dev_priv->mm.active_list_lock);
2116 BUG_ON(!lists_empty);
2117
2118 return 0;
2119}
2120
2121static int
2122i915_gem_evict_something(struct drm_device *dev, int min_size)
1964{ 2123{
1965 drm_i915_private_t *dev_priv = dev->dev_private; 2124 drm_i915_private_t *dev_priv = dev->dev_private;
1966 struct drm_gem_object *obj; 2125 struct drm_gem_object *obj;
1967 struct drm_i915_gem_object *obj_priv; 2126 int ret;
1968 int ret = 0;
1969 2127
1970 for (;;) { 2128 for (;;) {
2129 i915_gem_retire_requests(dev);
2130
1971 /* If there's an inactive buffer available now, grab it 2131 /* If there's an inactive buffer available now, grab it
1972 * and be done. 2132 * and be done.
1973 */ 2133 */
1974 if (!list_empty(&dev_priv->mm.inactive_list)) { 2134 obj = i915_gem_find_inactive_object(dev, min_size);
1975 obj_priv = list_first_entry(&dev_priv->mm.inactive_list, 2135 if (obj) {
1976 struct drm_i915_gem_object, 2136 struct drm_i915_gem_object *obj_priv;
1977 list); 2137
1978 obj = obj_priv->obj;
1979 BUG_ON(obj_priv->pin_count != 0);
1980#if WATCH_LRU 2138#if WATCH_LRU
1981 DRM_INFO("%s: evicting %p\n", __func__, obj); 2139 DRM_INFO("%s: evicting %p\n", __func__, obj);
1982#endif 2140#endif
2141 obj_priv = obj->driver_private;
2142 BUG_ON(obj_priv->pin_count != 0);
1983 BUG_ON(obj_priv->active); 2143 BUG_ON(obj_priv->active);
1984 2144
1985 /* Wait on the rendering and unbind the buffer. */ 2145 /* Wait on the rendering and unbind the buffer. */
1986 ret = i915_gem_object_unbind(obj); 2146 return i915_gem_object_unbind(obj);
1987 break;
1988 } 2147 }
1989 2148
1990 /* If we didn't get anything, but the ring is still processing 2149 /* If we didn't get anything, but the ring is still processing
1991 * things, wait for one of those things to finish and hopefully 2150 * things, wait for the next to finish and hopefully leave us
1992 * leave us a buffer to evict. 2151 * a buffer to evict.
1993 */ 2152 */
1994 if (!list_empty(&dev_priv->mm.request_list)) { 2153 if (!list_empty(&dev_priv->mm.request_list)) {
1995 struct drm_i915_gem_request *request; 2154 struct drm_i915_gem_request *request;
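
i915_gem_find_inactive_object() prefers the smallest clean (or purgeable) buffer that satisfies min_size, returning early on an exact fit, and falls back to the first buffer large enough even if it is dirty. The same policy over a plain array, as a compilable sketch:

#include <stddef.h>
#include <stdio.h>

struct buf { size_t size; int dirty; int purgeable; };

static int find_inactive(const struct buf *b, int n, size_t min_size)
{
	int best = -1, first = -1;

	for (int i = 0; i < n; i++) {
		if (b[i].size < min_size)
			continue;
		if (first < 0)
			first = i;      /* remember any buffer that fits */
		if ((!b[i].dirty || b[i].purgeable) &&
		    (best < 0 || b[i].size < b[best].size)) {
			best = i;
			if (b[i].size == min_size)
				return best;    /* exact fit: stop searching */
		}
	}
	return best >= 0 ? best : first;
}

int main(void)
{
	struct buf bufs[] = {
		{ 8192, 1, 0 },     /* dirty: costly to evict */
		{ 65536, 0, 0 },    /* clean but large */
		{ 16384, 0, 0 },    /* clean and closest to min_size */
	};
	printf("evict index %d\n", find_inactive(bufs, 3, 4096)); /* 2 */
	return 0;
}
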
@@ -2000,16 +2159,9 @@ i915_gem_evict_something(struct drm_device *dev)
2000 2159
2001 ret = i915_wait_request(dev, request->seqno); 2160 ret = i915_wait_request(dev, request->seqno);
2002 if (ret) 2161 if (ret)
2003 break; 2162 return ret;
2004 2163
2005 /* if waiting caused an object to become inactive, 2164 continue;
2006 * then loop around and wait for it. Otherwise, we
2007 * assume that waiting freed and unbound something,
2008 * so there should now be some space in the GTT
2009 */
2010 if (!list_empty(&dev_priv->mm.inactive_list))
2011 continue;
2012 break;
2013 } 2165 }
2014 2166
2015 /* If we didn't have anything on the request list but there 2167 /* If we didn't have anything on the request list but there
@@ -2018,46 +2170,44 @@ i915_gem_evict_something(struct drm_device *dev)
2018 * will get moved to inactive. 2170 * will get moved to inactive.
2019 */ 2171 */
2020 if (!list_empty(&dev_priv->mm.flushing_list)) { 2172 if (!list_empty(&dev_priv->mm.flushing_list)) {
2021 obj_priv = list_first_entry(&dev_priv->mm.flushing_list, 2173 struct drm_i915_gem_object *obj_priv;
2022 struct drm_i915_gem_object,
2023 list);
2024 obj = obj_priv->obj;
2025 2174
2026 i915_gem_flush(dev, 2175 /* Find an object that we can immediately reuse */
2027 obj->write_domain, 2176 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
2028 obj->write_domain); 2177 obj = obj_priv->obj;
2029 i915_add_request(dev, NULL, obj->write_domain); 2178 if (obj->size >= min_size)
2179 break;
2030 2180
2031 obj = NULL; 2181 obj = NULL;
2032 continue; 2182 }
2033 }
2034 2183
2035 DRM_ERROR("inactive empty %d request empty %d " 2184 if (obj != NULL) {
2036 "flushing empty %d\n", 2185 uint32_t seqno;
2037 list_empty(&dev_priv->mm.inactive_list),
2038 list_empty(&dev_priv->mm.request_list),
2039 list_empty(&dev_priv->mm.flushing_list));
2040 /* If we didn't do any of the above, there's nothing to be done
2041 * and we just can't fit it in.
2042 */
2043 return -ENOSPC;
2044 }
2045 return ret;
2046}
2047 2186
2048static int 2187 i915_gem_flush(dev,
2049i915_gem_evict_everything(struct drm_device *dev) 2188 obj->write_domain,
2050{ 2189 obj->write_domain);
2051 int ret; 2190 seqno = i915_add_request(dev, NULL, obj->write_domain);
2191 if (seqno == 0)
2192 return -ENOMEM;
2052 2193
2053 for (;;) { 2194 ret = i915_wait_request(dev, seqno);
2054 ret = i915_gem_evict_something(dev); 2195 if (ret)
2055 if (ret != 0) 2196 return ret;
2056 break; 2197
2198 continue;
2199 }
2200 }
2201
2202 /* If we didn't do any of the above, there's no single buffer
2203 * large enough to swap out for the new one, so just evict
2204 * everything and start again. (This should be rare.)
2205 */
2206 if (!list_empty (&dev_priv->mm.inactive_list))
2207 return i915_gem_evict_from_inactive_list(dev);
2208 else
2209 return i915_gem_evict_everything(dev);
2057 } 2210 }
2058 if (ret == -ENOSPC)
2059 return 0;
2060 return ret;
2061} 2211}
2062 2212
2063int 2213int
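
Taken together, the reworked i915_gem_evict_something() is a ladder: reuse an inactive buffer if one fits, otherwise wait for the oldest request to retire, otherwise flush a large-enough buffer off the flushing list, and only then fall back to evicting the inactive list or everything. A toy model of that control flow; the helper names and the counters simulating driver state are inventions of this sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static int inactive = 0, requests = 1, flushing = 1;

static bool grab_inactive(size_t min_size)
{
	(void)min_size;
	return inactive-- > 0;      /* a fitting clean buffer was unbound */
}

static bool wait_oldest_request(void)
{
	return requests-- > 0;      /* retiring may move buffers inactive */
}

static bool flush_and_wait(size_t min_size)
{
	(void)min_size;
	if (flushing-- > 0) {
		inactive = 1;       /* flushed buffer lands on inactive list */
		return true;
	}
	return false;
}

static int evict_everything(void)
{
	return -1;                  /* last resort failed: -ENOSPC */
}

static int evict_something(size_t min_size)
{
	for (;;) {
		if (grab_inactive(min_size))
			return 0;
		if (wait_oldest_request())
			continue;
		if (flush_and_wait(min_size))
			continue;
		return evict_everything();
	}
}

int main(void)
{
	printf("%d\n", evict_something(4096)); /* 0: flushing freed a buffer */
	return 0;
}
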
@@ -2080,7 +2230,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2080 BUG_ON(obj_priv->pages != NULL); 2230 BUG_ON(obj_priv->pages != NULL);
2081 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); 2231 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2082 if (obj_priv->pages == NULL) { 2232 if (obj_priv->pages == NULL) {
2083 DRM_ERROR("Faled to allocate page list\n");
2084 obj_priv->pages_refcount--; 2233 obj_priv->pages_refcount--;
2085 return -ENOMEM; 2234 return -ENOMEM;
2086 } 2235 }
@@ -2091,7 +2240,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2091 page = read_mapping_page(mapping, i, NULL); 2240 page = read_mapping_page(mapping, i, NULL);
2092 if (IS_ERR(page)) { 2241 if (IS_ERR(page)) {
2093 ret = PTR_ERR(page); 2242 ret = PTR_ERR(page);
2094 DRM_ERROR("read_mapping_page failed: %d\n", ret);
2095 i915_gem_object_put_pages(obj); 2243 i915_gem_object_put_pages(obj);
2096 return ret; 2244 return ret;
2097 } 2245 }
@@ -2328,6 +2476,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2328 else 2476 else
2329 i830_write_fence_reg(reg); 2477 i830_write_fence_reg(reg);
2330 2478
2479 trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
2480
2331 return 0; 2481 return 0;
2332} 2482}
2333 2483
@@ -2410,10 +2560,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2410 drm_i915_private_t *dev_priv = dev->dev_private; 2560 drm_i915_private_t *dev_priv = dev->dev_private;
2411 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2561 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2412 struct drm_mm_node *free_space; 2562 struct drm_mm_node *free_space;
2413 int page_count, ret; 2563 bool retry_alloc = false;
2564 int ret;
2414 2565
2415 if (dev_priv->mm.suspended) 2566 if (dev_priv->mm.suspended)
2416 return -EBUSY; 2567 return -EBUSY;
2568
2569 if (obj_priv->madv != I915_MADV_WILLNEED) {
2570 DRM_ERROR("Attempting to bind a purgeable object\n");
2571 return -EINVAL;
2572 }
2573
2417 if (alignment == 0) 2574 if (alignment == 0)
2418 alignment = i915_gem_get_gtt_alignment(obj); 2575 alignment = i915_gem_get_gtt_alignment(obj);
2419 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { 2576 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
@@ -2433,30 +2590,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2433 } 2590 }
2434 } 2591 }
2435 if (obj_priv->gtt_space == NULL) { 2592 if (obj_priv->gtt_space == NULL) {
2436 bool lists_empty;
2437
2438 /* If the gtt is empty and we're still having trouble 2593 /* If the gtt is empty and we're still having trouble
2439 * fitting our object in, we're out of memory. 2594 * fitting our object in, we're out of memory.
2440 */ 2595 */
2441#if WATCH_LRU 2596#if WATCH_LRU
2442 DRM_INFO("%s: GTT full, evicting something\n", __func__); 2597 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2443#endif 2598#endif
2444 spin_lock(&dev_priv->mm.active_list_lock); 2599 ret = i915_gem_evict_something(dev, obj->size);
2445 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 2600 if (ret)
2446 list_empty(&dev_priv->mm.flushing_list) &&
2447 list_empty(&dev_priv->mm.active_list));
2448 spin_unlock(&dev_priv->mm.active_list_lock);
2449 if (lists_empty) {
2450 DRM_ERROR("GTT full, but LRU list empty\n");
2451 return -ENOSPC;
2452 }
2453
2454 ret = i915_gem_evict_something(dev);
2455 if (ret != 0) {
2456 if (ret != -ERESTARTSYS)
2457 DRM_ERROR("Failed to evict a buffer %d\n", ret);
2458 return ret; 2601 return ret;
2459 } 2602
2460 goto search_free; 2603 goto search_free;
2461 } 2604 }
2462 2605
@@ -2464,27 +2607,56 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2464 DRM_INFO("Binding object of size %zd at 0x%08x\n", 2607 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2465 obj->size, obj_priv->gtt_offset); 2608 obj->size, obj_priv->gtt_offset);
2466#endif 2609#endif
2610 if (retry_alloc) {
2611 i915_gem_object_set_page_gfp_mask (obj,
2612 i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
2613 }
2467 ret = i915_gem_object_get_pages(obj); 2614 ret = i915_gem_object_get_pages(obj);
2615 if (retry_alloc) {
2616 i915_gem_object_set_page_gfp_mask (obj,
2617 i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
2618 }
2468 if (ret) { 2619 if (ret) {
2469 drm_mm_put_block(obj_priv->gtt_space); 2620 drm_mm_put_block(obj_priv->gtt_space);
2470 obj_priv->gtt_space = NULL; 2621 obj_priv->gtt_space = NULL;
2622
2623 if (ret == -ENOMEM) {
2624 /* first try to clear up some space from the GTT */
2625 ret = i915_gem_evict_something(dev, obj->size);
2626 if (ret) {
2627 /* now try to shrink everyone else */
2628 if (! retry_alloc) {
2629 retry_alloc = true;
2630 goto search_free;
2631 }
2632
2633 return ret;
2634 }
2635
2636 goto search_free;
2637 }
2638
2471 return ret; 2639 return ret;
2472 } 2640 }
2473 2641
2474 page_count = obj->size / PAGE_SIZE;
2475 /* Create an AGP memory structure pointing at our pages, and bind it 2642 /* Create an AGP memory structure pointing at our pages, and bind it
2476 * into the GTT. 2643 * into the GTT.
2477 */ 2644 */
2478 obj_priv->agp_mem = drm_agp_bind_pages(dev, 2645 obj_priv->agp_mem = drm_agp_bind_pages(dev,
2479 obj_priv->pages, 2646 obj_priv->pages,
2480 page_count, 2647 obj->size >> PAGE_SHIFT,
2481 obj_priv->gtt_offset, 2648 obj_priv->gtt_offset,
2482 obj_priv->agp_type); 2649 obj_priv->agp_type);
2483 if (obj_priv->agp_mem == NULL) { 2650 if (obj_priv->agp_mem == NULL) {
2484 i915_gem_object_put_pages(obj); 2651 i915_gem_object_put_pages(obj);
2485 drm_mm_put_block(obj_priv->gtt_space); 2652 drm_mm_put_block(obj_priv->gtt_space);
2486 obj_priv->gtt_space = NULL; 2653 obj_priv->gtt_space = NULL;
2487 return -ENOMEM; 2654
2655 ret = i915_gem_evict_something(dev, obj->size);
2656 if (ret)
2657 return ret;
2658
2659 goto search_free;
2488 } 2660 }
2489 atomic_inc(&dev->gtt_count); 2661 atomic_inc(&dev->gtt_count);
2490 atomic_add(obj->size, &dev->gtt_memory); 2662 atomic_add(obj->size, &dev->gtt_memory);
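
The retry_alloc dance works around allocation failure in stages: get_pages() is first attempted with __GFP_NORETRY so the allocator fails fast, then the driver evicts from its own GTT, and only as a last resort is the mask relaxed so the VM may reclaim from everyone else. A compilable toy model; the counters and helper names are stand-ins, not the driver API:

#include <stdbool.h>
#include <stdio.h>

static int free_pages_left = 0;

static bool get_pages(bool allow_reclaim)
{
	if (free_pages_left > 0) {
		free_pages_left--;
		return true;
	}
	return allow_reclaim;   /* full reclaim is assumed to succeed */
}

static bool evict_something(void)
{
	return false;           /* GTT already empty in this scenario */
}

/* Cheap no-retry allocation first, then GTT eviction, then drop
 * __GFP_NORETRY and let the allocator shrink everyone else. */
static int bind_with_fallback(void)
{
	bool retry_alloc = false;

	for (;;) {
		if (get_pages(retry_alloc))
			return 0;
		if (evict_something())
			continue;
		if (!retry_alloc) {
			retry_alloc = true;
			continue;
		}
		return -1;
	}
}

int main(void)
{
	printf("%d\n", bind_with_fallback()); /* 0 after enabling reclaim */
	return 0;
}
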
@@ -2496,6 +2668,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2496 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); 2668 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2497 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); 2669 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2498 2670
2671 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2672
2499 return 0; 2673 return 0;
2500} 2674}
2501 2675
@@ -2511,15 +2685,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
2511 if (obj_priv->pages == NULL) 2685 if (obj_priv->pages == NULL)
2512 return; 2686 return;
2513 2687
2514 /* XXX: The 865 in particular appears to be weird in how it handles 2688 trace_i915_gem_object_clflush(obj);
2515 * cache flushing. We haven't figured it out, but the
2516 * clflush+agp_chipset_flush doesn't appear to successfully get the
2517 * data visible to the PGU, while wbinvd + agp_chipset_flush does.
2518 */
2519 if (IS_I865G(obj->dev)) {
2520 wbinvd();
2521 return;
2522 }
2523 2689
2524 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); 2690 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2525} 2691}
@@ -2530,21 +2696,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2530{ 2696{
2531 struct drm_device *dev = obj->dev; 2697 struct drm_device *dev = obj->dev;
2532 uint32_t seqno; 2698 uint32_t seqno;
2699 uint32_t old_write_domain;
2533 2700
2534 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2701 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2535 return; 2702 return;
2536 2703
2537 /* Queue the GPU write cache flushing we need. */ 2704 /* Queue the GPU write cache flushing we need. */
2705 old_write_domain = obj->write_domain;
2538 i915_gem_flush(dev, 0, obj->write_domain); 2706 i915_gem_flush(dev, 0, obj->write_domain);
2539 seqno = i915_add_request(dev, NULL, obj->write_domain); 2707 seqno = i915_add_request(dev, NULL, obj->write_domain);
2540 obj->write_domain = 0; 2708 obj->write_domain = 0;
2541 i915_gem_object_move_to_active(obj, seqno); 2709 i915_gem_object_move_to_active(obj, seqno);
2710
2711 trace_i915_gem_object_change_domain(obj,
2712 obj->read_domains,
2713 old_write_domain);
2542} 2714}
2543 2715
2544/** Flushes the GTT write domain for the object if it's dirty. */ 2716/** Flushes the GTT write domain for the object if it's dirty. */
2545static void 2717static void
2546i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) 2718i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2547{ 2719{
2720 uint32_t old_write_domain;
2721
2548 if (obj->write_domain != I915_GEM_DOMAIN_GTT) 2722 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2549 return; 2723 return;
2550 2724
@@ -2552,7 +2726,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2552 * to it immediately go to main memory as far as we know, so there's 2726 * to it immediately go to main memory as far as we know, so there's
2553 * no chipset flush. It also doesn't land in render cache. 2727 * no chipset flush. It also doesn't land in render cache.
2554 */ 2728 */
2729 old_write_domain = obj->write_domain;
2555 obj->write_domain = 0; 2730 obj->write_domain = 0;
2731
2732 trace_i915_gem_object_change_domain(obj,
2733 obj->read_domains,
2734 old_write_domain);
2556} 2735}
2557 2736
2558/** Flushes the CPU write domain for the object if it's dirty. */ 2737/** Flushes the CPU write domain for the object if it's dirty. */
@@ -2560,13 +2739,19 @@ static void
2560i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) 2739i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2561{ 2740{
2562 struct drm_device *dev = obj->dev; 2741 struct drm_device *dev = obj->dev;
2742 uint32_t old_write_domain;
2563 2743
2564 if (obj->write_domain != I915_GEM_DOMAIN_CPU) 2744 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2565 return; 2745 return;
2566 2746
2567 i915_gem_clflush_object(obj); 2747 i915_gem_clflush_object(obj);
2568 drm_agp_chipset_flush(dev); 2748 drm_agp_chipset_flush(dev);
2749 old_write_domain = obj->write_domain;
2569 obj->write_domain = 0; 2750 obj->write_domain = 0;
2751
2752 trace_i915_gem_object_change_domain(obj,
2753 obj->read_domains,
2754 old_write_domain);
2570} 2755}
2571 2756
2572/** 2757/**
@@ -2579,6 +2764,7 @@ int
2579i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) 2764i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2580{ 2765{
2581 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2766 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2767 uint32_t old_write_domain, old_read_domains;
2582 int ret; 2768 int ret;
2583 2769
2584 /* Not valid to be called on unbound objects. */ 2770 /* Not valid to be called on unbound objects. */
@@ -2591,6 +2777,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2591 if (ret != 0) 2777 if (ret != 0)
2592 return ret; 2778 return ret;
2593 2779
2780 old_write_domain = obj->write_domain;
2781 old_read_domains = obj->read_domains;
2782
2594 /* If we're writing through the GTT domain, then CPU and GPU caches 2783 /* If we're writing through the GTT domain, then CPU and GPU caches
2595 * will need to be invalidated at next use. 2784 * will need to be invalidated at next use.
2596 */ 2785 */
@@ -2609,6 +2798,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2609 obj_priv->dirty = 1; 2798 obj_priv->dirty = 1;
2610 } 2799 }
2611 2800
2801 trace_i915_gem_object_change_domain(obj,
2802 old_read_domains,
2803 old_write_domain);
2804
2612 return 0; 2805 return 0;
2613} 2806}
2614 2807
@@ -2621,6 +2814,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2621static int 2814static int
2622i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) 2815i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2623{ 2816{
2817 uint32_t old_write_domain, old_read_domains;
2624 int ret; 2818 int ret;
2625 2819
2626 i915_gem_object_flush_gpu_write_domain(obj); 2820 i915_gem_object_flush_gpu_write_domain(obj);
@@ -2636,6 +2830,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2636 */ 2830 */
2637 i915_gem_object_set_to_full_cpu_read_domain(obj); 2831 i915_gem_object_set_to_full_cpu_read_domain(obj);
2638 2832
2833 old_write_domain = obj->write_domain;
2834 old_read_domains = obj->read_domains;
2835
2639 /* Flush the CPU cache if it's still invalid. */ 2836 /* Flush the CPU cache if it's still invalid. */
2640 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { 2837 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2641 i915_gem_clflush_object(obj); 2838 i915_gem_clflush_object(obj);
@@ -2656,6 +2853,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2656 obj->write_domain = I915_GEM_DOMAIN_CPU; 2853 obj->write_domain = I915_GEM_DOMAIN_CPU;
2657 } 2854 }
2658 2855
2856 trace_i915_gem_object_change_domain(obj,
2857 old_read_domains,
2858 old_write_domain);
2859
2659 return 0; 2860 return 0;
2660} 2861}
2661 2862
@@ -2777,6 +2978,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2777 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2978 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2778 uint32_t invalidate_domains = 0; 2979 uint32_t invalidate_domains = 0;
2779 uint32_t flush_domains = 0; 2980 uint32_t flush_domains = 0;
2981 uint32_t old_read_domains;
2780 2982
2781 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); 2983 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2782 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); 2984 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
@@ -2823,6 +3025,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2823 i915_gem_clflush_object(obj); 3025 i915_gem_clflush_object(obj);
2824 } 3026 }
2825 3027
3028 old_read_domains = obj->read_domains;
3029
2826 /* The actual obj->write_domain will be updated with 3030 /* The actual obj->write_domain will be updated with
2827 * pending_write_domain after we emit the accumulated flush for all 3031 * pending_write_domain after we emit the accumulated flush for all
2828 * of our domain changes in execbuffers (which clears objects' 3032 * of our domain changes in execbuffers (which clears objects'
@@ -2841,6 +3045,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2841 obj->read_domains, obj->write_domain, 3045 obj->read_domains, obj->write_domain,
2842 dev->invalidate_domains, dev->flush_domains); 3046 dev->invalidate_domains, dev->flush_domains);
2843#endif 3047#endif
3048
3049 trace_i915_gem_object_change_domain(obj,
3050 old_read_domains,
3051 obj->write_domain);
2844} 3052}
2845 3053
2846/** 3054/**
@@ -2893,6 +3101,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2893 uint64_t offset, uint64_t size) 3101 uint64_t offset, uint64_t size)
2894{ 3102{
2895 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3103 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3104 uint32_t old_read_domains;
2896 int i, ret; 3105 int i, ret;
2897 3106
2898 if (offset == 0 && size == obj->size) 3107 if (offset == 0 && size == obj->size)
@@ -2939,8 +3148,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2939 */ 3148 */
2940 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 3149 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2941 3150
3151 old_read_domains = obj->read_domains;
2942 obj->read_domains |= I915_GEM_DOMAIN_CPU; 3152 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2943 3153
3154 trace_i915_gem_object_change_domain(obj,
3155 old_read_domains,
3156 obj->write_domain);
3157
2944 return 0; 3158 return 0;
2945} 3159}
2946 3160
@@ -2984,6 +3198,21 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2984 } 3198 }
2985 target_obj_priv = target_obj->driver_private; 3199 target_obj_priv = target_obj->driver_private;
2986 3200
3201#if WATCH_RELOC
3202 DRM_INFO("%s: obj %p offset %08x target %d "
3203 "read %08x write %08x gtt %08x "
3204 "presumed %08x delta %08x\n",
3205 __func__,
3206 obj,
3207 (int) reloc->offset,
3208 (int) reloc->target_handle,
3209 (int) reloc->read_domains,
3210 (int) reloc->write_domain,
3211 (int) target_obj_priv->gtt_offset,
3212 (int) reloc->presumed_offset,
3213 reloc->delta);
3214#endif
3215
2987 /* The target buffer should have appeared before us in the 3216 /* The target buffer should have appeared before us in the
2988 * exec_object list, so it should have a GTT space bound by now. 3217 * exec_object list, so it should have a GTT space bound by now.
2989 */ 3218 */
@@ -2995,25 +3224,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2995 return -EINVAL; 3224 return -EINVAL;
2996 } 3225 }
2997 3226
2998 if (reloc->offset > obj->size - 4) { 3227 /* Validate that the target is in a valid r/w GPU domain */
2999 DRM_ERROR("Relocation beyond object bounds: "
3000 "obj %p target %d offset %d size %d.\n",
3001 obj, reloc->target_handle,
3002 (int) reloc->offset, (int) obj->size);
3003 drm_gem_object_unreference(target_obj);
3004 i915_gem_object_unpin(obj);
3005 return -EINVAL;
3006 }
3007 if (reloc->offset & 3) {
3008 DRM_ERROR("Relocation not 4-byte aligned: "
3009 "obj %p target %d offset %d.\n",
3010 obj, reloc->target_handle,
3011 (int) reloc->offset);
3012 drm_gem_object_unreference(target_obj);
3013 i915_gem_object_unpin(obj);
3014 return -EINVAL;
3015 }
3016
3017 if (reloc->write_domain & I915_GEM_DOMAIN_CPU || 3228 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3018 reloc->read_domains & I915_GEM_DOMAIN_CPU) { 3229 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3019 DRM_ERROR("reloc with read/write CPU domains: " 3230 DRM_ERROR("reloc with read/write CPU domains: "
@@ -3027,7 +3238,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3027 i915_gem_object_unpin(obj); 3238 i915_gem_object_unpin(obj);
3028 return -EINVAL; 3239 return -EINVAL;
3029 } 3240 }
3030
3031 if (reloc->write_domain && target_obj->pending_write_domain && 3241 if (reloc->write_domain && target_obj->pending_write_domain &&
3032 reloc->write_domain != target_obj->pending_write_domain) { 3242 reloc->write_domain != target_obj->pending_write_domain) {
3033 DRM_ERROR("Write domain conflict: " 3243 DRM_ERROR("Write domain conflict: "
@@ -3042,21 +3252,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3042 return -EINVAL; 3252 return -EINVAL;
3043 } 3253 }
3044 3254
3045#if WATCH_RELOC
3046 DRM_INFO("%s: obj %p offset %08x target %d "
3047 "read %08x write %08x gtt %08x "
3048 "presumed %08x delta %08x\n",
3049 __func__,
3050 obj,
3051 (int) reloc->offset,
3052 (int) reloc->target_handle,
3053 (int) reloc->read_domains,
3054 (int) reloc->write_domain,
3055 (int) target_obj_priv->gtt_offset,
3056 (int) reloc->presumed_offset,
3057 reloc->delta);
3058#endif
3059
3060 target_obj->pending_read_domains |= reloc->read_domains; 3255 target_obj->pending_read_domains |= reloc->read_domains;
3061 target_obj->pending_write_domain |= reloc->write_domain; 3256 target_obj->pending_write_domain |= reloc->write_domain;
3062 3257
@@ -3068,6 +3263,37 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3068 continue; 3263 continue;
3069 } 3264 }
3070 3265
3266 /* Check that the relocation address is valid... */
3267 if (reloc->offset > obj->size - 4) {
3268 DRM_ERROR("Relocation beyond object bounds: "
3269 "obj %p target %d offset %d size %d.\n",
3270 obj, reloc->target_handle,
3271 (int) reloc->offset, (int) obj->size);
3272 drm_gem_object_unreference(target_obj);
3273 i915_gem_object_unpin(obj);
3274 return -EINVAL;
3275 }
3276 if (reloc->offset & 3) {
3277 DRM_ERROR("Relocation not 4-byte aligned: "
3278 "obj %p target %d offset %d.\n",
3279 obj, reloc->target_handle,
3280 (int) reloc->offset);
3281 drm_gem_object_unreference(target_obj);
3282 i915_gem_object_unpin(obj);
3283 return -EINVAL;
3284 }
3285
3286 /* and points to somewhere within the target object. */
3287 if (reloc->delta >= target_obj->size) {
3288 DRM_ERROR("Relocation beyond target object bounds: "
3289 "obj %p target %d delta %d size %d.\n",
3290 obj, reloc->target_handle,
3291 (int) reloc->delta, (int) target_obj->size);
3292 drm_gem_object_unreference(target_obj);
3293 i915_gem_object_unpin(obj);
3294 return -EINVAL;
3295 }
3296
3071 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 3297 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3072 if (ret != 0) { 3298 if (ret != 0) {
3073 drm_gem_object_unreference(target_obj); 3299 drm_gem_object_unreference(target_obj);
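
The relocated checks are also tightened: besides bounds and alignment of the 4-byte slot being patched, the new delta check verifies the relocation actually points inside the target object. The three predicates in isolation:

#include <stdint.h>
#include <stdio.h>

static int reloc_ok(uint64_t offset, uint64_t obj_size,
		    uint64_t delta, uint64_t target_size)
{
	if (offset > obj_size - 4)      /* slot runs past the object */
		return 0;
	if (offset & 3)                 /* hardware patches whole dwords */
		return 0;
	if (delta >= target_size)       /* points outside the target */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", reloc_ok(4096, 8192, 256, 4096)); /* 1: all valid */
	printf("%d\n", reloc_ok(8190, 8192, 0, 4096));   /* 0: beyond bounds */
	printf("%d\n", reloc_ok(6, 8192, 0, 4096));      /* 0: misaligned */
	printf("%d\n", reloc_ok(0, 8192, 4096, 4096));   /* 0: past target */
	return 0;
}
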
@@ -3126,6 +3352,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
3126 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 3352 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3127 exec_len = (uint32_t) exec->batch_len; 3353 exec_len = (uint32_t) exec->batch_len;
3128 3354
3355 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
3356
3129 count = nbox ? nbox : 1; 3357 count = nbox ? nbox : 1;
3130 3358
3131 for (i = 0; i < count; i++) { 3359 for (i = 0; i < count; i++) {
@@ -3363,7 +3591,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3363 3591
3364 i915_verify_inactive(dev, __FILE__, __LINE__); 3592 i915_verify_inactive(dev, __FILE__, __LINE__);
3365 3593
3366 if (dev_priv->mm.wedged) { 3594 if (atomic_read(&dev_priv->mm.wedged)) {
3367 DRM_ERROR("Execbuf while wedged\n"); 3595 DRM_ERROR("Execbuf while wedged\n");
3368 mutex_unlock(&dev->struct_mutex); 3596 mutex_unlock(&dev->struct_mutex);
3369 ret = -EIO; 3597 ret = -EIO;
@@ -3421,8 +3649,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3421 3649
3422 /* error other than GTT full, or we've already tried again */ 3650 /* error other than GTT full, or we've already tried again */
3423 if (ret != -ENOSPC || pin_tries >= 1) { 3651 if (ret != -ENOSPC || pin_tries >= 1) {
3424 if (ret != -ERESTARTSYS) 3652 if (ret != -ERESTARTSYS) {
3425 DRM_ERROR("Failed to pin buffers %d\n", ret); 3653 unsigned long long total_size = 0;
3654 for (i = 0; i < args->buffer_count; i++)
3655 total_size += object_list[i]->size;
3656 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
3657 pinned+1, args->buffer_count,
3658 total_size, ret);
3659 DRM_ERROR("%d objects [%d pinned], "
3660 "%d object bytes [%d pinned], "
3661 "%d/%d gtt bytes\n",
3662 atomic_read(&dev->object_count),
3663 atomic_read(&dev->pin_count),
3664 atomic_read(&dev->object_memory),
3665 atomic_read(&dev->pin_memory),
3666 atomic_read(&dev->gtt_memory),
3667 dev->gtt_total);
3668 }
3426 goto err; 3669 goto err;
3427 } 3670 }
3428 3671
@@ -3433,7 +3676,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3433 3676
3434 /* evict everyone we can from the aperture */ 3677 /* evict everyone we can from the aperture */
3435 ret = i915_gem_evict_everything(dev); 3678 ret = i915_gem_evict_everything(dev);
3436 if (ret) 3679 if (ret && ret != -ENOSPC)
3437 goto err; 3680 goto err;
3438 } 3681 }
3439 3682
@@ -3489,8 +3732,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3489 3732
3490 for (i = 0; i < args->buffer_count; i++) { 3733 for (i = 0; i < args->buffer_count; i++) {
3491 struct drm_gem_object *obj = object_list[i]; 3734 struct drm_gem_object *obj = object_list[i];
3735 uint32_t old_write_domain = obj->write_domain;
3492 3736
3493 obj->write_domain = obj->pending_write_domain; 3737 obj->write_domain = obj->pending_write_domain;
3738 trace_i915_gem_object_change_domain(obj,
3739 obj->read_domains,
3740 old_write_domain);
3494 } 3741 }
3495 3742
3496 i915_verify_inactive(dev, __FILE__, __LINE__); 3743 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -3607,11 +3854,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3607 i915_verify_inactive(dev, __FILE__, __LINE__); 3854 i915_verify_inactive(dev, __FILE__, __LINE__);
3608 if (obj_priv->gtt_space == NULL) { 3855 if (obj_priv->gtt_space == NULL) {
3609 ret = i915_gem_object_bind_to_gtt(obj, alignment); 3856 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3610 if (ret != 0) { 3857 if (ret)
3611 if (ret != -EBUSY && ret != -ERESTARTSYS)
3612 DRM_ERROR("Failure to bind: %d\n", ret);
3613 return ret; 3858 return ret;
3614 }
3615 } 3859 }
3616 /* 3860 /*
3617 * Pre-965 chips need a fence register set up in order to 3861 * Pre-965 chips need a fence register set up in order to
@@ -3691,6 +3935,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3691 } 3935 }
3692 obj_priv = obj->driver_private; 3936 obj_priv = obj->driver_private;
3693 3937
3938 if (obj_priv->madv != I915_MADV_WILLNEED) {
3939 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3940 drm_gem_object_unreference(obj);
3941 mutex_unlock(&dev->struct_mutex);
3942 return -EINVAL;
3943 }
3944
3694 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { 3945 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3695 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", 3946 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3696 args->handle); 3947 args->handle);
@@ -3803,6 +4054,56 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3803 return i915_gem_ring_throttle(dev, file_priv); 4054 return i915_gem_ring_throttle(dev, file_priv);
3804} 4055}
3805 4056
4057int
4058i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4059 struct drm_file *file_priv)
4060{
4061 struct drm_i915_gem_madvise *args = data;
4062 struct drm_gem_object *obj;
4063 struct drm_i915_gem_object *obj_priv;
4064
4065 switch (args->madv) {
4066 case I915_MADV_DONTNEED:
4067 case I915_MADV_WILLNEED:
4068 break;
4069 default:
4070 return -EINVAL;
4071 }
4072
4073 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4074 if (obj == NULL) {
4075 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4076 args->handle);
4077 return -EBADF;
4078 }
4079
4080 mutex_lock(&dev->struct_mutex);
4081 obj_priv = obj->driver_private;
4082
4083 if (obj_priv->pin_count) {
4084 drm_gem_object_unreference(obj);
4085 mutex_unlock(&dev->struct_mutex);
4086
4087 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4088 return -EINVAL;
4089 }
4090
4091 if (obj_priv->madv != __I915_MADV_PURGED)
4092 obj_priv->madv = args->madv;
4093
4094 /* if the object is no longer bound, discard its backing storage */
4095 if (i915_gem_object_is_purgeable(obj_priv) &&
4096 obj_priv->gtt_space == NULL)
4097 i915_gem_object_truncate(obj);
4098
4099 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4100
4101 drm_gem_object_unreference(obj);
4102 mutex_unlock(&dev->struct_mutex);
4103
4104 return 0;
4105}
4106
3806int i915_gem_init_object(struct drm_gem_object *obj) 4107int i915_gem_init_object(struct drm_gem_object *obj)
3807{ 4108{
3808 struct drm_i915_gem_object *obj_priv; 4109 struct drm_i915_gem_object *obj_priv;
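
From user space the new madvise ioctl is the building block for a buffer cache: idle buffers are marked DONTNEED so the shrinker may purge them, and on reuse they are flipped back to WILLNEED, with the retained flag saying whether the contents survived. A hedged sketch: the struct layout and madv values mirror the i915_drm.h additions in this series, but the ioctl request number is left as a parameter rather than guessed:

#include <stdint.h>
#include <sys/ioctl.h>

#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1

struct i915_gem_madvise {
	uint32_t handle;    /* GEM handle of the buffer */
	uint32_t madv;      /* WILLNEED or DONTNEED */
	uint32_t retained;  /* out: 0 if the kernel already purged it */
};

/* Reuse a cached buffer: mark it needed again, then check whether its
 * contents survived. Returns 1 if the caller must reinitialize it. */
int bo_cache_reuse(int drm_fd, unsigned long madvise_ioctl, uint32_t handle)
{
	struct i915_gem_madvise arg = { handle, I915_MADV_WILLNEED, 0 };

	if (ioctl(drm_fd, madvise_ioctl, &arg) != 0)
		return -1;      /* ioctl itself failed */
	if (!arg.retained)
		return 1;       /* purged: contents are gone */
	return 0;               /* intact, safe to reuse as-is */
}
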
@@ -3827,6 +4128,9 @@ int i915_gem_init_object(struct drm_gem_object *obj)
3827 obj_priv->fence_reg = I915_FENCE_REG_NONE; 4128 obj_priv->fence_reg = I915_FENCE_REG_NONE;
3828 INIT_LIST_HEAD(&obj_priv->list); 4129 INIT_LIST_HEAD(&obj_priv->list);
3829 INIT_LIST_HEAD(&obj_priv->fence_list); 4130 INIT_LIST_HEAD(&obj_priv->fence_list);
4131 obj_priv->madv = I915_MADV_WILLNEED;
4132
4133 trace_i915_gem_object_create(obj);
3830 4134
3831 return 0; 4135 return 0;
3832} 4136}
@@ -3836,6 +4140,8 @@ void i915_gem_free_object(struct drm_gem_object *obj)
3836 struct drm_device *dev = obj->dev; 4140 struct drm_device *dev = obj->dev;
3837 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4141 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3838 4142
4143 trace_i915_gem_object_destroy(obj);
4144
3839 while (obj_priv->pin_count > 0) 4145 while (obj_priv->pin_count > 0)
3840 i915_gem_object_unpin(obj); 4146 i915_gem_object_unpin(obj);
3841 4147
@@ -3844,43 +4150,35 @@ void i915_gem_free_object(struct drm_gem_object *obj)
3844 4150
3845 i915_gem_object_unbind(obj); 4151 i915_gem_object_unbind(obj);
3846 4152
3847 i915_gem_free_mmap_offset(obj); 4153 if (obj_priv->mmap_offset)
4154 i915_gem_free_mmap_offset(obj);
3848 4155
3849 kfree(obj_priv->page_cpu_valid); 4156 kfree(obj_priv->page_cpu_valid);
3850 kfree(obj_priv->bit_17); 4157 kfree(obj_priv->bit_17);
3851 kfree(obj->driver_private); 4158 kfree(obj->driver_private);
3852} 4159}
3853 4160
3854/** Unbinds all objects that are on the given buffer list. */ 4161/** Unbinds all inactive objects. */
3855static int 4162static int
3856i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) 4163i915_gem_evict_from_inactive_list(struct drm_device *dev)
3857{ 4164{
3858 struct drm_gem_object *obj; 4165 drm_i915_private_t *dev_priv = dev->dev_private;
3859 struct drm_i915_gem_object *obj_priv;
3860 int ret;
3861 4166
3862 while (!list_empty(head)) { 4167 while (!list_empty(&dev_priv->mm.inactive_list)) {
3863 obj_priv = list_first_entry(head, 4168 struct drm_gem_object *obj;
3864 struct drm_i915_gem_object, 4169 int ret;
3865 list);
3866 obj = obj_priv->obj;
3867 4170
3868 if (obj_priv->pin_count != 0) { 4171 obj = list_first_entry(&dev_priv->mm.inactive_list,
3869 DRM_ERROR("Pinned object in unbind list\n"); 4172 struct drm_i915_gem_object,
3870 mutex_unlock(&dev->struct_mutex); 4173 list)->obj;
3871 return -EINVAL;
3872 }
3873 4174
3874 ret = i915_gem_object_unbind(obj); 4175 ret = i915_gem_object_unbind(obj);
3875 if (ret != 0) { 4176 if (ret != 0) {
3876 DRM_ERROR("Error unbinding object in LeaveVT: %d\n", 4177 DRM_ERROR("Error unbinding object: %d\n", ret);
3877 ret);
3878 mutex_unlock(&dev->struct_mutex);
3879 return ret; 4178 return ret;
3880 } 4179 }
3881 } 4180 }
3882 4181
3883
3884 return 0; 4182 return 0;
3885} 4183}
3886 4184
@@ -3902,6 +4200,7 @@ i915_gem_idle(struct drm_device *dev)
3902 * We need to replace this with a semaphore, or something. 4200 * We need to replace this with a semaphore, or something.
3903 */ 4201 */
3904 dev_priv->mm.suspended = 1; 4202 dev_priv->mm.suspended = 1;
4203 del_timer(&dev_priv->hangcheck_timer);
3905 4204
3906 /* Cancel the retire work handler, wait for it to finish if running 4205 /* Cancel the retire work handler, wait for it to finish if running
3907 */ 4206 */
@@ -3931,7 +4230,7 @@ i915_gem_idle(struct drm_device *dev)
3931 if (last_seqno == cur_seqno) { 4230 if (last_seqno == cur_seqno) {
3932 if (stuck++ > 100) { 4231 if (stuck++ > 100) {
3933 DRM_ERROR("hardware wedged\n"); 4232 DRM_ERROR("hardware wedged\n");
3934 dev_priv->mm.wedged = 1; 4233 atomic_set(&dev_priv->mm.wedged, 1);
3935 DRM_WAKEUP(&dev_priv->irq_queue); 4234 DRM_WAKEUP(&dev_priv->irq_queue);
3936 break; 4235 break;
3937 } 4236 }
@@ -3944,7 +4243,7 @@ i915_gem_idle(struct drm_device *dev)
3944 i915_gem_retire_requests(dev); 4243 i915_gem_retire_requests(dev);
3945 4244
3946 spin_lock(&dev_priv->mm.active_list_lock); 4245 spin_lock(&dev_priv->mm.active_list_lock);
3947 if (!dev_priv->mm.wedged) { 4246 if (!atomic_read(&dev_priv->mm.wedged)) {
3948 /* Active and flushing should now be empty as we've 4247 /* Active and flushing should now be empty as we've
3949 * waited for a sequence higher than any pending execbuffer 4248 * waited for a sequence higher than any pending execbuffer
3950 */ 4249 */
@@ -3962,29 +4261,41 @@ i915_gem_idle(struct drm_device *dev)
3962 * the GPU domains and just stuff them onto inactive. 4261 * the GPU domains and just stuff them onto inactive.
3963 */ 4262 */
3964 while (!list_empty(&dev_priv->mm.active_list)) { 4263 while (!list_empty(&dev_priv->mm.active_list)) {
3965 struct drm_i915_gem_object *obj_priv; 4264 struct drm_gem_object *obj;
4265 uint32_t old_write_domain;
3966 4266
3967 obj_priv = list_first_entry(&dev_priv->mm.active_list, 4267 obj = list_first_entry(&dev_priv->mm.active_list,
3968 struct drm_i915_gem_object, 4268 struct drm_i915_gem_object,
3969 list); 4269 list)->obj;
3970 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; 4270 old_write_domain = obj->write_domain;
3971 i915_gem_object_move_to_inactive(obj_priv->obj); 4271 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4272 i915_gem_object_move_to_inactive(obj);
4273
4274 trace_i915_gem_object_change_domain(obj,
4275 obj->read_domains,
4276 old_write_domain);
3972 } 4277 }
3973 spin_unlock(&dev_priv->mm.active_list_lock); 4278 spin_unlock(&dev_priv->mm.active_list_lock);
3974 4279
3975 while (!list_empty(&dev_priv->mm.flushing_list)) { 4280 while (!list_empty(&dev_priv->mm.flushing_list)) {
3976 struct drm_i915_gem_object *obj_priv; 4281 struct drm_gem_object *obj;
4282 uint32_t old_write_domain;
3977 4283
3978 obj_priv = list_first_entry(&dev_priv->mm.flushing_list, 4284 obj = list_first_entry(&dev_priv->mm.flushing_list,
3979 struct drm_i915_gem_object, 4285 struct drm_i915_gem_object,
3980 list); 4286 list)->obj;
3981 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; 4287 old_write_domain = obj->write_domain;
3982 i915_gem_object_move_to_inactive(obj_priv->obj); 4288 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4289 i915_gem_object_move_to_inactive(obj);
4290
4291 trace_i915_gem_object_change_domain(obj,
4292 obj->read_domains,
4293 old_write_domain);
3983 } 4294 }
3984 4295
3985 4296
3986 /* Move all inactive buffers out of the GTT. */ 4297 /* Move all inactive buffers out of the GTT. */
3987 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); 4298 ret = i915_gem_evict_from_inactive_list(dev);
3988 WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); 4299 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
3989 if (ret) { 4300 if (ret) {
3990 mutex_unlock(&dev->struct_mutex); 4301 mutex_unlock(&dev->struct_mutex);
@@ -4206,9 +4517,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4206 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4517 if (drm_core_check_feature(dev, DRIVER_MODESET))
4207 return 0; 4518 return 0;
4208 4519
4209 if (dev_priv->mm.wedged) { 4520 if (atomic_read(&dev_priv->mm.wedged)) {
4210 DRM_ERROR("Reenabling wedged hardware, good luck\n"); 4521 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4211 dev_priv->mm.wedged = 0; 4522 atomic_set(&dev_priv->mm.wedged, 0);
4212 } 4523 }
4213 4524
4214 mutex_lock(&dev->struct_mutex); 4525 mutex_lock(&dev->struct_mutex);
@@ -4274,6 +4585,10 @@ i915_gem_load(struct drm_device *dev)
4274 i915_gem_retire_work_handler); 4585 i915_gem_retire_work_handler);
4275 dev_priv->mm.next_gem_seqno = 1; 4586 dev_priv->mm.next_gem_seqno = 1;
4276 4587
4588 spin_lock(&shrink_list_lock);
4589 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4590 spin_unlock(&shrink_list_lock);
4591
4277 /* Old X drivers will take 0-2 for front, back, depth buffers */ 4592 /* Old X drivers will take 0-2 for front, back, depth buffers */
4278 dev_priv->fence_reg_start = 3; 4593 dev_priv->fence_reg_start = 3;
4279 4594
@@ -4491,3 +4806,116 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4491 list_del_init(i915_file_priv->mm.request_list.next); 4806 list_del_init(i915_file_priv->mm.request_list.next);
4492 mutex_unlock(&dev->struct_mutex); 4807 mutex_unlock(&dev->struct_mutex);
4493} 4808}
4809
4810static int
4811i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
4812{
4813 drm_i915_private_t *dev_priv, *next_dev;
4814 struct drm_i915_gem_object *obj_priv, *next_obj;
4815 int cnt = 0;
4816 int would_deadlock = 1;
4817
4818 /* "fast-path" to count number of available objects */
4819 if (nr_to_scan == 0) {
4820 spin_lock(&shrink_list_lock);
4821 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
4822 struct drm_device *dev = dev_priv->dev;
4823
4824 if (mutex_trylock(&dev->struct_mutex)) {
4825 list_for_each_entry(obj_priv,
4826 &dev_priv->mm.inactive_list,
4827 list)
4828 cnt++;
4829 mutex_unlock(&dev->struct_mutex);
4830 }
4831 }
4832 spin_unlock(&shrink_list_lock);
4833
4834 return (cnt / 100) * sysctl_vfs_cache_pressure;
4835 }
4836
4837 spin_lock(&shrink_list_lock);
4838
4839 /* first scan for clean buffers */
4840 list_for_each_entry_safe(dev_priv, next_dev,
4841 &shrink_list, mm.shrink_list) {
4842 struct drm_device *dev = dev_priv->dev;
4843
4844 if (! mutex_trylock(&dev->struct_mutex))
4845 continue;
4846
4847 spin_unlock(&shrink_list_lock);
4848
4849 i915_gem_retire_requests(dev);
4850
4851 list_for_each_entry_safe(obj_priv, next_obj,
4852 &dev_priv->mm.inactive_list,
4853 list) {
4854 if (i915_gem_object_is_purgeable(obj_priv)) {
4855 i915_gem_object_unbind(obj_priv->obj);
4856 if (--nr_to_scan <= 0)
4857 break;
4858 }
4859 }
4860
4861 spin_lock(&shrink_list_lock);
4862 mutex_unlock(&dev->struct_mutex);
4863
4864 would_deadlock = 0;
4865
4866 if (nr_to_scan <= 0)
4867 break;
4868 }
4869
4870 /* second pass, evict/count anything still on the inactive list */
4871 list_for_each_entry_safe(dev_priv, next_dev,
4872 &shrink_list, mm.shrink_list) {
4873 struct drm_device *dev = dev_priv->dev;
4874
4875 if (! mutex_trylock(&dev->struct_mutex))
4876 continue;
4877
4878 spin_unlock(&shrink_list_lock);
4879
4880 list_for_each_entry_safe(obj_priv, next_obj,
4881 &dev_priv->mm.inactive_list,
4882 list) {
4883 if (nr_to_scan > 0) {
4884 i915_gem_object_unbind(obj_priv->obj);
4885 nr_to_scan--;
4886 } else
4887 cnt++;
4888 }
4889
4890 spin_lock(&shrink_list_lock);
4891 mutex_unlock(&dev->struct_mutex);
4892
4893 would_deadlock = 0;
4894 }
4895
4896 spin_unlock(&shrink_list_lock);
4897
4898 if (would_deadlock)
4899 return -1;
4900 else if (cnt > 0)
4901 return (cnt / 100) * sysctl_vfs_cache_pressure;
4902 else
4903 return 0;
4904}
4905
4906static struct shrinker shrinker = {
4907 .shrink = i915_gem_shrink,
4908 .seeks = DEFAULT_SEEKS,
4909};
4910
4911__init void
4912i915_gem_shrinker_init(void)
4913{
4914 register_shrinker(&shrinker);
4915}
4916
4917__exit void
4918i915_gem_shrinker_exit(void)
4919{
4920 unregister_shrinker(&shrinker);
4921}
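
i915_gem_shrink() implements the shrinker contract of this kernel generation: a call with nr_to_scan == 0 is a count-only probe (the result scaled by sysctl_vfs_cache_pressure), a positive count frees up to that many objects and returns what remains, and -1 reports that the needed locks were contended. A toy model of the contract, with a plain counter standing in for the inactive lists:

#include <stdio.h>

static int cached_objects = 250;    /* stand-in for freeable objects */

static int toy_shrink(int nr_to_scan, unsigned gfp_mask)
{
	(void)gfp_mask;

	if (nr_to_scan == 0)
		return cached_objects;  /* count-only probe */

	cached_objects -= nr_to_scan < cached_objects ? nr_to_scan
						      : cached_objects;
	return cached_objects;          /* what is left to free */
}

int main(void)
{
	printf("probe: %d\n", toy_shrink(0, 0));   /* 250 */
	printf("scan:  %d\n", toy_shrink(128, 0)); /* 122 */
	return 0;
}
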
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6c89f2ff2495..4dfeec7cdd42 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,6 +31,7 @@
31#include "drm.h" 31#include "drm.h"
32#include "i915_drm.h" 32#include "i915_drm.h"
33#include "i915_drv.h" 33#include "i915_drv.h"
34#include "i915_trace.h"
34#include "intel_drv.h" 35#include "intel_drv.h"
35 36
36#define MAX_NOPID ((u32)~0) 37#define MAX_NOPID ((u32)~0)
@@ -279,7 +280,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
279 } 280 }
280 281
281 if (gt_iir & GT_USER_INTERRUPT) { 282 if (gt_iir & GT_USER_INTERRUPT) {
282 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); 283 u32 seqno = i915_get_gem_seqno(dev);
284 dev_priv->mm.irq_gem_seqno = seqno;
285 trace_i915_gem_request_complete(dev, seqno);
283 DRM_WAKEUP(&dev_priv->irq_queue); 286 DRM_WAKEUP(&dev_priv->irq_queue);
284 } 287 }
285 288
@@ -302,12 +305,25 @@ static void i915_error_work_func(struct work_struct *work)
302 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 305 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
303 error_work); 306 error_work);
304 struct drm_device *dev = dev_priv->dev; 307 struct drm_device *dev = dev_priv->dev;
305 char *event_string = "ERROR=1"; 308 char *error_event[] = { "ERROR=1", NULL };
306 char *envp[] = { event_string, NULL }; 309 char *reset_event[] = { "RESET=1", NULL };
310 char *reset_done_event[] = { "ERROR=0", NULL };
307 311
308 DRM_DEBUG("generating error event\n"); 312 DRM_DEBUG("generating error event\n");
309 313 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
310 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); 314
315 if (atomic_read(&dev_priv->mm.wedged)) {
316 if (IS_I965G(dev)) {
317 DRM_DEBUG("resetting chip\n");
318 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
319 if (!i965_reset(dev, GDRST_RENDER)) {
320 atomic_set(&dev_priv->mm.wedged, 0);
321 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
322 }
323 } else {
324 printk("reboot required\n");
325 }
326 }
311} 327}
312 328
313/** 329/**
@@ -372,7 +388,7 @@ out:
372 * so userspace knows something bad happened (should trigger collection 388 * so userspace knows something bad happened (should trigger collection
373 * of a ring dump etc.). 389 * of a ring dump etc.).
374 */ 390 */
375static void i915_handle_error(struct drm_device *dev) 391static void i915_handle_error(struct drm_device *dev, bool wedged)
376{ 392{
377 struct drm_i915_private *dev_priv = dev->dev_private; 393 struct drm_i915_private *dev_priv = dev->dev_private;
378 u32 eir = I915_READ(EIR); 394 u32 eir = I915_READ(EIR);
@@ -482,6 +498,16 @@ static void i915_handle_error(struct drm_device *dev)
482 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 498 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
483 } 499 }
484 500
501 if (wedged) {
502 atomic_set(&dev_priv->mm.wedged, 1);
503
504 /*
505 * Wakeup waiting processes so they don't hang
506 */
507 printk("i915: Waking up sleeping processes\n");
508 DRM_WAKEUP(&dev_priv->irq_queue);
509 }
510
485 queue_work(dev_priv->wq, &dev_priv->error_work); 511 queue_work(dev_priv->wq, &dev_priv->error_work);
486} 512}
487 513
@@ -527,7 +553,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
527 pipeb_stats = I915_READ(PIPEBSTAT); 553 pipeb_stats = I915_READ(PIPEBSTAT);
528 554
529 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 555 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
530 i915_handle_error(dev); 556 i915_handle_error(dev, false);
531 557
532 /* 558 /*
533 * Clear the PIPE(A|B)STAT regs before the IIR 559 * Clear the PIPE(A|B)STAT regs before the IIR
@@ -599,8 +625,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
599 } 625 }
600 626
601 if (iir & I915_USER_INTERRUPT) { 627 if (iir & I915_USER_INTERRUPT) {
602 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); 628 u32 seqno = i915_get_gem_seqno(dev);
629 dev_priv->mm.irq_gem_seqno = seqno;
630 trace_i915_gem_request_complete(dev, seqno);
603 DRM_WAKEUP(&dev_priv->irq_queue); 631 DRM_WAKEUP(&dev_priv->irq_queue);
632 dev_priv->hangcheck_count = 0;
633 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
604 } 634 }
605 635
606 if (pipea_stats & vblank_status) { 636 if (pipea_stats & vblank_status) {
@@ -880,6 +910,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
880 return -EINVAL; 910 return -EINVAL;
881} 911}
882 912
913struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
914 drm_i915_private_t *dev_priv = dev->dev_private;
915 return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
916}
917
918/**
919 * This is called when the chip hasn't reported back with completed
920 * batchbuffers in a long time. The first time this is called we simply record
921 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
922 * again, we assume the chip is wedged and try to fix it.
923 */
924void i915_hangcheck_elapsed(unsigned long data)
925{
926 struct drm_device *dev = (struct drm_device *)data;
927 drm_i915_private_t *dev_priv = dev->dev_private;
928 uint32_t acthd;
929
930 if (!IS_I965G(dev))
931 acthd = I915_READ(ACTHD);
932 else
933 acthd = I915_READ(ACTHD_I965);
934
935 /* If all work is done then ACTHD clearly hasn't advanced. */
936 if (list_empty(&dev_priv->mm.request_list) ||
937 i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
938 dev_priv->hangcheck_count = 0;
939 return;
940 }
941
942 if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
943 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
944 i915_handle_error(dev, true);
945 return;
946 }
947
 948	/* Reset timer in case the chip hangs without another request being added */
949 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
950
951 if (acthd != dev_priv->last_acthd)
952 dev_priv->hangcheck_count = 0;
953 else
954 dev_priv->hangcheck_count++;
955
956 dev_priv->last_acthd = acthd;
957}
958
883/* drm_dma.h hooks 959/* drm_dma.h hooks
884*/ 960*/
885static void igdng_irq_preinstall(struct drm_device *dev) 961static void igdng_irq_preinstall(struct drm_device *dev)
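i915_hangcheck_elapsed() above re-arms itself while requests are pending, and the user-interrupt path in this diff clears hangcheck_count and pushes the deadline out. The one piece not visible in this section is the initial arming; a hedged sketch using the timer API of the day (the exact call site lives elsewhere in the series):

/* once, at driver load (assumes the hangcheck_timer field this
 * series adds to dev_priv): */
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
	    (unsigned long) dev);

/* whenever new work is emitted, push the deadline out: */
mod_timer(&dev_priv->hangcheck_timer,
	  jiffies + DRM_I915_HANGCHECK_PERIOD);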
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index e4b4e8898e39..2d5193556d3f 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -148,6 +148,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
148 struct drm_i915_private *dev_priv = dev->dev_private; 148 struct drm_i915_private *dev_priv = dev->dev_private;
149 struct opregion_asle *asle = dev_priv->opregion.asle; 149 struct opregion_asle *asle = dev_priv->opregion.asle;
150 u32 blc_pwm_ctl, blc_pwm_ctl2; 150 u32 blc_pwm_ctl, blc_pwm_ctl2;
151 u32 max_backlight, level, shift;
151 152
152 if (!(bclp & ASLE_BCLP_VALID)) 153 if (!(bclp & ASLE_BCLP_VALID))
153 return ASLE_BACKLIGHT_FAIL; 154 return ASLE_BACKLIGHT_FAIL;
@@ -157,14 +158,25 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
157 return ASLE_BACKLIGHT_FAIL; 158 return ASLE_BACKLIGHT_FAIL;
158 159
159 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); 160 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
160 blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
161 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); 161 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
162 162
163 if (blc_pwm_ctl2 & BLM_COMBINATION_MODE) 163 if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
164 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); 164 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
165 else 165 else {
166 I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1)); 166 if (IS_IGD(dev)) {
167 167 blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
168 max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
169 BACKLIGHT_MODULATION_FREQ_SHIFT;
170 shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
171 } else {
172 blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
173 max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
174 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
175 shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
176 }
177 level = (bclp * max_backlight) / 255;
178 I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
179 }
168 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; 180 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
169 181
170 return 0; 182 return 0;
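The rewritten else-branch above scales the requested brightness (bclp, 0-255) onto the PWM range read back from BLC_PWM_CTL, instead of assuming a full 16-bit duty-cycle field. The core of it as a standalone sketch with a worked value:

/* level = bclp * max_backlight / 255; e.g. a request of 128 against a
 * max_backlight of 466 gives 233 -- roughly a 50% duty cycle. */
static u32 asle_scale_backlight(u32 bclp, u32 max_backlight)
{
	return (bclp * max_backlight) / 255;
}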
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3f7963553464..0466ddbeba32 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -86,6 +86,10 @@
86#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) 86#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
87#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) 87#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
88#define LBB 0xf4 88#define LBB 0xf4
89#define GDRST 0xc0
90#define GDRST_FULL (0<<2)
91#define GDRST_RENDER (1<<2)
92#define GDRST_MEDIA (3<<2)
89 93
90/* VGA stuff */ 94/* VGA stuff */
91 95
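GDRST sits in PCI config space (offset 0xc0), so a reset combines one of the domain selectors above with a trigger bit through the PCI accessors rather than I915_WRITE. A hedged sketch of how the i965_reset() this series adds presumably drives it (the function body is outside this section):

u8 gdrst;

pci_read_config_byte(dev->pdev, GDRST, &gdrst);
/* select the render domain and set bit 0 to trigger the reset */
pci_write_config_byte(dev->pdev, GDRST, gdrst | GDRST_RENDER | 0x1);
/* ... then poll until the trigger bit clears ... */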
@@ -344,9 +348,37 @@
344#define FBC_CTL_PLANEA (0<<0) 348#define FBC_CTL_PLANEA (0<<0)
345#define FBC_CTL_PLANEB (1<<0) 349#define FBC_CTL_PLANEB (1<<0)
346#define FBC_FENCE_OFF 0x0321b 350#define FBC_FENCE_OFF 0x0321b
351#define FBC_TAG 0x03300
347 352
348#define FBC_LL_SIZE (1536) 353#define FBC_LL_SIZE (1536)
349 354
355/* Framebuffer compression for GM45+ */
356#define DPFC_CB_BASE 0x3200
357#define DPFC_CONTROL 0x3208
358#define DPFC_CTL_EN (1<<31)
359#define DPFC_CTL_PLANEA (0<<30)
360#define DPFC_CTL_PLANEB (1<<30)
361#define DPFC_CTL_FENCE_EN (1<<29)
362#define DPFC_SR_EN (1<<10)
363#define DPFC_CTL_LIMIT_1X (0<<6)
364#define DPFC_CTL_LIMIT_2X (1<<6)
365#define DPFC_CTL_LIMIT_4X (2<<6)
366#define DPFC_RECOMP_CTL 0x320c
367#define DPFC_RECOMP_STALL_EN (1<<27)
368#define DPFC_RECOMP_STALL_WM_SHIFT (16)
369#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
370#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
371#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
372#define DPFC_STATUS 0x3210
373#define DPFC_INVAL_SEG_SHIFT (16)
374#define DPFC_INVAL_SEG_MASK (0x07ff0000)
375#define DPFC_COMP_SEG_SHIFT (0)
376#define DPFC_COMP_SEG_MASK (0x000003ff)
377#define DPFC_STATUS2 0x3214
378#define DPFC_FENCE_YOFF 0x3218
379#define DPFC_CHICKEN 0x3224
380#define DPFC_HT_MODIFY (1<<31)
381
350/* 382/*
351 * GPIO regs 383 * GPIO regs
352 */ 384 */
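Unlike GDRST, the DPFC block added above is ordinary MMIO. As a small illustration (hypothetical debug code, not part of this patch), the status register reports the compressor's progress using the segment masks defined above:

u32 status = I915_READ(DPFC_STATUS);
u32 compressed = (status & DPFC_COMP_SEG_MASK) >> DPFC_COMP_SEG_SHIFT;
u32 invalidated = (status & DPFC_INVAL_SEG_MASK) >> DPFC_INVAL_SEG_SHIFT;

DRM_DEBUG("FBC: %u segments compressed, %u invalidated\n",
	  compressed, invalidated);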
@@ -2000,6 +2032,8 @@
2000#define PF_ENABLE (1<<31) 2032#define PF_ENABLE (1<<31)
2001#define PFA_WIN_SZ 0x68074 2033#define PFA_WIN_SZ 0x68074
2002#define PFB_WIN_SZ 0x68874 2034#define PFB_WIN_SZ 0x68874
2035#define PFA_WIN_POS 0x68070
2036#define PFB_WIN_POS 0x68870
2003 2037
2004/* legacy palette */ 2038/* legacy palette */
2005#define LGC_PALETTE_A 0x4a000 2039#define LGC_PALETTE_A 0x4a000
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 20d4d19f5568..bd6d8d91ca9f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -228,6 +228,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
228 228
229 if (drm_core_check_feature(dev, DRIVER_MODESET)) 229 if (drm_core_check_feature(dev, DRIVER_MODESET))
230 return; 230 return;
231
231 /* Pipe & plane A info */ 232 /* Pipe & plane A info */
232 dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 233 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
233 dev_priv->savePIPEASRC = I915_READ(PIPEASRC); 234 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@@ -285,6 +286,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
285 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); 286 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
286 return; 287 return;
287} 288}
289
288static void i915_restore_modeset_reg(struct drm_device *dev) 290static void i915_restore_modeset_reg(struct drm_device *dev)
289{ 291{
290 struct drm_i915_private *dev_priv = dev->dev_private; 292 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -379,19 +381,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
379 381
380 return; 382 return;
381} 383}
382int i915_save_state(struct drm_device *dev) 384
385void i915_save_display(struct drm_device *dev)
383{ 386{
384 struct drm_i915_private *dev_priv = dev->dev_private; 387 struct drm_i915_private *dev_priv = dev->dev_private;
385 int i;
386
387 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
388
389 /* Render Standby */
390 if (IS_I965G(dev) && IS_MOBILE(dev))
391 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
392
393 /* Hardware status page */
394 dev_priv->saveHWS = I915_READ(HWS_PGA);
395 388
396 /* Display arbitration control */ 389 /* Display arbitration control */
397 dev_priv->saveDSPARB = I915_READ(DSPARB); 390 dev_priv->saveDSPARB = I915_READ(DSPARB);
@@ -399,6 +392,7 @@ int i915_save_state(struct drm_device *dev)
399 /* This is only meaningful in non-KMS mode */ 392 /* This is only meaningful in non-KMS mode */
400 /* Don't save them in KMS mode */ 393 /* Don't save them in KMS mode */
401 i915_save_modeset_reg(dev); 394 i915_save_modeset_reg(dev);
395
402 /* Cursor state */ 396 /* Cursor state */
403 dev_priv->saveCURACNTR = I915_READ(CURACNTR); 397 dev_priv->saveCURACNTR = I915_READ(CURACNTR);
404 dev_priv->saveCURAPOS = I915_READ(CURAPOS); 398 dev_priv->saveCURAPOS = I915_READ(CURAPOS);
@@ -448,81 +442,22 @@ int i915_save_state(struct drm_device *dev)
448 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 442 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
449 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); 443 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
450 444
451 /* Interrupt state */
452 dev_priv->saveIIR = I915_READ(IIR);
453 dev_priv->saveIER = I915_READ(IER);
454 dev_priv->saveIMR = I915_READ(IMR);
455
456 /* VGA state */ 445 /* VGA state */
457 dev_priv->saveVGA0 = I915_READ(VGA0); 446 dev_priv->saveVGA0 = I915_READ(VGA0);
458 dev_priv->saveVGA1 = I915_READ(VGA1); 447 dev_priv->saveVGA1 = I915_READ(VGA1);
459 dev_priv->saveVGA_PD = I915_READ(VGA_PD); 448 dev_priv->saveVGA_PD = I915_READ(VGA_PD);
460 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 449 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
461 450
462 /* Clock gating state */
463 dev_priv->saveD_STATE = I915_READ(D_STATE);
464 dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
465
466 /* Cache mode state */
467 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
468
469 /* Memory Arbitration state */
470 dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
471
472 /* Scratch space */
473 for (i = 0; i < 16; i++) {
474 dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
475 dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
476 }
477 for (i = 0; i < 3; i++)
478 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
479
480 /* Fences */
481 if (IS_I965G(dev)) {
482 for (i = 0; i < 16; i++)
483 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
484 } else {
485 for (i = 0; i < 8; i++)
486 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
487
488 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
489 for (i = 0; i < 8; i++)
490 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
491 }
492 i915_save_vga(dev); 451 i915_save_vga(dev);
493
494 return 0;
495} 452}
496 453
497int i915_restore_state(struct drm_device *dev) 454void i915_restore_display(struct drm_device *dev)
498{ 455{
499 struct drm_i915_private *dev_priv = dev->dev_private; 456 struct drm_i915_private *dev_priv = dev->dev_private;
500 int i;
501
502 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
503
504 /* Render Standby */
505 if (IS_I965G(dev) && IS_MOBILE(dev))
506 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
507
508 /* Hardware status page */
509 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
510 457
511 /* Display arbitration */ 458 /* Display arbitration */
512 I915_WRITE(DSPARB, dev_priv->saveDSPARB); 459 I915_WRITE(DSPARB, dev_priv->saveDSPARB);
513 460
514 /* Fences */
515 if (IS_I965G(dev)) {
516 for (i = 0; i < 16; i++)
517 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
518 } else {
519 for (i = 0; i < 8; i++)
520 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
521 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
522 for (i = 0; i < 8; i++)
523 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
524 }
525
526 /* Display port ratios (must be done before clock is set) */ 461 /* Display port ratios (must be done before clock is set) */
527 if (SUPPORTS_INTEGRATED_DP(dev)) { 462 if (SUPPORTS_INTEGRATED_DP(dev)) {
528 I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); 463 I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
@@ -534,9 +469,11 @@ int i915_restore_state(struct drm_device *dev)
534 I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); 469 I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
535 I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); 470 I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
536 } 471 }
472
537 /* This is only meaningful in non-KMS mode */ 473 /* This is only meaningful in non-KMS mode */
538 /* Don't restore them in KMS mode */ 474 /* Don't restore them in KMS mode */
539 i915_restore_modeset_reg(dev); 475 i915_restore_modeset_reg(dev);
476
540 /* Cursor state */ 477 /* Cursor state */
541 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); 478 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
542 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); 479 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
@@ -586,6 +523,95 @@ int i915_restore_state(struct drm_device *dev)
586 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); 523 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
587 DRM_UDELAY(150); 524 DRM_UDELAY(150);
588 525
526 i915_restore_vga(dev);
527}
528
529int i915_save_state(struct drm_device *dev)
530{
531 struct drm_i915_private *dev_priv = dev->dev_private;
532 int i;
533
534 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
535
536 /* Render Standby */
537 if (IS_I965G(dev) && IS_MOBILE(dev))
538 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
539
540 /* Hardware status page */
541 dev_priv->saveHWS = I915_READ(HWS_PGA);
542
543 i915_save_display(dev);
544
545 /* Interrupt state */
546 dev_priv->saveIER = I915_READ(IER);
547 dev_priv->saveIMR = I915_READ(IMR);
548
549 /* Clock gating state */
550 dev_priv->saveD_STATE = I915_READ(D_STATE);
551 dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
552
553 /* Cache mode state */
554 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
555
556 /* Memory Arbitration state */
557 dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
558
559 /* Scratch space */
560 for (i = 0; i < 16; i++) {
561 dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
562 dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
563 }
564 for (i = 0; i < 3; i++)
565 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
566
567 /* Fences */
568 if (IS_I965G(dev)) {
569 for (i = 0; i < 16; i++)
570 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
571 } else {
572 for (i = 0; i < 8; i++)
573 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
574
575 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
576 for (i = 0; i < 8; i++)
577 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
578 }
579
580 return 0;
581}
582
583int i915_restore_state(struct drm_device *dev)
584{
585 struct drm_i915_private *dev_priv = dev->dev_private;
586 int i;
587
588 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
589
590 /* Render Standby */
591 if (IS_I965G(dev) && IS_MOBILE(dev))
592 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
593
594 /* Hardware status page */
595 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
596
597 /* Fences */
598 if (IS_I965G(dev)) {
599 for (i = 0; i < 16; i++)
600 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
601 } else {
602 for (i = 0; i < 8; i++)
603 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
604 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
605 for (i = 0; i < 8; i++)
606 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
607 }
608
609 i915_restore_display(dev);
610
611 /* Interrupt state */
612 I915_WRITE (IER, dev_priv->saveIER);
613 I915_WRITE (IMR, dev_priv->saveIMR);
614
589 /* Clock gating state */ 615 /* Clock gating state */
590 I915_WRITE (D_STATE, dev_priv->saveD_STATE); 616 I915_WRITE (D_STATE, dev_priv->saveD_STATE);
591 I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D); 617 I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
@@ -603,8 +629,6 @@ int i915_restore_state(struct drm_device *dev)
603 for (i = 0; i < 3; i++) 629 for (i = 0; i < 3; i++)
604 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 630 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
605 631
606 i915_restore_vga(dev);
607
608 return 0; 632 return 0;
609} 633}
610 634
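The net effect of this refactor: display register save/restore now has its own entry points, so a caller that only needs to redo the display side no longer drags interrupt, fence, clock-gating, and scratch state along. A hedged illustration of the intended call pattern:

i915_save_display(dev);
/* ... reset or power-cycle the display hardware ... */
i915_restore_display(dev);

/* Full suspend/resume still goes through i915_save_state() and
 * i915_restore_state(), which call the display helpers internally. */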
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
new file mode 100644
index 000000000000..5567a40816f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -0,0 +1,315 @@
1#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
2#define _I915_TRACE_H_
3
4#include <linux/stringify.h>
5#include <linux/types.h>
6#include <linux/tracepoint.h>
7
8#include <drm/drmP.h>
9
10#undef TRACE_SYSTEM
11#define TRACE_SYSTEM i915
12#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
13#define TRACE_INCLUDE_FILE i915_trace
14
15/* object tracking */
16
17TRACE_EVENT(i915_gem_object_create,
18
19 TP_PROTO(struct drm_gem_object *obj),
20
21 TP_ARGS(obj),
22
23 TP_STRUCT__entry(
24 __field(struct drm_gem_object *, obj)
25 __field(u32, size)
26 ),
27
28 TP_fast_assign(
29 __entry->obj = obj;
30 __entry->size = obj->size;
31 ),
32
33 TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
34);
35
36TRACE_EVENT(i915_gem_object_bind,
37
38 TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
39
40 TP_ARGS(obj, gtt_offset),
41
42 TP_STRUCT__entry(
43 __field(struct drm_gem_object *, obj)
44 __field(u32, gtt_offset)
45 ),
46
47 TP_fast_assign(
48 __entry->obj = obj;
49 __entry->gtt_offset = gtt_offset;
50 ),
51
52 TP_printk("obj=%p, gtt_offset=%08x",
53 __entry->obj, __entry->gtt_offset)
54);
55
56TRACE_EVENT(i915_gem_object_clflush,
57
58 TP_PROTO(struct drm_gem_object *obj),
59
60 TP_ARGS(obj),
61
62 TP_STRUCT__entry(
63 __field(struct drm_gem_object *, obj)
64 ),
65
66 TP_fast_assign(
67 __entry->obj = obj;
68 ),
69
70 TP_printk("obj=%p", __entry->obj)
71);
72
73TRACE_EVENT(i915_gem_object_change_domain,
74
75 TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
76
77 TP_ARGS(obj, old_read_domains, old_write_domain),
78
79 TP_STRUCT__entry(
80 __field(struct drm_gem_object *, obj)
81 __field(u32, read_domains)
82 __field(u32, write_domain)
83 ),
84
85 TP_fast_assign(
86 __entry->obj = obj;
87 __entry->read_domains = obj->read_domains | (old_read_domains << 16);
88 __entry->write_domain = obj->write_domain | (old_write_domain << 16);
89 ),
90
91 TP_printk("obj=%p, read=%04x, write=%04x",
92 __entry->obj,
93 __entry->read_domains, __entry->write_domain)
94);
95
96TRACE_EVENT(i915_gem_object_get_fence,
97
98 TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
99
100 TP_ARGS(obj, fence, tiling_mode),
101
102 TP_STRUCT__entry(
103 __field(struct drm_gem_object *, obj)
104 __field(int, fence)
105 __field(int, tiling_mode)
106 ),
107
108 TP_fast_assign(
109 __entry->obj = obj;
110 __entry->fence = fence;
111 __entry->tiling_mode = tiling_mode;
112 ),
113
114 TP_printk("obj=%p, fence=%d, tiling=%d",
115 __entry->obj, __entry->fence, __entry->tiling_mode)
116);
117
118TRACE_EVENT(i915_gem_object_unbind,
119
120 TP_PROTO(struct drm_gem_object *obj),
121
122 TP_ARGS(obj),
123
124 TP_STRUCT__entry(
125 __field(struct drm_gem_object *, obj)
126 ),
127
128 TP_fast_assign(
129 __entry->obj = obj;
130 ),
131
132 TP_printk("obj=%p", __entry->obj)
133);
134
135TRACE_EVENT(i915_gem_object_destroy,
136
137 TP_PROTO(struct drm_gem_object *obj),
138
139 TP_ARGS(obj),
140
141 TP_STRUCT__entry(
142 __field(struct drm_gem_object *, obj)
143 ),
144
145 TP_fast_assign(
146 __entry->obj = obj;
147 ),
148
149 TP_printk("obj=%p", __entry->obj)
150);
151
152/* batch tracing */
153
154TRACE_EVENT(i915_gem_request_submit,
155
156 TP_PROTO(struct drm_device *dev, u32 seqno),
157
158 TP_ARGS(dev, seqno),
159
160 TP_STRUCT__entry(
161 __field(struct drm_device *, dev)
162 __field(u32, seqno)
163 ),
164
165 TP_fast_assign(
166 __entry->dev = dev;
167 __entry->seqno = seqno;
168 ),
169
170 TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
171);
172
173TRACE_EVENT(i915_gem_request_flush,
174
175 TP_PROTO(struct drm_device *dev, u32 seqno,
176 u32 flush_domains, u32 invalidate_domains),
177
178 TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
179
180 TP_STRUCT__entry(
181 __field(struct drm_device *, dev)
182 __field(u32, seqno)
183 __field(u32, flush_domains)
184 __field(u32, invalidate_domains)
185 ),
186
187 TP_fast_assign(
188 __entry->dev = dev;
189 __entry->seqno = seqno;
190 __entry->flush_domains = flush_domains;
191 __entry->invalidate_domains = invalidate_domains;
192 ),
193
194 TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x",
195 __entry->dev, __entry->seqno,
196 __entry->flush_domains, __entry->invalidate_domains)
197);
198
199
200TRACE_EVENT(i915_gem_request_complete,
201
202 TP_PROTO(struct drm_device *dev, u32 seqno),
203
204 TP_ARGS(dev, seqno),
205
206 TP_STRUCT__entry(
207 __field(struct drm_device *, dev)
208 __field(u32, seqno)
209 ),
210
211 TP_fast_assign(
212 __entry->dev = dev;
213 __entry->seqno = seqno;
214 ),
215
216 TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
217);
218
219TRACE_EVENT(i915_gem_request_retire,
220
221 TP_PROTO(struct drm_device *dev, u32 seqno),
222
223 TP_ARGS(dev, seqno),
224
225 TP_STRUCT__entry(
226 __field(struct drm_device *, dev)
227 __field(u32, seqno)
228 ),
229
230 TP_fast_assign(
231 __entry->dev = dev;
232 __entry->seqno = seqno;
233 ),
234
235 TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
236);
237
238TRACE_EVENT(i915_gem_request_wait_begin,
239
240 TP_PROTO(struct drm_device *dev, u32 seqno),
241
242 TP_ARGS(dev, seqno),
243
244 TP_STRUCT__entry(
245 __field(struct drm_device *, dev)
246 __field(u32, seqno)
247 ),
248
249 TP_fast_assign(
250 __entry->dev = dev;
251 __entry->seqno = seqno;
252 ),
253
254 TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
255);
256
257TRACE_EVENT(i915_gem_request_wait_end,
258
259 TP_PROTO(struct drm_device *dev, u32 seqno),
260
261 TP_ARGS(dev, seqno),
262
263 TP_STRUCT__entry(
264 __field(struct drm_device *, dev)
265 __field(u32, seqno)
266 ),
267
268 TP_fast_assign(
269 __entry->dev = dev;
270 __entry->seqno = seqno;
271 ),
272
273 TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
274);
275
276TRACE_EVENT(i915_ring_wait_begin,
277
278 TP_PROTO(struct drm_device *dev),
279
280 TP_ARGS(dev),
281
282 TP_STRUCT__entry(
283 __field(struct drm_device *, dev)
284 ),
285
286 TP_fast_assign(
287 __entry->dev = dev;
288 ),
289
290 TP_printk("dev=%p", __entry->dev)
291);
292
293TRACE_EVENT(i915_ring_wait_end,
294
295 TP_PROTO(struct drm_device *dev),
296
297 TP_ARGS(dev),
298
299 TP_STRUCT__entry(
300 __field(struct drm_device *, dev)
301 ),
302
303 TP_fast_assign(
304 __entry->dev = dev;
305 ),
306
307 TP_printk("dev=%p", __entry->dev)
308);
309
310#endif /* _I915_TRACE_H_ */
311
312/* This part must be outside protection */
313#undef TRACE_INCLUDE_PATH
314#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
315#include <trace/define_trace.h>
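Each TRACE_EVENT(i915_foo, ...) above expands into an inline trace_i915_foo() stub that call sites elsewhere in this series invoke; the stub is a near-no-op until the event is enabled at runtime. A sketch of both halves (the create call site is illustrative; the tracing paths assume debugfs, as was standard at the time):

/* kernel side: fire the event where the object is created */
obj = drm_gem_object_alloc(dev, args->size);
trace_i915_gem_object_create(obj);

/* userspace side: enable and watch, e.g.
 *   echo 1 > /sys/kernel/debug/tracing/events/i915/enable
 *   cat /sys/kernel/debug/tracing/trace_pipe
 */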
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
new file mode 100644
index 000000000000..ead876eb6ea0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -0,0 +1,11 @@
1/*
2 * Copyright © 2009 Intel Corporation
3 *
4 * Authors:
5 * Chris Wilson <chris@chris-wilson.co.uk>
6 */
7
8#include "i915_drv.h"
9
10#define CREATE_TRACE_POINTS
11#include "i915_trace.h"
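This two-line file exists because of a rule of the tracepoint machinery: exactly one translation unit must define CREATE_TRACE_POINTS before including the event header, which makes that unit emit the event bodies. Every other user includes the header plainly and gets declarations only (the caller below is hypothetical):

#include "i915_trace.h"		/* declarations only here */

static void example_wait(struct drm_device *dev)
{
	trace_i915_ring_wait_begin(dev);
	/* ... block until ring space frees up ... */
	trace_i915_ring_wait_end(dev);
}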
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1e28c1652fd0..4337414846b6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -217,6 +217,9 @@ parse_general_features(struct drm_i915_private *dev_priv,
217 if (IS_I85X(dev_priv->dev)) 217 if (IS_I85X(dev_priv->dev))
218 dev_priv->lvds_ssc_freq = 218 dev_priv->lvds_ssc_freq =
219 general->ssc_freq ? 66 : 48; 219 general->ssc_freq ? 66 : 48;
220 else if (IS_IGDNG(dev_priv->dev))
221 dev_priv->lvds_ssc_freq =
222 general->ssc_freq ? 100 : 120;
220 else 223 else
221 dev_priv->lvds_ssc_freq = 224 dev_priv->lvds_ssc_freq =
222 general->ssc_freq ? 100 : 96; 225 general->ssc_freq ? 100 : 96;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 88814fa2dfd2..212e22740fc1 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -179,13 +179,10 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
179{ 179{
180 struct drm_device *dev = connector->dev; 180 struct drm_device *dev = connector->dev;
181 struct drm_i915_private *dev_priv = dev->dev_private; 181 struct drm_i915_private *dev_priv = dev->dev_private;
182 u32 adpa, temp; 182 u32 adpa;
183 bool ret; 183 bool ret;
184 184
185 temp = adpa = I915_READ(PCH_ADPA); 185 adpa = I915_READ(PCH_ADPA);
186
187 adpa &= ~ADPA_DAC_ENABLE;
188 I915_WRITE(PCH_ADPA, adpa);
189 186
190 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 187 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
191 188
@@ -212,8 +209,6 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
212 else 209 else
213 ret = false; 210 ret = false;
214 211
215 /* restore origin register */
216 I915_WRITE(PCH_ADPA, temp);
217 return ret; 212 return ret;
218} 213}
219 214
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0227b1652906..93ff6c03733e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,8 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27#include <linux/module.h>
28#include <linux/input.h>
27#include <linux/i2c.h> 29#include <linux/i2c.h>
28#include <linux/kernel.h> 30#include <linux/kernel.h>
29#include "drmP.h" 31#include "drmP.h"
@@ -875,7 +877,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
875 refclk, best_clock); 877 refclk, best_clock);
876 878
877 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 879 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
878 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 880 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
879 LVDS_CLKB_POWER_UP) 881 LVDS_CLKB_POWER_UP)
880 clock.p2 = limit->p2.p2_fast; 882 clock.p2 = limit->p2.p2_fast;
881 else 883 else
@@ -952,6 +954,241 @@ intel_wait_for_vblank(struct drm_device *dev)
952 mdelay(20); 954 mdelay(20);
953} 955}
954 956
957/* Parameters have changed, update FBC info */
958static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
959{
960 struct drm_device *dev = crtc->dev;
961 struct drm_i915_private *dev_priv = dev->dev_private;
962 struct drm_framebuffer *fb = crtc->fb;
963 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
964 struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
965 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
966 int plane, i;
967 u32 fbc_ctl, fbc_ctl2;
968
969 dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
970
971 if (fb->pitch < dev_priv->cfb_pitch)
972 dev_priv->cfb_pitch = fb->pitch;
973
974 /* FBC_CTL wants 64B units */
975 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
976 dev_priv->cfb_fence = obj_priv->fence_reg;
977 dev_priv->cfb_plane = intel_crtc->plane;
978 plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
979
980 /* Clear old tags */
981 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
982 I915_WRITE(FBC_TAG + (i * 4), 0);
983
984 /* Set it up... */
985 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
986 if (obj_priv->tiling_mode != I915_TILING_NONE)
987 fbc_ctl2 |= FBC_CTL_CPU_FENCE;
988 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
989 I915_WRITE(FBC_FENCE_OFF, crtc->y);
990
991 /* enable it... */
992 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
993 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
994 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
995 if (obj_priv->tiling_mode != I915_TILING_NONE)
996 fbc_ctl |= dev_priv->cfb_fence;
997 I915_WRITE(FBC_CONTROL, fbc_ctl);
998
 999	DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d\n",
1000 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
1001}
1002
1003void i8xx_disable_fbc(struct drm_device *dev)
1004{
1005 struct drm_i915_private *dev_priv = dev->dev_private;
1006 u32 fbc_ctl;
1007
1008 if (!I915_HAS_FBC(dev))
1009 return;
1010
1011 /* Disable compression */
1012 fbc_ctl = I915_READ(FBC_CONTROL);
1013 fbc_ctl &= ~FBC_CTL_EN;
1014 I915_WRITE(FBC_CONTROL, fbc_ctl);
1015
1016 /* Wait for compressing bit to clear */
1017 while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
1018 ; /* nothing */
1019
1020 intel_wait_for_vblank(dev);
1021
1022 DRM_DEBUG("disabled FBC\n");
1023}
1024
1025static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
1026{
1027 struct drm_device *dev = crtc->dev;
1028 struct drm_i915_private *dev_priv = dev->dev_private;
1029
1030 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1031}
1032
1033static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1034{
1035 struct drm_device *dev = crtc->dev;
1036 struct drm_i915_private *dev_priv = dev->dev_private;
1037 struct drm_framebuffer *fb = crtc->fb;
1038 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1039 struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
1040 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1041 int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
1042 DPFC_CTL_PLANEB);
1043 unsigned long stall_watermark = 200;
1044 u32 dpfc_ctl;
1045
1046 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1047 dev_priv->cfb_fence = obj_priv->fence_reg;
1048 dev_priv->cfb_plane = intel_crtc->plane;
1049
1050 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1051 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1052 dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
1053 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1054 } else {
1055		I915_WRITE(DPFC_CHICKEN, I915_READ(DPFC_CHICKEN) & ~DPFC_HT_MODIFY);
1056 }
1057
1058 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1059 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1060 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1061 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1062 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1063
1064 /* enable it... */
1065 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1066
1067 DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane);
1068}
1069
1070void g4x_disable_fbc(struct drm_device *dev)
1071{
1072 struct drm_i915_private *dev_priv = dev->dev_private;
1073 u32 dpfc_ctl;
1074
1075 /* Disable compression */
1076 dpfc_ctl = I915_READ(DPFC_CONTROL);
1077 dpfc_ctl &= ~DPFC_CTL_EN;
1078 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1079 intel_wait_for_vblank(dev);
1080
1081 DRM_DEBUG("disabled FBC\n");
1082}
1083
1084static bool g4x_fbc_enabled(struct drm_crtc *crtc)
1085{
1086 struct drm_device *dev = crtc->dev;
1087 struct drm_i915_private *dev_priv = dev->dev_private;
1088
1089 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1090}
1091
1092/**
1093 * intel_update_fbc - enable/disable FBC as needed
1094 * @crtc: CRTC to point the compressor at
1095 * @mode: mode in use
1096 *
1097 * Set up the framebuffer compression hardware at mode set time. We
1098 * enable it if possible:
1099 * - plane A only (on pre-965)
1100 * - no pixel multiply/line duplication
1101 * - no alpha buffer discard
1102 * - no dual wide
1103 * - framebuffer <= 2048 in width, 1536 in height
1104 *
1105 * We can't assume that any compression will take place (worst case),
1106 * so the compressed buffer has to be the same size as the uncompressed
1107 * one. It also must reside (along with the line length buffer) in
1108 * stolen memory.
1109 *
1110 * We need to enable/disable FBC on a global basis.
1111 */
1112static void intel_update_fbc(struct drm_crtc *crtc,
1113 struct drm_display_mode *mode)
1114{
1115 struct drm_device *dev = crtc->dev;
1116 struct drm_i915_private *dev_priv = dev->dev_private;
1117 struct drm_framebuffer *fb = crtc->fb;
1118 struct intel_framebuffer *intel_fb;
1119 struct drm_i915_gem_object *obj_priv;
1120 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1121 int plane = intel_crtc->plane;
1122
1123 if (!i915_powersave)
1124 return;
1125
1126 if (!dev_priv->display.fbc_enabled ||
1127 !dev_priv->display.enable_fbc ||
1128 !dev_priv->display.disable_fbc)
1129 return;
1130
1131 if (!crtc->fb)
1132 return;
1133
1134 intel_fb = to_intel_framebuffer(fb);
1135 obj_priv = intel_fb->obj->driver_private;
1136
1137 /*
1138 * If FBC is already on, we just have to verify that we can
1139 * keep it that way...
1140 * Need to disable if:
1141 * - changing FBC params (stride, fence, mode)
1142 * - new fb is too large to fit in compressed buffer
1143 * - going to an unsupported config (interlace, pixel multiply, etc.)
1144 */
1145 if (intel_fb->obj->size > dev_priv->cfb_size) {
1146 DRM_DEBUG("framebuffer too large, disabling compression\n");
1147 goto out_disable;
1148 }
1149 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1150 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
1151 DRM_DEBUG("mode incompatible with compression, disabling\n");
1152 goto out_disable;
1153 }
1154 if ((mode->hdisplay > 2048) ||
1155 (mode->vdisplay > 1536)) {
1156 DRM_DEBUG("mode too large for compression, disabling\n");
1157 goto out_disable;
1158 }
1159 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
1160 DRM_DEBUG("plane not 0, disabling compression\n");
1161 goto out_disable;
1162 }
1163 if (obj_priv->tiling_mode != I915_TILING_X) {
1164 DRM_DEBUG("framebuffer not tiled, disabling compression\n");
1165 goto out_disable;
1166 }
1167
1168 if (dev_priv->display.fbc_enabled(crtc)) {
1169 /* We can re-enable it in this case, but need to update pitch */
1170 if (fb->pitch > dev_priv->cfb_pitch)
1171 dev_priv->display.disable_fbc(dev);
1172 if (obj_priv->fence_reg != dev_priv->cfb_fence)
1173 dev_priv->display.disable_fbc(dev);
1174 if (plane != dev_priv->cfb_plane)
1175 dev_priv->display.disable_fbc(dev);
1176 }
1177
1178 if (!dev_priv->display.fbc_enabled(crtc)) {
1179 /* Now try to turn it back on if possible */
1180 dev_priv->display.enable_fbc(crtc, 500);
1181 }
1182
1183 return;
1184
1185out_disable:
1186 DRM_DEBUG("unsupported config, disabling FBC\n");
1187 /* Multiple disables should be harmless */
1188 if (dev_priv->display.fbc_enabled(crtc))
1189 dev_priv->display.disable_fbc(dev);
1190}
1191
955static int 1192static int
956intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 1193intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
957 struct drm_framebuffer *old_fb) 1194 struct drm_framebuffer *old_fb)
@@ -964,12 +1201,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
964 struct drm_i915_gem_object *obj_priv; 1201 struct drm_i915_gem_object *obj_priv;
965 struct drm_gem_object *obj; 1202 struct drm_gem_object *obj;
966 int pipe = intel_crtc->pipe; 1203 int pipe = intel_crtc->pipe;
1204 int plane = intel_crtc->plane;
967 unsigned long Start, Offset; 1205 unsigned long Start, Offset;
968 int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR); 1206 int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
969 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); 1207 int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
970 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; 1208 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
971 int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF); 1209 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
972 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 1210 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
973 u32 dspcntr, alignment; 1211 u32 dspcntr, alignment;
974 int ret; 1212 int ret;
975 1213
@@ -979,12 +1217,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
979 return 0; 1217 return 0;
980 } 1218 }
981 1219
982 switch (pipe) { 1220 switch (plane) {
983 case 0: 1221 case 0:
984 case 1: 1222 case 1:
985 break; 1223 break;
986 default: 1224 default:
987 DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); 1225 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
988 return -EINVAL; 1226 return -EINVAL;
989 } 1227 }
990 1228
@@ -1086,6 +1324,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1086 I915_READ(dspbase); 1324 I915_READ(dspbase);
1087 } 1325 }
1088 1326
1327	if (IS_I965G(dev) || plane == 0)
1328 intel_update_fbc(crtc, &crtc->mode);
1329
1089 intel_wait_for_vblank(dev); 1330 intel_wait_for_vblank(dev);
1090 1331
1091 if (old_fb) { 1332 if (old_fb) {
@@ -1217,6 +1458,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1217 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; 1458 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
1218 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; 1459 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
1219 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; 1460 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
1461 int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
1220 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; 1462 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
1221 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; 1463 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
1222 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; 1464 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1268,6 +1510,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1268 } 1510 }
1269 } 1511 }
1270 1512
1513 /* Enable panel fitting for LVDS */
1514 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1515 temp = I915_READ(pf_ctl_reg);
1516 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE);
1517
1518 /* currently full aspect */
1519 I915_WRITE(pf_win_pos, 0);
1520
1521 I915_WRITE(pf_win_size,
1522 (dev_priv->panel_fixed_mode->hdisplay << 16) |
1523 (dev_priv->panel_fixed_mode->vdisplay));
1524 }
1525
1271 /* Enable CPU pipe */ 1526 /* Enable CPU pipe */
1272 temp = I915_READ(pipeconf_reg); 1527 temp = I915_READ(pipeconf_reg);
1273 if ((temp & PIPEACONF_ENABLE) == 0) { 1528 if ((temp & PIPEACONF_ENABLE) == 0) {
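The new LVDS branch programs the panel fitter for full-aspect scaling: window position 0 and a window the size of the fixed panel mode. The size register packs both dimensions into one word; spelled out as a helper (name hypothetical) with a worked value:

/* (hdisplay << 16) | vdisplay; e.g. 1280x800 packs to 0x05000320 */
static u32 pf_win_sz(u16 hdisplay, u16 vdisplay)
{
	return ((u32)hdisplay << 16) | vdisplay;
}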
@@ -1532,9 +1787,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1532 struct drm_i915_private *dev_priv = dev->dev_private; 1787 struct drm_i915_private *dev_priv = dev->dev_private;
1533 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1788 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1534 int pipe = intel_crtc->pipe; 1789 int pipe = intel_crtc->pipe;
1790 int plane = intel_crtc->plane;
1535 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 1791 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
1536 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 1792 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
1537 int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; 1793 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
1538 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; 1794 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
1539 u32 temp; 1795 u32 temp;
1540 1796
@@ -1577,6 +1833,9 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1577 1833
1578 intel_crtc_load_lut(crtc); 1834 intel_crtc_load_lut(crtc);
1579 1835
1836	if (IS_I965G(dev) || plane == 0)
1837 intel_update_fbc(crtc, &crtc->mode);
1838
1580 /* Give the overlay scaler a chance to enable if it's on this pipe */ 1839 /* Give the overlay scaler a chance to enable if it's on this pipe */
1581 //intel_crtc_dpms_video(crtc, true); TODO 1840 //intel_crtc_dpms_video(crtc, true); TODO
1582 intel_update_watermarks(dev); 1841 intel_update_watermarks(dev);
@@ -1586,6 +1845,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1586 /* Give the overlay scaler a chance to disable if it's on this pipe */ 1845 /* Give the overlay scaler a chance to disable if it's on this pipe */
1587 //intel_crtc_dpms_video(crtc, FALSE); TODO 1846 //intel_crtc_dpms_video(crtc, FALSE); TODO
1588 1847
1848 if (dev_priv->cfb_plane == plane &&
1849 dev_priv->display.disable_fbc)
1850 dev_priv->display.disable_fbc(dev);
1851
1589 /* Disable the VGA plane that we never use */ 1852 /* Disable the VGA plane that we never use */
1590 i915_disable_vga(dev); 1853 i915_disable_vga(dev);
1591 1854
@@ -1634,15 +1897,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1634static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) 1897static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
1635{ 1898{
1636 struct drm_device *dev = crtc->dev; 1899 struct drm_device *dev = crtc->dev;
1900 struct drm_i915_private *dev_priv = dev->dev_private;
1637 struct drm_i915_master_private *master_priv; 1901 struct drm_i915_master_private *master_priv;
1638 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1902 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1639 int pipe = intel_crtc->pipe; 1903 int pipe = intel_crtc->pipe;
1640 bool enabled; 1904 bool enabled;
1641 1905
1642 if (IS_IGDNG(dev)) 1906 dev_priv->display.dpms(crtc, mode);
1643 igdng_crtc_dpms(crtc, mode);
1644 else
1645 i9xx_crtc_dpms(crtc, mode);
1646 1907
1647 intel_crtc->dpms_mode = mode; 1908 intel_crtc->dpms_mode = mode;
1648 1909
@@ -1709,56 +1970,68 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
1709 return true; 1970 return true;
1710} 1971}
1711 1972
1973static int i945_get_display_clock_speed(struct drm_device *dev)
1974{
1975 return 400000;
1976}
1712 1977
1713/** Returns the core display clock speed for i830 - i945 */ 1978static int i915_get_display_clock_speed(struct drm_device *dev)
1714static int intel_get_core_clock_speed(struct drm_device *dev)
1715{ 1979{
1980 return 333000;
1981}
1716 1982
1717 /* Core clock values taken from the published datasheets. 1983static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
1718 * The 830 may go up to 166 Mhz, which we should check. 1984{
1719 */ 1985 return 200000;
1720 if (IS_I945G(dev)) 1986}
1721 return 400000;
1722 else if (IS_I915G(dev))
1723 return 333000;
1724 else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
1725 return 200000;
1726 else if (IS_I915GM(dev)) {
1727 u16 gcfgc = 0;
1728 1987
1729 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 1988static int i915gm_get_display_clock_speed(struct drm_device *dev)
1989{
1990 u16 gcfgc = 0;
1730 1991
1731 if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 1992 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
1732 return 133000; 1993
1733 else { 1994 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
1734 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 1995 return 133000;
1735 case GC_DISPLAY_CLOCK_333_MHZ: 1996 else {
1736 return 333000; 1997 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
1737 default: 1998 case GC_DISPLAY_CLOCK_333_MHZ:
1738 case GC_DISPLAY_CLOCK_190_200_MHZ: 1999 return 333000;
1739 return 190000; 2000 default:
1740 } 2001 case GC_DISPLAY_CLOCK_190_200_MHZ:
1741 } 2002 return 190000;
1742 } else if (IS_I865G(dev))
1743 return 266000;
1744 else if (IS_I855(dev)) {
1745 u16 hpllcc = 0;
1746 /* Assume that the hardware is in the high speed state. This
1747 * should be the default.
1748 */
1749 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
1750 case GC_CLOCK_133_200:
1751 case GC_CLOCK_100_200:
1752 return 200000;
1753 case GC_CLOCK_166_250:
1754 return 250000;
1755 case GC_CLOCK_100_133:
1756 return 133000;
1757 } 2003 }
1758 } else /* 852, 830 */ 2004 }
2005}
2006
2007static int i865_get_display_clock_speed(struct drm_device *dev)
2008{
2009 return 266000;
2010}
2011
2012static int i855_get_display_clock_speed(struct drm_device *dev)
2013{
2014 u16 hpllcc = 0;
2015 /* Assume that the hardware is in the high speed state. This
2016 * should be the default.
2017 */
2018 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
2019 case GC_CLOCK_133_200:
2020 case GC_CLOCK_100_200:
2021 return 200000;
2022 case GC_CLOCK_166_250:
2023 return 250000;
2024 case GC_CLOCK_100_133:
1759 return 133000; 2025 return 133000;
2026 }
2027
2028 /* Shouldn't happen */
2029 return 0;
2030}
1760 2031
1761 return 0; /* Silence gcc warning */ 2032static int i830_get_display_clock_speed(struct drm_device *dev)
2033{
2034 return 133000;
1762} 2035}
1763 2036
1764/** 2037/**
@@ -1921,7 +2194,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1921{ 2194{
1922 long entries_required, wm_size; 2195 long entries_required, wm_size;
1923 2196
1924 entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000; 2197 /*
2198 * Note: we need to make sure we don't overflow for various clock &
2199 * latency values.
2200 * clocks go from a few thousand to several hundred thousand.
2201 * latency is usually a few thousand
2202 */
2203 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
2204 1000;
1925 entries_required /= wm->cacheline_size; 2205 entries_required /= wm->cacheline_size;
1926 2206
1927 DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); 2207 DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required);
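The reordering above matters on 32-bit builds, where long is 32 bits: with a high dotclock the old intermediate product overflows before the final divide can rescue it. A self-contained demonstration with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned int clock_in_khz = 400000, pixel_size = 4, latency_ns = 5000;

	/* old order: 400000 * 4 * 5000 = 8e9, past the 2^31-1 limit */
	long long old_intermediate =
		(long long)clock_in_khz * pixel_size * latency_ns;
	/* new order: (400000 / 1000) * 4 * 5000 = 8e6, safely in range,
	 * and the final result is the same 8000 entries */
	long entries = ((clock_in_khz / 1000) * pixel_size * latency_ns)
			/ 1000;

	printf("intermediate %lld -> %ld entries\n", old_intermediate,
	       entries);
	return 0;
}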
@@ -1986,14 +2266,13 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
1986 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { 2266 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
1987 latency = &cxsr_latency_table[i]; 2267 latency = &cxsr_latency_table[i];
1988 if (is_desktop == latency->is_desktop && 2268 if (is_desktop == latency->is_desktop &&
1989 fsb == latency->fsb_freq && mem == latency->mem_freq) 2269 fsb == latency->fsb_freq && mem == latency->mem_freq)
1990 break; 2270 return latency;
1991 } 2271 }
1992 if (i >= ARRAY_SIZE(cxsr_latency_table)) { 2272
1993 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); 2273 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
1994 return NULL; 2274
1995 } 2275 return NULL;
1996 return latency;
1997} 2276}
1998 2277
1999static void igd_disable_cxsr(struct drm_device *dev) 2278static void igd_disable_cxsr(struct drm_device *dev)
@@ -2084,32 +2363,36 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
2084 */ 2363 */
2085const static int latency_ns = 5000; 2364const static int latency_ns = 5000;
2086 2365
2087static int intel_get_fifo_size(struct drm_device *dev, int plane) 2366static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2088{ 2367{
2089 struct drm_i915_private *dev_priv = dev->dev_private; 2368 struct drm_i915_private *dev_priv = dev->dev_private;
2090 uint32_t dsparb = I915_READ(DSPARB); 2369 uint32_t dsparb = I915_READ(DSPARB);
2091 int size; 2370 int size;
2092 2371
2093 if (IS_I9XX(dev)) { 2372 if (plane == 0)
2094 if (plane == 0)
2095 size = dsparb & 0x7f;
2096 else
2097 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
2098 (dsparb & 0x7f);
2099 } else if (IS_I85X(dev)) {
2100 if (plane == 0)
2101 size = dsparb & 0x1ff;
2102 else
2103 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
2104 (dsparb & 0x1ff);
2105 size >>= 1; /* Convert to cachelines */
2106 } else if (IS_845G(dev)) {
2107 size = dsparb & 0x7f; 2373 size = dsparb & 0x7f;
2108 size >>= 2; /* Convert to cachelines */ 2374 else
2109 } else { 2375 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
2110 size = dsparb & 0x7f; 2376 (dsparb & 0x7f);
2111 size >>= 1; /* Convert to cachelines */ 2377
2112 } 2378 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
2379 size);
2380
2381 return size;
2382}
2383
2384static int i85x_get_fifo_size(struct drm_device *dev, int plane)
2385{
2386 struct drm_i915_private *dev_priv = dev->dev_private;
2387 uint32_t dsparb = I915_READ(DSPARB);
2388 int size;
2389
2390 if (plane == 0)
2391 size = dsparb & 0x1ff;
2392 else
2393 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
2394 (dsparb & 0x1ff);
2395 size >>= 1; /* Convert to cachelines */
2113 2396
2114 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2397 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
2115 size); 2398 size);
@@ -2117,7 +2400,38 @@ static int intel_get_fifo_size(struct drm_device *dev, int plane)
2117 return size; 2400 return size;
2118} 2401}
2119 2402
2120static void g4x_update_wm(struct drm_device *dev) 2403static int i845_get_fifo_size(struct drm_device *dev, int plane)
2404{
2405 struct drm_i915_private *dev_priv = dev->dev_private;
2406 uint32_t dsparb = I915_READ(DSPARB);
2407 int size;
2408
2409 size = dsparb & 0x7f;
2410 size >>= 2; /* Convert to cachelines */
2411
2412 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
2413 size);
2414
2415 return size;
2416}
2417
2418static int i830_get_fifo_size(struct drm_device *dev, int plane)
2419{
2420 struct drm_i915_private *dev_priv = dev->dev_private;
2421 uint32_t dsparb = I915_READ(DSPARB);
2422 int size;
2423
2424 size = dsparb & 0x7f;
2425 size >>= 1; /* Convert to cachelines */
2426
2427 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
2428 size);
2429
2430 return size;
2431}
2432
2433static void g4x_update_wm(struct drm_device *dev, int unused, int unused2,
2434 int unused3, int unused4)
2121{ 2435{
2122 struct drm_i915_private *dev_priv = dev->dev_private; 2436 struct drm_i915_private *dev_priv = dev->dev_private;
2123 u32 fw_blc_self = I915_READ(FW_BLC_SELF); 2437 u32 fw_blc_self = I915_READ(FW_BLC_SELF);
@@ -2129,7 +2443,8 @@ static void g4x_update_wm(struct drm_device *dev)
2129 I915_WRITE(FW_BLC_SELF, fw_blc_self); 2443 I915_WRITE(FW_BLC_SELF, fw_blc_self);
2130} 2444}
2131 2445
2132static void i965_update_wm(struct drm_device *dev) 2446static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
2447 int unused3, int unused4)
2133{ 2448{
2134 struct drm_i915_private *dev_priv = dev->dev_private; 2449 struct drm_i915_private *dev_priv = dev->dev_private;
2135 2450
@@ -2165,8 +2480,8 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2165 cacheline_size = planea_params.cacheline_size; 2480 cacheline_size = planea_params.cacheline_size;
2166 2481
2167 /* Update per-plane FIFO sizes */ 2482 /* Update per-plane FIFO sizes */
2168 planea_params.fifo_size = intel_get_fifo_size(dev, 0); 2483 planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
2169 planeb_params.fifo_size = intel_get_fifo_size(dev, 1); 2484 planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
2170 2485
2171 planea_wm = intel_calculate_wm(planea_clock, &planea_params, 2486 planea_wm = intel_calculate_wm(planea_clock, &planea_params,
2172 pixel_size, latency_ns); 2487 pixel_size, latency_ns);
@@ -2213,14 +2528,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2213 I915_WRITE(FW_BLC2, fwater_hi); 2528 I915_WRITE(FW_BLC2, fwater_hi);
2214} 2529}
2215 2530
2216static void i830_update_wm(struct drm_device *dev, int planea_clock, 2531static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
2217 int pixel_size) 2532 int unused2, int pixel_size)
2218{ 2533{
2219 struct drm_i915_private *dev_priv = dev->dev_private; 2534 struct drm_i915_private *dev_priv = dev->dev_private;
2220 uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; 2535 uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
2221 int planea_wm; 2536 int planea_wm;
2222 2537
2223 i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0); 2538 i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
2224 2539
2225 planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, 2540 planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
2226 pixel_size, latency_ns); 2541 pixel_size, latency_ns);
@@ -2264,6 +2579,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock,
2264 */ 2579 */
2265static void intel_update_watermarks(struct drm_device *dev) 2580static void intel_update_watermarks(struct drm_device *dev)
2266{ 2581{
2582 struct drm_i915_private *dev_priv = dev->dev_private;
2267 struct drm_crtc *crtc; 2583 struct drm_crtc *crtc;
2268 struct intel_crtc *intel_crtc; 2584 struct intel_crtc *intel_crtc;
2269 int sr_hdisplay = 0; 2585 int sr_hdisplay = 0;
@@ -2302,15 +2618,8 @@ static void intel_update_watermarks(struct drm_device *dev)
2302 else if (IS_IGD(dev)) 2618 else if (IS_IGD(dev))
2303 igd_disable_cxsr(dev); 2619 igd_disable_cxsr(dev);
2304 2620
2305 if (IS_G4X(dev)) 2621 dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
2306 g4x_update_wm(dev); 2622 sr_hdisplay, pixel_size);
2307 else if (IS_I965G(dev))
2308 i965_update_wm(dev);
2309 else if (IS_I9XX(dev) || IS_MOBILE(dev))
2310 i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay,
2311 pixel_size);
2312 else
2313 i830_update_wm(dev, planea_clock, pixel_size);
2314} 2623}
2315 2624
2316static int intel_crtc_mode_set(struct drm_crtc *crtc, 2625static int intel_crtc_mode_set(struct drm_crtc *crtc,
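This hunk is the payoff of the per-chip function split running through this diff: chipset checks move out of the hot paths into a table filled once by intel_init_display() (end of this section). A minimal sketch of the table's shape; the field names follow the calls in this diff, while the struct name and exact layout in i915_drv.h are not shown here:

struct intel_display_funcs {
	void (*dpms)(struct drm_crtc *crtc, int mode);
	void (*update_wm)(struct drm_device *dev, int planea_clock,
			  int planeb_clock, int sr_hdisplay,
			  int pixel_size);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	int (*get_display_clock_speed)(struct drm_device *dev);
	bool (*fbc_enabled)(struct drm_crtc *crtc);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
};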
@@ -2323,10 +2632,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2323 struct drm_i915_private *dev_priv = dev->dev_private; 2632 struct drm_i915_private *dev_priv = dev->dev_private;
2324 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2633 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2325 int pipe = intel_crtc->pipe; 2634 int pipe = intel_crtc->pipe;
2635 int plane = intel_crtc->plane;
2326 int fp_reg = (pipe == 0) ? FPA0 : FPB0; 2636 int fp_reg = (pipe == 0) ? FPA0 : FPB0;
2327 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 2637 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
2328 int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; 2638 int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
2329 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 2639 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
2330 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; 2640 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
2331 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; 2641 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
2332 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; 2642 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
@@ -2334,8 +2644,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2334 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; 2644 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
2335 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; 2645 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
2336 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; 2646 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
2337 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; 2647 int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
2338 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; 2648 int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
2339 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; 2649 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
2340 int refclk, num_outputs = 0; 2650 int refclk, num_outputs = 0;
2341 intel_clock_t clock, reduced_clock; 2651 intel_clock_t clock, reduced_clock;
@@ -2568,7 +2878,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2568 enable color space conversion */ 2878 enable color space conversion */
2569 if (!IS_IGDNG(dev)) { 2879 if (!IS_IGDNG(dev)) {
2570 if (pipe == 0) 2880 if (pipe == 0)
2571 dspcntr |= DISPPLANE_SEL_PIPE_A; 2881 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
2572 else 2882 else
2573 dspcntr |= DISPPLANE_SEL_PIPE_B; 2883 dspcntr |= DISPPLANE_SEL_PIPE_B;
2574 } 2884 }
@@ -2580,7 +2890,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2580 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the 2890 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
2581 * pipe == 0 check? 2891 * pipe == 0 check?
2582 */ 2892 */
2583 if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10) 2893 if (mode->clock >
2894 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
2584 pipeconf |= PIPEACONF_DOUBLE_WIDE; 2895 pipeconf |= PIPEACONF_DOUBLE_WIDE;
2585 else 2896 else
2586 pipeconf &= ~PIPEACONF_DOUBLE_WIDE; 2897 pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
@@ -2652,9 +2963,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2652 udelay(150); 2963 udelay(150);
2653 2964
2654 if (IS_I965G(dev) && !IS_IGDNG(dev)) { 2965 if (IS_I965G(dev) && !IS_IGDNG(dev)) {
2655 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 2966 if (is_sdvo) {
2656 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 2967 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
2968 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
2657 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); 2969 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
2970 } else
2971 I915_WRITE(dpll_md_reg, 0);
2658 } else { 2972 } else {
2659 /* write it again -- the BIOS does, after all */ 2973 /* write it again -- the BIOS does, after all */
2660 I915_WRITE(dpll_reg, dpll); 2974 I915_WRITE(dpll_reg, dpll);
@@ -2734,6 +3048,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2734 /* Flush the plane changes */ 3048 /* Flush the plane changes */
2735 ret = intel_pipe_set_base(crtc, x, y, old_fb); 3049 ret = intel_pipe_set_base(crtc, x, y, old_fb);
2736 3050
3051	if (IS_I965G(dev) || plane == 0)
3052 intel_update_fbc(crtc, &crtc->mode);
3053
2737 intel_update_watermarks(dev); 3054 intel_update_watermarks(dev);
2738 3055
2739 drm_vblank_post_modeset(dev, pipe); 3056 drm_vblank_post_modeset(dev, pipe);
@@ -2778,6 +3095,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
2778 struct drm_gem_object *bo; 3095 struct drm_gem_object *bo;
2779 struct drm_i915_gem_object *obj_priv; 3096 struct drm_i915_gem_object *obj_priv;
2780 int pipe = intel_crtc->pipe; 3097 int pipe = intel_crtc->pipe;
3098 int plane = intel_crtc->plane;
2781 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; 3099 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
2782 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; 3100 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
2783 uint32_t temp = I915_READ(control); 3101 uint32_t temp = I915_READ(control);
@@ -2863,6 +3181,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
2863 i915_gem_object_unpin(intel_crtc->cursor_bo); 3181 i915_gem_object_unpin(intel_crtc->cursor_bo);
2864 drm_gem_object_unreference(intel_crtc->cursor_bo); 3182 drm_gem_object_unreference(intel_crtc->cursor_bo);
2865 } 3183 }
3184
3185 if ((IS_I965G(dev) || plane == 0))
3186 intel_update_fbc(crtc, &crtc->mode);
3187
2866 mutex_unlock(&dev->struct_mutex); 3188 mutex_unlock(&dev->struct_mutex);
2867 3189
2868 intel_crtc->cursor_addr = addr; 3190 intel_crtc->cursor_addr = addr;
@@ -3544,6 +3866,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
3544 intel_crtc->lut_b[i] = i; 3866 intel_crtc->lut_b[i] = i;
3545 } 3867 }
3546 3868
3869 /* Swap pipes & planes for FBC on pre-965 */
3870 intel_crtc->pipe = pipe;
3871 intel_crtc->plane = pipe;
3872 if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
3873 DRM_DEBUG("swapping pipes & planes for FBC\n");
3874 intel_crtc->plane = ((pipe == 0) ? 1 : 0);
3875 }
3876
3547 intel_crtc->cursor_addr = 0; 3877 intel_crtc->cursor_addr = 0;
3548 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; 3878 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
3549 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 3879 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
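[The swap above pairs pipe A with plane B and pipe B with plane A on mobile pre-965 parts. A sketch of the resulting mapping, under the assumption (per the comment in the hunk) that only plane A can be compressed by the pre-965 FBC unit while panels are commonly scanned out on pipe B:

	static int plane_for_pipe(struct drm_device *dev, int pipe)
	{
		/* pre-965 mobile: cross-wire so plane A ends up on pipe B */
		if (IS_MOBILE(dev) && IS_I9XX(dev) && !IS_I965G(dev))
			return pipe == 0 ? 1 : 0;
		return pipe;	/* everywhere else: identity mapping */
	}
]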
@@ -3826,6 +4156,73 @@ void intel_init_clock_gating(struct drm_device *dev)
3826 } 4156 }
3827} 4157}
3828 4158
4159/* Set up chip specific display functions */
4160static void intel_init_display(struct drm_device *dev)
4161{
4162 struct drm_i915_private *dev_priv = dev->dev_private;
4163
4164 /* We always want a DPMS function */
4165 if (IS_IGDNG(dev))
4166 dev_priv->display.dpms = igdng_crtc_dpms;
4167 else
4168 dev_priv->display.dpms = i9xx_crtc_dpms;
4169
 4170 /* Only mobile has FBC; leave pointers NULL for other chips */
4171 if (IS_MOBILE(dev)) {
4172 if (IS_GM45(dev)) {
4173 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
4174 dev_priv->display.enable_fbc = g4x_enable_fbc;
4175 dev_priv->display.disable_fbc = g4x_disable_fbc;
4176 } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
4177 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
4178 dev_priv->display.enable_fbc = i8xx_enable_fbc;
4179 dev_priv->display.disable_fbc = i8xx_disable_fbc;
4180 }
4181 /* 855GM needs testing */
4182 }
4183
4184 /* Returns the core display clock speed */
4185 if (IS_I945G(dev))
4186 dev_priv->display.get_display_clock_speed =
4187 i945_get_display_clock_speed;
4188 else if (IS_I915G(dev))
4189 dev_priv->display.get_display_clock_speed =
4190 i915_get_display_clock_speed;
4191 else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
4192 dev_priv->display.get_display_clock_speed =
4193 i9xx_misc_get_display_clock_speed;
4194 else if (IS_I915GM(dev))
4195 dev_priv->display.get_display_clock_speed =
4196 i915gm_get_display_clock_speed;
4197 else if (IS_I865G(dev))
4198 dev_priv->display.get_display_clock_speed =
4199 i865_get_display_clock_speed;
4200 else if (IS_I855(dev))
4201 dev_priv->display.get_display_clock_speed =
4202 i855_get_display_clock_speed;
4203 else /* 852, 830 */
4204 dev_priv->display.get_display_clock_speed =
4205 i830_get_display_clock_speed;
4206
4207 /* For FIFO watermark updates */
4208 if (IS_G4X(dev))
4209 dev_priv->display.update_wm = g4x_update_wm;
4210 else if (IS_I965G(dev))
4211 dev_priv->display.update_wm = i965_update_wm;
4212 else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
4213 dev_priv->display.update_wm = i9xx_update_wm;
4214 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
4215 } else {
4216 if (IS_I85X(dev))
4217 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
4218 else if (IS_845G(dev))
4219 dev_priv->display.get_fifo_size = i845_get_fifo_size;
4220 else
4221 dev_priv->display.get_fifo_size = i830_get_fifo_size;
4222 dev_priv->display.update_wm = i830_update_wm;
4223 }
4224}
4225
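[intel_init_display() fills a per-chip table of function pointers once at init time; the rest of the driver dispatches through dev_priv->display and treats a NULL hook as "not supported on this chip" (see the disable_fbc check in intel_modeset_cleanup below). A minimal caller-side sketch, field types assumed from the call sites in this patch (the real declarations live in i915_drv.h):

	static int core_clock_khz(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;

		/* always set: every chip gets some get_display_clock_speed */
		return dev_priv->display.get_display_clock_speed(dev);
	}

	static void fbc_off_if_present(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;

		/* NULL on chips without FBC, so the pointer doubles as a
		 * feature flag */
		if (dev_priv->display.disable_fbc)
			dev_priv->display.disable_fbc(dev);
	}
]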
3829void intel_modeset_init(struct drm_device *dev) 4226void intel_modeset_init(struct drm_device *dev)
3830{ 4227{
3831 struct drm_i915_private *dev_priv = dev->dev_private; 4228 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3839,6 +4236,8 @@ void intel_modeset_init(struct drm_device *dev)
3839 4236
3840 dev->mode_config.funcs = (void *)&intel_mode_funcs; 4237 dev->mode_config.funcs = (void *)&intel_mode_funcs;
3841 4238
4239 intel_init_display(dev);
4240
3842 if (IS_I965G(dev)) { 4241 if (IS_I965G(dev)) {
3843 dev->mode_config.max_width = 8192; 4242 dev->mode_config.max_width = 8192;
3844 dev->mode_config.max_height = 8192; 4243 dev->mode_config.max_height = 8192;
@@ -3904,6 +4303,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
3904 4303
3905 mutex_unlock(&dev->struct_mutex); 4304 mutex_unlock(&dev->struct_mutex);
3906 4305
4306 if (dev_priv->display.disable_fbc)
4307 dev_priv->display.disable_fbc(dev);
4308
3907 drm_mode_config_cleanup(dev); 4309 drm_mode_config_cleanup(dev);
3908} 4310}
3909 4311
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3ebbbabfe59b..8aa4b7f30daa 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -28,6 +28,7 @@
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/i2c-id.h> 29#include <linux/i2c-id.h>
30#include <linux/i2c-algo-bit.h> 30#include <linux/i2c-algo-bit.h>
31#include "i915_drv.h"
31#include "drm_crtc.h" 32#include "drm_crtc.h"
32 33
33#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
@@ -111,8 +112,8 @@ struct intel_output {
111 112
112struct intel_crtc { 113struct intel_crtc {
113 struct drm_crtc base; 114 struct drm_crtc base;
114 int pipe; 115 enum pipe pipe;
115 int plane; 116 enum plane plane;
116 struct drm_gem_object *cursor_bo; 117 struct drm_gem_object *cursor_bo;
117 uint32_t cursor_addr; 118 uint32_t cursor_addr;
118 u8 lut_r[256], lut_g[256], lut_b[256]; 119 u8 lut_r[256], lut_g[256], lut_b[256];
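[pipe and plane become proper enum types here, which is why intel_drv.h now pulls in i915_drv.h. The definitions are not part of this hunk; the assumed shape, matching how both values are used as 0/1 indices throughout the patch:

	/* Assumed definitions (their home is i915_drv.h): */
	enum pipe {
		PIPE_A = 0,
		PIPE_B,
	};

	enum plane {
		PLANE_A = 0,
		PLANE_B,
	};
]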
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index dafc0da1c256..98ae3d73577e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -27,6 +27,7 @@
27 * Jesse Barnes <jesse.barnes@intel.com> 27 * Jesse Barnes <jesse.barnes@intel.com>
28 */ 28 */
29 29
30#include <acpi/button.h>
30#include <linux/dmi.h> 31#include <linux/dmi.h>
31#include <linux/i2c.h> 32#include <linux/i2c.h>
32#include "drmP.h" 33#include "drmP.h"
@@ -295,6 +296,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
295 goto out; 296 goto out;
296 } 297 }
297 298
299 /* full screen scale for now */
300 if (IS_IGDNG(dev))
301 goto out;
302
298 /* 965+ wants fuzzy fitting */ 303 /* 965+ wants fuzzy fitting */
299 if (IS_I965G(dev)) 304 if (IS_I965G(dev))
300 pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | 305 pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
@@ -322,8 +327,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
322 * to register description and PRM. 327 * to register description and PRM.
323 * Change the value here to see the borders for debugging 328 * Change the value here to see the borders for debugging
324 */ 329 */
325 I915_WRITE(BCLRPAT_A, 0); 330 if (!IS_IGDNG(dev)) {
326 I915_WRITE(BCLRPAT_B, 0); 331 I915_WRITE(BCLRPAT_A, 0);
332 I915_WRITE(BCLRPAT_B, 0);
333 }
327 334
328 switch (lvds_priv->fitting_mode) { 335 switch (lvds_priv->fitting_mode) {
329 case DRM_MODE_SCALE_CENTER: 336 case DRM_MODE_SCALE_CENTER:
@@ -572,7 +579,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
572 * settings. 579 * settings.
573 */ 580 */
574 581
575 /* No panel fitting yet, fixme */
576 if (IS_IGDNG(dev)) 582 if (IS_IGDNG(dev))
577 return; 583 return;
578 584
@@ -585,15 +591,33 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
585 I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); 591 I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
586} 592}
587 593
 594/* Some lid devices report incorrect lid status; assume they're connected */
595static const struct dmi_system_id bad_lid_status[] = {
596 {
597 .ident = "Aspire One",
598 .matches = {
599 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
600 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
601 },
602 },
603 { }
604};
605
588/** 606/**
589 * Detect the LVDS connection. 607 * Detect the LVDS connection.
590 * 608 *
 591 * This always returns CONNECTOR_STATUS_CONNECTED. This connector should only have 609 * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
592 * been set up if the LVDS was actually connected anyway. 610 * connected and closed means disconnected. We also send hotplug events as
611 * needed, using lid status notification from the input layer.
593 */ 612 */
594static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 613static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
595{ 614{
596 return connector_status_connected; 615 enum drm_connector_status status = connector_status_connected;
616
617 if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
618 status = connector_status_disconnected;
619
620 return status;
597} 621}
598 622
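[The detect logic above trusts the lid switch unless the machine is on the bad_lid_status DMI quirk list. Extending the list is one more vendor/product match; a hypothetical entry (machine name invented) would look like:

	{
		.ident = "Example Netbook",	/* hypothetical machine */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example one"),
		},
	},
]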
599/** 623/**
@@ -632,6 +656,24 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
632 return 0; 656 return 0;
633} 657}
634 658
659static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
660 void *unused)
661{
662 struct drm_i915_private *dev_priv =
663 container_of(nb, struct drm_i915_private, lid_notifier);
664 struct drm_device *dev = dev_priv->dev;
665
666 if (acpi_lid_open() && !dev_priv->suspended) {
667 mutex_lock(&dev->mode_config.mutex);
668 drm_helper_resume_force_mode(dev);
669 mutex_unlock(&dev->mode_config.mutex);
670 }
671
672 drm_sysfs_hotplug_event(dev_priv->dev);
673
674 return NOTIFY_OK;
675}
676
635/** 677/**
636 * intel_lvds_destroy - unregister and free LVDS structures 678 * intel_lvds_destroy - unregister and free LVDS structures
637 * @connector: connector to free 679 * @connector: connector to free
@@ -641,10 +683,14 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
641 */ 683 */
642static void intel_lvds_destroy(struct drm_connector *connector) 684static void intel_lvds_destroy(struct drm_connector *connector)
643{ 685{
686 struct drm_device *dev = connector->dev;
644 struct intel_output *intel_output = to_intel_output(connector); 687 struct intel_output *intel_output = to_intel_output(connector);
688 struct drm_i915_private *dev_priv = dev->dev_private;
645 689
646 if (intel_output->ddc_bus) 690 if (intel_output->ddc_bus)
647 intel_i2c_destroy(intel_output->ddc_bus); 691 intel_i2c_destroy(intel_output->ddc_bus);
692 if (dev_priv->lid_notifier.notifier_call)
693 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
648 drm_sysfs_connector_remove(connector); 694 drm_sysfs_connector_remove(connector);
649 drm_connector_cleanup(connector); 695 drm_connector_cleanup(connector);
650 kfree(connector); 696 kfree(connector);
@@ -1011,6 +1057,11 @@ out:
1011 pwm |= PWM_PCH_ENABLE; 1057 pwm |= PWM_PCH_ENABLE;
1012 I915_WRITE(BLC_PWM_PCH_CTL1, pwm); 1058 I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
1013 } 1059 }
1060 dev_priv->lid_notifier.notifier_call = intel_lid_notify;
1061 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
1062 DRM_DEBUG("lid notifier registration failed\n");
1063 dev_priv->lid_notifier.notifier_call = NULL;
1064 }
1014 drm_sysfs_connector_add(connector); 1065 drm_sysfs_connector_add(connector);
1015 return; 1066 return;
1016 1067
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0bf28efcf2c1..083bec2e50f9 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -135,6 +135,30 @@ struct intel_sdvo_priv {
135 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; 135 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
136 struct intel_sdvo_dtd save_output_dtd[16]; 136 struct intel_sdvo_dtd save_output_dtd[16];
137 u32 save_SDVOX; 137 u32 save_SDVOX;
138 /* add the property for the SDVO-TV */
139 struct drm_property *left_property;
140 struct drm_property *right_property;
141 struct drm_property *top_property;
142 struct drm_property *bottom_property;
143 struct drm_property *hpos_property;
144 struct drm_property *vpos_property;
145
146 /* add the property for the SDVO-TV/LVDS */
147 struct drm_property *brightness_property;
148 struct drm_property *contrast_property;
149 struct drm_property *saturation_property;
150 struct drm_property *hue_property;
151
 152 /* Add variables to record the current settings for the above properties */
153 u32 left_margin, right_margin, top_margin, bottom_margin;
 154 /* this is to get the range of the margins. */
155 u32 max_hscan, max_vscan;
156 u32 max_hpos, cur_hpos;
157 u32 max_vpos, cur_vpos;
158 u32 cur_brightness, max_brightness;
159 u32 cur_contrast, max_contrast;
160 u32 cur_saturation, max_saturation;
161 u32 cur_hue, max_hue;
138}; 162};
139 163
140static bool 164static bool
@@ -281,6 +305,31 @@ static const struct _sdvo_cmd_name {
281 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), 305 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
282 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), 306 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
283 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), 307 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
308 /* Add the op code for SDVO enhancements */
309 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H),
310 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H),
311 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H),
312 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V),
313 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V),
314 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V),
315 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
316 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
317 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
318 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
319 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
320 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
321 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
322 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
323 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
324 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
325 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
326 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
327 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
328 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
329 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
330 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
331 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
332 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
284 /* HDMI op code */ 333 /* HDMI op code */
285 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), 334 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
286 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), 335 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
@@ -981,7 +1030,7 @@ static void intel_sdvo_set_tv_format(struct intel_output *output)
981 1030
982 status = intel_sdvo_read_response(output, NULL, 0); 1031 status = intel_sdvo_read_response(output, NULL, 0);
983 if (status != SDVO_CMD_STATUS_SUCCESS) 1032 if (status != SDVO_CMD_STATUS_SUCCESS)
984 DRM_DEBUG("%s: Failed to set TV format\n", 1033 DRM_DEBUG_KMS("%s: Failed to set TV format\n",
985 SDVO_NAME(sdvo_priv)); 1034 SDVO_NAME(sdvo_priv));
986} 1035}
987 1036
@@ -1792,6 +1841,45 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
1792 return 1; 1841 return 1;
1793} 1842}
1794 1843
1844static
1845void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
1846{
1847 struct intel_output *intel_output = to_intel_output(connector);
1848 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1849 struct drm_device *dev = connector->dev;
1850
1851 if (sdvo_priv->is_tv) {
1852 if (sdvo_priv->left_property)
1853 drm_property_destroy(dev, sdvo_priv->left_property);
1854 if (sdvo_priv->right_property)
1855 drm_property_destroy(dev, sdvo_priv->right_property);
1856 if (sdvo_priv->top_property)
1857 drm_property_destroy(dev, sdvo_priv->top_property);
1858 if (sdvo_priv->bottom_property)
1859 drm_property_destroy(dev, sdvo_priv->bottom_property);
1860 if (sdvo_priv->hpos_property)
1861 drm_property_destroy(dev, sdvo_priv->hpos_property);
1862 if (sdvo_priv->vpos_property)
1863 drm_property_destroy(dev, sdvo_priv->vpos_property);
1864 }
1865 if (sdvo_priv->is_tv) {
1866 if (sdvo_priv->saturation_property)
1867 drm_property_destroy(dev,
1868 sdvo_priv->saturation_property);
1869 if (sdvo_priv->contrast_property)
1870 drm_property_destroy(dev,
1871 sdvo_priv->contrast_property);
1872 if (sdvo_priv->hue_property)
1873 drm_property_destroy(dev, sdvo_priv->hue_property);
1874 }
1875 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
1876 if (sdvo_priv->brightness_property)
1877 drm_property_destroy(dev,
1878 sdvo_priv->brightness_property);
1879 }
1880 return;
1881}
1882
1795static void intel_sdvo_destroy(struct drm_connector *connector) 1883static void intel_sdvo_destroy(struct drm_connector *connector)
1796{ 1884{
1797 struct intel_output *intel_output = to_intel_output(connector); 1885 struct intel_output *intel_output = to_intel_output(connector);
@@ -1812,6 +1900,9 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1812 drm_property_destroy(connector->dev, 1900 drm_property_destroy(connector->dev,
1813 sdvo_priv->tv_format_property); 1901 sdvo_priv->tv_format_property);
1814 1902
1903 if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
1904 intel_sdvo_destroy_enhance_property(connector);
1905
1815 drm_sysfs_connector_remove(connector); 1906 drm_sysfs_connector_remove(connector);
1816 drm_connector_cleanup(connector); 1907 drm_connector_cleanup(connector);
1817 1908
@@ -1829,6 +1920,8 @@ intel_sdvo_set_property(struct drm_connector *connector,
1829 struct drm_crtc *crtc = encoder->crtc; 1920 struct drm_crtc *crtc = encoder->crtc;
1830 int ret = 0; 1921 int ret = 0;
1831 bool changed = false; 1922 bool changed = false;
1923 uint8_t cmd, status;
1924 uint16_t temp_value;
1832 1925
1833 ret = drm_connector_property_set_value(connector, property, val); 1926 ret = drm_connector_property_set_value(connector, property, val);
1834 if (ret < 0) 1927 if (ret < 0)
@@ -1845,11 +1938,102 @@ intel_sdvo_set_property(struct drm_connector *connector,
1845 1938
1846 sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; 1939 sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
1847 changed = true; 1940 changed = true;
1848 } else {
1849 ret = -EINVAL;
1850 goto out;
1851 } 1941 }
1852 1942
1943 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
1944 cmd = 0;
1945 temp_value = val;
1946 if (sdvo_priv->left_property == property) {
1947 drm_connector_property_set_value(connector,
1948 sdvo_priv->right_property, val);
1949 if (sdvo_priv->left_margin == temp_value)
1950 goto out;
1951
1952 sdvo_priv->left_margin = temp_value;
1953 sdvo_priv->right_margin = temp_value;
1954 temp_value = sdvo_priv->max_hscan -
1955 sdvo_priv->left_margin;
1956 cmd = SDVO_CMD_SET_OVERSCAN_H;
1957 } else if (sdvo_priv->right_property == property) {
1958 drm_connector_property_set_value(connector,
1959 sdvo_priv->left_property, val);
1960 if (sdvo_priv->right_margin == temp_value)
1961 goto out;
1962
1963 sdvo_priv->left_margin = temp_value;
1964 sdvo_priv->right_margin = temp_value;
1965 temp_value = sdvo_priv->max_hscan -
1966 sdvo_priv->left_margin;
1967 cmd = SDVO_CMD_SET_OVERSCAN_H;
1968 } else if (sdvo_priv->top_property == property) {
1969 drm_connector_property_set_value(connector,
1970 sdvo_priv->bottom_property, val);
1971 if (sdvo_priv->top_margin == temp_value)
1972 goto out;
1973
1974 sdvo_priv->top_margin = temp_value;
1975 sdvo_priv->bottom_margin = temp_value;
1976 temp_value = sdvo_priv->max_vscan -
1977 sdvo_priv->top_margin;
1978 cmd = SDVO_CMD_SET_OVERSCAN_V;
1979 } else if (sdvo_priv->bottom_property == property) {
1980 drm_connector_property_set_value(connector,
1981 sdvo_priv->top_property, val);
1982 if (sdvo_priv->bottom_margin == temp_value)
1983 goto out;
1984 sdvo_priv->top_margin = temp_value;
1985 sdvo_priv->bottom_margin = temp_value;
1986 temp_value = sdvo_priv->max_vscan -
1987 sdvo_priv->top_margin;
1988 cmd = SDVO_CMD_SET_OVERSCAN_V;
1989 } else if (sdvo_priv->hpos_property == property) {
1990 if (sdvo_priv->cur_hpos == temp_value)
1991 goto out;
1992
1993 cmd = SDVO_CMD_SET_POSITION_H;
1994 sdvo_priv->cur_hpos = temp_value;
1995 } else if (sdvo_priv->vpos_property == property) {
1996 if (sdvo_priv->cur_vpos == temp_value)
1997 goto out;
1998
1999 cmd = SDVO_CMD_SET_POSITION_V;
2000 sdvo_priv->cur_vpos = temp_value;
2001 } else if (sdvo_priv->saturation_property == property) {
2002 if (sdvo_priv->cur_saturation == temp_value)
2003 goto out;
2004
2005 cmd = SDVO_CMD_SET_SATURATION;
2006 sdvo_priv->cur_saturation = temp_value;
2007 } else if (sdvo_priv->contrast_property == property) {
2008 if (sdvo_priv->cur_contrast == temp_value)
2009 goto out;
2010
2011 cmd = SDVO_CMD_SET_CONTRAST;
2012 sdvo_priv->cur_contrast = temp_value;
2013 } else if (sdvo_priv->hue_property == property) {
2014 if (sdvo_priv->cur_hue == temp_value)
2015 goto out;
2016
2017 cmd = SDVO_CMD_SET_HUE;
2018 sdvo_priv->cur_hue = temp_value;
2019 } else if (sdvo_priv->brightness_property == property) {
2020 if (sdvo_priv->cur_brightness == temp_value)
2021 goto out;
2022
2023 cmd = SDVO_CMD_SET_BRIGHTNESS;
2024 sdvo_priv->cur_brightness = temp_value;
2025 }
2026 if (cmd) {
2027 intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2);
2028 status = intel_sdvo_read_response(intel_output,
2029 NULL, 0);
2030 if (status != SDVO_CMD_STATUS_SUCCESS) {
 2031 DRM_DEBUG_KMS("Incorrect SDVO command\n");
2032 return -EINVAL;
2033 }
2034 changed = true;
2035 }
2036 }
1853 if (changed && crtc) 2037 if (changed && crtc)
1854 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, 2038 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
1855 crtc->y, crtc->fb); 2039 crtc->y, crtc->fb);
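[Every branch above ends in the same two-step SDVO transaction: write an opcode with a two-byte argument, then read back a status byte. A sketch of that repeated tail factored into a helper (helper name invented, not part of this patch):

	static int sdvo_set_u16(struct intel_output *output, uint8_t cmd,
				uint16_t value)
	{
		uint8_t status;

		intel_sdvo_write_cmd(output, cmd, &value, 2);
		status = intel_sdvo_read_response(output, NULL, 0);
		return (status == SDVO_CMD_STATUS_SUCCESS) ? 0 : -EINVAL;
	}
]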
@@ -2090,6 +2274,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2090 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; 2274 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
2091 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2275 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2092 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2276 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2277 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2278 (1 << INTEL_ANALOG_CLONE_BIT);
2093 } else if (flags & SDVO_OUTPUT_LVDS0) { 2279 } else if (flags & SDVO_OUTPUT_LVDS0) {
2094 2280
2095 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2281 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
@@ -2176,6 +2362,310 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
2176 2362
2177} 2363}
2178 2364
2365static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2366{
2367 struct intel_output *intel_output = to_intel_output(connector);
2368 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
2369 struct intel_sdvo_enhancements_reply sdvo_data;
2370 struct drm_device *dev = connector->dev;
2371 uint8_t status;
2372 uint16_t response, data_value[2];
2373
2374 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2375 NULL, 0);
2376 status = intel_sdvo_read_response(intel_output, &sdvo_data,
2377 sizeof(sdvo_data));
2378 if (status != SDVO_CMD_STATUS_SUCCESS) {
 2379 DRM_DEBUG_KMS("incorrect response is returned\n");
2380 return;
2381 }
2382 response = *((uint16_t *)&sdvo_data);
2383 if (!response) {
2384 DRM_DEBUG_KMS("No enhancement is supported\n");
2385 return;
2386 }
2387 if (sdvo_priv->is_tv) {
2388 /* when horizontal overscan is supported, Add the left/right
2389 * property
2390 */
2391 if (sdvo_data.overscan_h) {
2392 intel_sdvo_write_cmd(intel_output,
2393 SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
2394 status = intel_sdvo_read_response(intel_output,
2395 &data_value, 4);
2396 if (status != SDVO_CMD_STATUS_SUCCESS) {
2397 DRM_DEBUG_KMS("Incorrect SDVO max "
2398 "h_overscan\n");
2399 return;
2400 }
2401 intel_sdvo_write_cmd(intel_output,
2402 SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
2403 status = intel_sdvo_read_response(intel_output,
2404 &response, 2);
2405 if (status != SDVO_CMD_STATUS_SUCCESS) {
2406 DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
2407 return;
2408 }
2409 sdvo_priv->max_hscan = data_value[0];
2410 sdvo_priv->left_margin = data_value[0] - response;
2411 sdvo_priv->right_margin = sdvo_priv->left_margin;
2412 sdvo_priv->left_property =
2413 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2414 "left_margin", 2);
2415 sdvo_priv->left_property->values[0] = 0;
2416 sdvo_priv->left_property->values[1] = data_value[0];
2417 drm_connector_attach_property(connector,
2418 sdvo_priv->left_property,
2419 sdvo_priv->left_margin);
2420 sdvo_priv->right_property =
2421 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2422 "right_margin", 2);
2423 sdvo_priv->right_property->values[0] = 0;
2424 sdvo_priv->right_property->values[1] = data_value[0];
2425 drm_connector_attach_property(connector,
2426 sdvo_priv->right_property,
2427 sdvo_priv->right_margin);
2428 DRM_DEBUG_KMS("h_overscan: max %d, "
2429 "default %d, current %d\n",
2430 data_value[0], data_value[1], response);
2431 }
2432 if (sdvo_data.overscan_v) {
2433 intel_sdvo_write_cmd(intel_output,
2434 SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
2435 status = intel_sdvo_read_response(intel_output,
2436 &data_value, 4);
2437 if (status != SDVO_CMD_STATUS_SUCCESS) {
2438 DRM_DEBUG_KMS("Incorrect SDVO max "
2439 "v_overscan\n");
2440 return;
2441 }
2442 intel_sdvo_write_cmd(intel_output,
2443 SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
2444 status = intel_sdvo_read_response(intel_output,
2445 &response, 2);
2446 if (status != SDVO_CMD_STATUS_SUCCESS) {
2447 DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
2448 return;
2449 }
2450 sdvo_priv->max_vscan = data_value[0];
2451 sdvo_priv->top_margin = data_value[0] - response;
2452 sdvo_priv->bottom_margin = sdvo_priv->top_margin;
2453 sdvo_priv->top_property =
2454 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2455 "top_margin", 2);
2456 sdvo_priv->top_property->values[0] = 0;
2457 sdvo_priv->top_property->values[1] = data_value[0];
2458 drm_connector_attach_property(connector,
2459 sdvo_priv->top_property,
2460 sdvo_priv->top_margin);
2461 sdvo_priv->bottom_property =
2462 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2463 "bottom_margin", 2);
2464 sdvo_priv->bottom_property->values[0] = 0;
2465 sdvo_priv->bottom_property->values[1] = data_value[0];
2466 drm_connector_attach_property(connector,
2467 sdvo_priv->bottom_property,
2468 sdvo_priv->bottom_margin);
2469 DRM_DEBUG_KMS("v_overscan: max %d, "
2470 "default %d, current %d\n",
2471 data_value[0], data_value[1], response);
2472 }
2473 if (sdvo_data.position_h) {
2474 intel_sdvo_write_cmd(intel_output,
2475 SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
2476 status = intel_sdvo_read_response(intel_output,
2477 &data_value, 4);
2478 if (status != SDVO_CMD_STATUS_SUCCESS) {
2479 DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
2480 return;
2481 }
2482 intel_sdvo_write_cmd(intel_output,
2483 SDVO_CMD_GET_POSITION_H, NULL, 0);
2484 status = intel_sdvo_read_response(intel_output,
2485 &response, 2);
2486 if (status != SDVO_CMD_STATUS_SUCCESS) {
 2487 DRM_DEBUG_KMS("Incorrect SDVO get h_position\n");
2488 return;
2489 }
2490 sdvo_priv->max_hpos = data_value[0];
2491 sdvo_priv->cur_hpos = response;
2492 sdvo_priv->hpos_property =
2493 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2494 "hpos", 2);
2495 sdvo_priv->hpos_property->values[0] = 0;
2496 sdvo_priv->hpos_property->values[1] = data_value[0];
2497 drm_connector_attach_property(connector,
2498 sdvo_priv->hpos_property,
2499 sdvo_priv->cur_hpos);
2500 DRM_DEBUG_KMS("h_position: max %d, "
2501 "default %d, current %d\n",
2502 data_value[0], data_value[1], response);
2503 }
2504 if (sdvo_data.position_v) {
2505 intel_sdvo_write_cmd(intel_output,
2506 SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
2507 status = intel_sdvo_read_response(intel_output,
2508 &data_value, 4);
2509 if (status != SDVO_CMD_STATUS_SUCCESS) {
2510 DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
2511 return;
2512 }
2513 intel_sdvo_write_cmd(intel_output,
2514 SDVO_CMD_GET_POSITION_V, NULL, 0);
2515 status = intel_sdvo_read_response(intel_output,
2516 &response, 2);
2517 if (status != SDVO_CMD_STATUS_SUCCESS) {
 2518 DRM_DEBUG_KMS("Incorrect SDVO get v_position\n");
2519 return;
2520 }
2521 sdvo_priv->max_vpos = data_value[0];
2522 sdvo_priv->cur_vpos = response;
2523 sdvo_priv->vpos_property =
2524 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2525 "vpos", 2);
2526 sdvo_priv->vpos_property->values[0] = 0;
2527 sdvo_priv->vpos_property->values[1] = data_value[0];
2528 drm_connector_attach_property(connector,
2529 sdvo_priv->vpos_property,
2530 sdvo_priv->cur_vpos);
2531 DRM_DEBUG_KMS("v_position: max %d, "
2532 "default %d, current %d\n",
2533 data_value[0], data_value[1], response);
2534 }
2535 }
2536 if (sdvo_priv->is_tv) {
2537 if (sdvo_data.saturation) {
2538 intel_sdvo_write_cmd(intel_output,
2539 SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
2540 status = intel_sdvo_read_response(intel_output,
2541 &data_value, 4);
2542 if (status != SDVO_CMD_STATUS_SUCCESS) {
2543 DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
2544 return;
2545 }
2546 intel_sdvo_write_cmd(intel_output,
2547 SDVO_CMD_GET_SATURATION, NULL, 0);
2548 status = intel_sdvo_read_response(intel_output,
2549 &response, 2);
2550 if (status != SDVO_CMD_STATUS_SUCCESS) {
2551 DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
2552 return;
2553 }
2554 sdvo_priv->max_saturation = data_value[0];
2555 sdvo_priv->cur_saturation = response;
2556 sdvo_priv->saturation_property =
2557 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2558 "saturation", 2);
2559 sdvo_priv->saturation_property->values[0] = 0;
2560 sdvo_priv->saturation_property->values[1] =
2561 data_value[0];
2562 drm_connector_attach_property(connector,
2563 sdvo_priv->saturation_property,
2564 sdvo_priv->cur_saturation);
2565 DRM_DEBUG_KMS("saturation: max %d, "
2566 "default %d, current %d\n",
2567 data_value[0], data_value[1], response);
2568 }
2569 if (sdvo_data.contrast) {
2570 intel_sdvo_write_cmd(intel_output,
2571 SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
2572 status = intel_sdvo_read_response(intel_output,
2573 &data_value, 4);
2574 if (status != SDVO_CMD_STATUS_SUCCESS) {
2575 DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
2576 return;
2577 }
2578 intel_sdvo_write_cmd(intel_output,
2579 SDVO_CMD_GET_CONTRAST, NULL, 0);
2580 status = intel_sdvo_read_response(intel_output,
2581 &response, 2);
2582 if (status != SDVO_CMD_STATUS_SUCCESS) {
2583 DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
2584 return;
2585 }
2586 sdvo_priv->max_contrast = data_value[0];
2587 sdvo_priv->cur_contrast = response;
2588 sdvo_priv->contrast_property =
2589 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2590 "contrast", 2);
2591 sdvo_priv->contrast_property->values[0] = 0;
2592 sdvo_priv->contrast_property->values[1] = data_value[0];
2593 drm_connector_attach_property(connector,
2594 sdvo_priv->contrast_property,
2595 sdvo_priv->cur_contrast);
2596 DRM_DEBUG_KMS("contrast: max %d, "
2597 "default %d, current %d\n",
2598 data_value[0], data_value[1], response);
2599 }
2600 if (sdvo_data.hue) {
2601 intel_sdvo_write_cmd(intel_output,
2602 SDVO_CMD_GET_MAX_HUE, NULL, 0);
2603 status = intel_sdvo_read_response(intel_output,
2604 &data_value, 4);
2605 if (status != SDVO_CMD_STATUS_SUCCESS) {
2606 DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
2607 return;
2608 }
2609 intel_sdvo_write_cmd(intel_output,
2610 SDVO_CMD_GET_HUE, NULL, 0);
2611 status = intel_sdvo_read_response(intel_output,
2612 &response, 2);
2613 if (status != SDVO_CMD_STATUS_SUCCESS) {
2614 DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
2615 return;
2616 }
2617 sdvo_priv->max_hue = data_value[0];
2618 sdvo_priv->cur_hue = response;
2619 sdvo_priv->hue_property =
2620 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2621 "hue", 2);
2622 sdvo_priv->hue_property->values[0] = 0;
2623 sdvo_priv->hue_property->values[1] =
2624 data_value[0];
2625 drm_connector_attach_property(connector,
2626 sdvo_priv->hue_property,
2627 sdvo_priv->cur_hue);
2628 DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
2629 data_value[0], data_value[1], response);
2630 }
2631 }
2632 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
2633 if (sdvo_data.brightness) {
2634 intel_sdvo_write_cmd(intel_output,
2635 SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
2636 status = intel_sdvo_read_response(intel_output,
2637 &data_value, 4);
2638 if (status != SDVO_CMD_STATUS_SUCCESS) {
2639 DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
2640 return;
2641 }
2642 intel_sdvo_write_cmd(intel_output,
2643 SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
2644 status = intel_sdvo_read_response(intel_output,
2645 &response, 2);
2646 if (status != SDVO_CMD_STATUS_SUCCESS) {
 2647 DRM_DEBUG_KMS("Incorrect SDVO get brightness\n");
2648 return;
2649 }
2650 sdvo_priv->max_brightness = data_value[0];
2651 sdvo_priv->cur_brightness = response;
2652 sdvo_priv->brightness_property =
2653 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2654 "brightness", 2);
2655 sdvo_priv->brightness_property->values[0] = 0;
2656 sdvo_priv->brightness_property->values[1] =
2657 data_value[0];
2658 drm_connector_attach_property(connector,
2659 sdvo_priv->brightness_property,
2660 sdvo_priv->cur_brightness);
2661 DRM_DEBUG_KMS("brightness: max %d, "
2662 "default %d, current %d\n",
2663 data_value[0], data_value[1], response);
2664 }
2665 }
2666 return;
2667}
2668
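[Each enhancement above repeats the same recipe: query the max, query the current value, create a two-value DRM_MODE_PROP_RANGE property spanning [0, max], and attach it at the current value. A factored sketch of that recipe (helper invented for illustration):

	static struct drm_property *
	sdvo_create_range_prop(struct drm_connector *connector, const char *name,
			       uint32_t max, uint32_t cur)
	{
		struct drm_property *prop;

		prop = drm_property_create(connector->dev, DRM_MODE_PROP_RANGE,
					   name, 2);
		if (!prop)
			return NULL;
		prop->values[0] = 0;	/* range is [0, max] */
		prop->values[1] = max;
		drm_connector_attach_property(connector, prop, cur);
		return prop;
	}
]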
2179bool intel_sdvo_init(struct drm_device *dev, int output_device) 2669bool intel_sdvo_init(struct drm_device *dev, int output_device)
2180{ 2670{
2181 struct drm_connector *connector; 2671 struct drm_connector *connector;
@@ -2264,6 +2754,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2264 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 2754 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
2265 if (sdvo_priv->is_tv) 2755 if (sdvo_priv->is_tv)
2266 intel_sdvo_tv_create_property(connector); 2756 intel_sdvo_tv_create_property(connector);
2757
2758 if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
2759 intel_sdvo_create_enhance_property(connector);
2760
2267 drm_sysfs_connector_add(connector); 2761 drm_sysfs_connector_add(connector);
2268 2762
2269 intel_sdvo_select_ddc_bus(sdvo_priv); 2763 intel_sdvo_select_ddc_bus(sdvo_priv);
diff --git a/include/acpi/button.h b/include/acpi/button.h
new file mode 100644
index 000000000000..97eea0e4c016
--- /dev/null
+++ b/include/acpi/button.h
@@ -0,0 +1,25 @@
1#ifndef ACPI_BUTTON_H
2#define ACPI_BUTTON_H
3
4#include <linux/notifier.h>
5
6#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE)
7extern int acpi_lid_notifier_register(struct notifier_block *nb);
8extern int acpi_lid_notifier_unregister(struct notifier_block *nb);
9extern int acpi_lid_open(void);
10#else
11static inline int acpi_lid_notifier_register(struct notifier_block *nb)
12{
13 return 0;
14}
15static inline int acpi_lid_notifier_unregister(struct notifier_block *nb)
16{
17 return 0;
18}
19static inline int acpi_lid_open(void)
20{
21 return 1;
22}
23#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */
24
25#endif /* ACPI_BUTTON_H */
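[The stub half of this header is what lets drivers use the lid API unconditionally: without CONFIG_ACPI_BUTTON, registration succeeds as a no-op and acpi_lid_open() always reports open, so LVDS detection degrades to "connected". A minimal consumer sketch (names invented):

	#include <linux/kernel.h>
	#include <acpi/button.h>

	static int example_lid_event(struct notifier_block *nb,
				     unsigned long val, void *data)
	{
		printk(KERN_DEBUG "lid is now %s\n",
		       acpi_lid_open() ? "open" : "closed");
		return NOTIFY_OK;
	}

	static struct notifier_block example_lid_nb = {
		.notifier_call = example_lid_event,
	};

	/* at init: acpi_lid_notifier_register(&example_lid_nb);
	 * at exit: acpi_lid_notifier_unregister(&example_lid_nb); */
]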
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 853508499d20..3f6e545609be 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -552,6 +552,7 @@
552 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 552 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
553 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 553 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
554 {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 554 {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
555 {0x8086, 0x2e42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
555 {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 556 {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
556 {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 557 {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
557 {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 558 {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 8e1e92583fbc..7e0cb1da92e6 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -185,6 +185,7 @@ typedef struct _drm_i915_sarea {
185#define DRM_I915_GEM_GET_APERTURE 0x23 185#define DRM_I915_GEM_GET_APERTURE 0x23
186#define DRM_I915_GEM_MMAP_GTT 0x24 186#define DRM_I915_GEM_MMAP_GTT 0x24
187#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 187#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
188#define DRM_I915_GEM_MADVISE 0x26
188 189
189#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 190#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
190#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 191#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -221,6 +222,7 @@ typedef struct _drm_i915_sarea {
221#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 222#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
222#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 223#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
223#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id) 224#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id)
225#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
224 226
225/* Allow drivers to submit batchbuffers directly to hardware, relying 227/* Allow drivers to submit batchbuffers directly to hardware, relying
226 * on the security mechanisms provided by hardware. 228 * on the security mechanisms provided by hardware.
@@ -667,4 +669,21 @@ struct drm_i915_get_pipe_from_crtc_id {
667 __u32 pipe; 669 __u32 pipe;
668}; 670};
669 671
672#define I915_MADV_WILLNEED 0
673#define I915_MADV_DONTNEED 1
674#define __I915_MADV_PURGED 2 /* internal state */
675
676struct drm_i915_gem_madvise {
677 /** Handle of the buffer to change the backing store advice */
678 __u32 handle;
679
680 /* Advice: either the buffer will be needed again in the near future,
 681 * or won't be and could be discarded under memory pressure.
682 */
683 __u32 madv;
684
685 /** Whether the backing store still exists. */
686 __u32 retained;
687};
688
670#endif /* _I915_DRM_H_ */ 689#endif /* _I915_DRM_H_ */
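[For completeness, the new madvise ioctl as seen from userspace: the caller passes a GEM handle plus WILLNEED or DONTNEED, and the kernel reports via 'retained' whether the backing store still exists. A userspace sketch, assuming fd is an open DRM device and handle a valid GEM handle:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int gem_madvise(int fd, unsigned int handle, unsigned int madv)
	{
		struct drm_i915_gem_madvise arg;

		memset(&arg, 0, sizeof(arg));
		arg.handle = handle;
		arg.madv = madv;	/* I915_MADV_WILLNEED or I915_MADV_DONTNEED */
		if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
			return -1;	/* errno holds the reason */
		return arg.retained;	/* 0: pages were purged */
	}
]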