author     Linus Torvalds <torvalds@linux-foundation.org>   2010-01-06 21:16:17 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-01-06 21:16:17 -0500
commit     635b3c9d5508d1dfe02ee5f882becea37e294111
tree       97f1958ec849742245a730ddda9ac798a802dde4
parent     7959722b951cffcd61a0a35229d007deeed8c2dd
parent     823f68fd646da6a39a9c0d3eb4c60d69dab5aa13
Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel
* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (23 commits)
drm/i915: remove full registers dump debug
drm/i915: Add DP dpll limit on ironlake and use existing DPLL search function
drm/i915: Select the correct BPC for LVDS on Ironlake
drm/i915: Make the BPC in FDI rx/transcoder be consistent with that in pipeconf on Ironlake
drm/i915: Enable/disable the dithering for LVDS based on VBT setting
drm/i915: Permit pinning whilst the device is 'suspended'
drm/i915: Hold struct mutex whilst pinning power context bo.
drm/i915: fix unused var
drm/i915: Storage class should be before const qualifier
drm/i915: remove render reclock support
drm/i915: Fix RC6 suspend/resume
drm/i915: execbuf2 support
drm/i915: Reload hangcheck timer too for Ironlake
drm/i915: only enable hotplug for detected outputs
drm/i915: Track whether cursor needs physical address in intel_device_info
drm/i915: Implement IS_* macros using static tables
drm/i915: Move PCI IDs into i915 driver
drm/i915: Update LVDS connector status when receiving ACPI LID event
drm/i915: Add MALATA PC-81005 to ACPI LID quirk list
drm/i915: implement new pm ops for i915
...
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 174
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 123
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 249
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 46
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 414
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 2
-rw-r--r--  include/drm/i915_drm.h | 54
17 files changed, 736 insertions, 537 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18476bf0b580..9c9998c4dceb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
 		mem = kmap_atomic(pages[page], KM_USER0);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
-		kunmap_atomic(pages[page], KM_USER0);
+		kunmap_atomic(mem, KM_USER0);
 	}
 }
 
@@ -386,34 +386,6 @@ out:
 	return 0;
 }
 
-static int i915_registers_info(struct seq_file *m, void *data) {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t reg;
-
-#define DUMP_RANGE(start, end) \
-	for (reg=start; reg < end; reg += 4) \
-		seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
-
-	DUMP_RANGE(0x00000, 0x00fff);   /* VGA registers */
-	DUMP_RANGE(0x02000, 0x02fff);   /* instruction, memory, interrupt control registers */
-	DUMP_RANGE(0x03000, 0x031ff);   /* FENCE and PPGTT control registers */
-	DUMP_RANGE(0x03200, 0x03fff);   /* frame buffer compression registers */
-	DUMP_RANGE(0x05000, 0x05fff);   /* I/O control registers */
-	DUMP_RANGE(0x06000, 0x06fff);   /* clock control registers */
-	DUMP_RANGE(0x07000, 0x07fff);   /* 3D internal debug registers */
-	DUMP_RANGE(0x07400, 0x088ff);   /* GPE debug registers */
-	DUMP_RANGE(0x0a000, 0x0afff);   /* display palette registers */
-	DUMP_RANGE(0x10000, 0x13fff);   /* MMIO MCHBAR */
-	DUMP_RANGE(0x30000, 0x3ffff);   /* overlay registers */
-	DUMP_RANGE(0x60000, 0x6ffff);   /* display engine pipeline registers */
-	DUMP_RANGE(0x70000, 0x72fff);   /* display and cursor registers */
-	DUMP_RANGE(0x73000, 0x73fff);   /* performance counters */
-
-	return 0;
-}
-
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
-	{"i915_regs", i915_registers_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 701bfeac7f57..c2b11088a6ea 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -813,9 +813,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_PAGEFLIPPING:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_EXECBUF2:
+		/* depends on GEM */
+		value = dev_priv->has_gem;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
 		return -EINVAL;
 	}
 
@@ -1117,7 +1121,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mm_node *compressed_fb, *compressed_llb;
-	unsigned long cfb_base, ll_base;
+	unsigned long cfb_base;
+	unsigned long ll_base = 0;
 
 	/* Leave 1M for line length buffer & misc. */
 	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
@@ -1200,14 +1205,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
 		0xff000000;
 
-	if (IS_MOBILE(dev) || IS_I9XX(dev))
-		dev_priv->cursor_needs_physical = true;
-	else
-		dev_priv->cursor_needs_physical = false;
-
-	if (IS_I965G(dev) || IS_G33(dev))
-		dev_priv->cursor_needs_physical = false;
-
 	/* Basic memrange allocator for stolen space (aka vram) */
 	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
 	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
@@ -1257,6 +1254,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	if (ret)
 		goto destroy_ringbuffer;
 
+	intel_modeset_init(dev);
+
 	ret = drm_irq_install(dev);
 	if (ret)
 		goto destroy_ringbuffer;
@@ -1271,8 +1270,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
 
 	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
 
-	intel_modeset_init(dev);
-
 	drm_helper_initial_config(dev);
 
 	return 0;
@@ -1360,7 +1357,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	resource_size_t base, size;
-	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+	int ret = 0, mmio_bar;
 	uint32_t agp_size, prealloc_size, prealloc_start;
 
 	/* i915 has 4 more counters */
@@ -1376,8 +1373,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	dev->dev_private = (void *)dev_priv;
 	dev_priv->dev = dev;
+	dev_priv->info = (struct intel_device_info *) flags;
 
 	/* Add register map (needed for suspend/resume) */
+	mmio_bar = IS_I9XX(dev) ? 0 : 1;
 	base = drm_get_resource_start(dev, mmio_bar);
 	size = drm_get_resource_len(dev, mmio_bar);
 
@@ -1652,6 +1651,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
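With the new getparam value and ioctl table entry above, userspace can probe for execbuf2 support at runtime instead of relying on a driver version check. A hedged userspace-side sketch (error handling trimmed; "fd" is assumed to be an already-open DRM device node):

	#include <sys/ioctl.h>
	#include "i915_drm.h"

	static int has_execbuf2(int fd)
	{
		drm_i915_getparam_t gp;
		int value = 0;

		gp.param = I915_PARAM_HAS_EXECBUF2;
		gp.value = &value;
		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;	/* older kernels reject the unknown param with -EINVAL */
		return value;	/* nonzero only when GEM is available */
	}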
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 24286ca168fc..2ffffd7ae09a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,7 +33,6 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-#include "drm_pciids.h"
 #include <linux/console.h>
 #include "drm_crtc_helper.h"
 
@@ -48,8 +47,124 @@ module_param_named(powersave, i915_powersave, int, 0400);
 
 static struct drm_driver driver;
 
-static struct pci_device_id pciidlist[] = {
-	i915_PCI_IDS
+#define INTEL_VGA_DEVICE(id, info) { \
+	.class = PCI_CLASS_DISPLAY_VGA << 8, \
+	.class_mask = 0xffff00, \
+	.vendor = 0x8086, \
+	.device = id, \
+	.subvendor = PCI_ANY_ID, \
+	.subdevice = PCI_ANY_ID, \
+	.driver_data = (unsigned long) info }
+
+const static struct intel_device_info intel_i830_info = {
+	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_845g_info = {
+	.is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i85x_info = {
+	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i865g_info = {
+	.is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i915g_info = {
+	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i915gm_info = {
+	.is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+	.cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945g_info = {
+	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945gm_info = {
+	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+	.has_hotplug = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i965g_info = {
+	.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_i965gm_info = {
+	.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g33_info = {
+	.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g45_info = {
+	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_gm45_info = {
+	.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_pineview_info = {
+	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_d_info = {
+	.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_m_info = {
+	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_rc6 = 1,
+	.has_hotplug = 1,
+};
+
+const static struct pci_device_id pciidlist[] = {
+	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
+	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
+	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+	INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
+	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
+	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
+	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
+	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
+	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
+	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
+	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
+	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
+	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
+	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+	{0, 0, 0}
 };
 
 #if defined(CONFIG_DRM_I915_KMS)
@@ -284,6 +399,52 @@ i915_pci_resume(struct pci_dev *pdev)
 	return i915_resume(dev);
 }
 
+static int
+i915_pm_suspend(struct device *dev)
+{
+	return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
+}
+
+static int
+i915_pm_resume(struct device *dev)
+{
+	return i915_pci_resume(to_pci_dev(dev));
+}
+
+static int
+i915_pm_freeze(struct device *dev)
+{
+	return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+}
+
+static int
+i915_pm_thaw(struct device *dev)
+{
+	/* thaw during hibernate, do nothing! */
+	return 0;
+}
+
+static int
+i915_pm_poweroff(struct device *dev)
+{
+	return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+}
+
+static int
+i915_pm_restore(struct device *dev)
+{
+	return i915_pci_resume(to_pci_dev(dev));
+}
+
+const struct dev_pm_ops i915_pm_ops = {
+	.suspend = i915_pm_suspend,
+	.resume = i915_pm_resume,
+	.freeze = i915_pm_freeze,
+	.thaw = i915_pm_thaw,
+	.poweroff = i915_pm_poweroff,
+	.restore = i915_pm_restore,
+};
+
 static struct vm_operations_struct i915_gem_vm_ops = {
 	.fault = i915_gem_fault,
 	.open = drm_gem_vm_open,
@@ -303,8 +464,6 @@ static struct drm_driver driver = {
 	.lastclose = i915_driver_lastclose,
 	.preclose = i915_driver_preclose,
 	.postclose = i915_driver_postclose,
-	.suspend = i915_suspend,
-	.resume = i915_resume,
 	.device_is_agp = i915_driver_device_is_agp,
 	.enable_vblank = i915_enable_vblank,
 	.disable_vblank = i915_disable_vblank,
@@ -344,10 +503,7 @@ static struct drm_driver driver = {
 		.id_table = pciidlist,
 		.probe = i915_pci_probe,
 		.remove = i915_pci_remove,
-#ifdef CONFIG_PM
-		.resume = i915_pci_resume,
-		.suspend = i915_pci_suspend,
-#endif
+		.driver.pm = &i915_pm_ops,
 	},
 
 	.name = DRIVER_NAME,
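The point of the table-driven probe above is that per-device knowledge now lives in one place: each PCI ID entry carries a pointer to an intel_device_info, which i915_driver_load() stores in dev_priv->info (see the i915_dma.c hunk). Purely as an illustration of the shape of a future addition, and with 0x1234 as a made-up placeholder rather than a real Intel device ID, a new chip would be wired up roughly like this:

	const static struct intel_device_info intel_example_info = {
		.is_i9xx = 1, .is_mobile = 1, .has_hotplug = 1,	/* hypothetical feature set */
	};

	/* ...plus one more line in pciidlist[]: */
	INTEL_VGA_DEVICE(0x1234, &intel_example_info),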
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbecac72f5bb..29dd67626967 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,9 +172,31 @@ struct drm_i915_display_funcs {
 
 struct intel_overlay;
 
+struct intel_device_info {
+	u8 is_mobile : 1;
+	u8 is_i8xx : 1;
+	u8 is_i915g : 1;
+	u8 is_i9xx : 1;
+	u8 is_i945gm : 1;
+	u8 is_i965g : 1;
+	u8 is_i965gm : 1;
+	u8 is_g33 : 1;
+	u8 need_gfx_hws : 1;
+	u8 is_g4x : 1;
+	u8 is_pineview : 1;
+	u8 is_ironlake : 1;
+	u8 has_fbc : 1;
+	u8 has_rc6 : 1;
+	u8 has_pipe_cxsr : 1;
+	u8 has_hotplug : 1;
+	u8 cursor_needs_physical : 1;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
+	const struct intel_device_info *info;
+
 	int has_gem;
 
 	void __iomem *regs;
@@ -232,8 +254,6 @@ typedef struct drm_i915_private {
 	int hangcheck_count;
 	uint32_t last_acthd;
 
-	bool cursor_needs_physical;
-
 	struct drm_mm vram;
 
 	unsigned long cfb_size;
@@ -287,8 +307,6 @@ typedef struct drm_i915_private {
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
-	u32 saveRENDERSTANDBY;
-	u32 savePWRCTXA;
 	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
@@ -561,6 +579,7 @@ typedef struct drm_i915_private {
 	u16 orig_clock;
 	int child_dev_num;
 	struct child_device_config *child_dev;
+	struct drm_connector *int_lvds_connector;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -794,6 +813,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -860,6 +881,9 @@ void i915_gem_shrinker_exit(void);
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
+		    int tiling_mode);
+bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -982,67 +1006,33 @@ extern void g4x_disable_fbc(struct drm_device *dev);
 extern int i915_wrap_ring(struct drm_device * dev);
 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
-
-#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
-			(dev)->pci_device == 0x27AE)
-#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
-		       (dev)->pci_device == 0x2982 || \
-		       (dev)->pci_device == 0x2992 || \
-		       (dev)->pci_device == 0x29A2 || \
-		       (dev)->pci_device == 0x2A02 || \
-		       (dev)->pci_device == 0x2A12 || \
-		       (dev)->pci_device == 0x2A42 || \
-		       (dev)->pci_device == 0x2E02 || \
-		       (dev)->pci_device == 0x2E12 || \
-		       (dev)->pci_device == 0x2E22 || \
-		       (dev)->pci_device == 0x2E32 || \
-		       (dev)->pci_device == 0x2E42 || \
-		       (dev)->pci_device == 0x0042 || \
-		       (dev)->pci_device == 0x0046)
-
-#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
-			(dev)->pci_device == 0x2A12)
-
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-
-#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
-		     (dev)->pci_device == 0x2E12 || \
-		     (dev)->pci_device == 0x2E22 || \
-		     (dev)->pci_device == 0x2E32 || \
-		     (dev)->pci_device == 0x2E42 || \
-		     IS_GM45(dev))
-
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
-
-#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
-		     (dev)->pci_device == 0x29B2 || \
-		     (dev)->pci_device == 0x29D2 || \
-		     (IS_PINEVIEW(dev)))
-
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
+#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
 #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
-
-#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
-		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
-		      IS_IRONLAKE(dev))
+#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
+#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
 
-#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
-			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
-			IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
-				IS_IRONLAKE(dev))
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
@@ -1054,17 +1044,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
 			  !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
 /* dsparb controlled by hw only */
 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
 #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
-			   (IS_I9XX(dev) || IS_GM45(dev)) && \
-			   !IS_PINEVIEW(dev) && \
-			   !IS_IRONLAKE(dev))
-#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
 
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
 
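The macro rework above trades per-call chains of PCI ID comparisons for a single flag lookup. A sketch of what a check now expands to, using only the definitions shown in this hunk:

	/* IS_I965G(dev) */
	(((struct drm_i915_private *) (dev)->dev_private)->info->is_i965g)

so supporting a new device no longer means auditing every ID list in this header; only the intel_device_info table in i915_drv.c changes.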
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c463cf2050a..21950ef987c7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2021,9 +2021,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	/* blow away mappings if mapped through GTT */
 	i915_gem_release_mmap(obj);
 
-	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-		i915_gem_clear_fence_reg(obj);
-
 	/* Move the object to the CPU domain to ensure that
 	 * any possible CPU writes while it's not in the GTT
 	 * are flushed when we go to remap it. This will
@@ -2039,6 +2036,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
 	BUG_ON(obj_priv->active);
 
+	/* release the fence reg _after_ flushing */
+	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+		i915_gem_clear_fence_reg(obj);
+
 	if (obj_priv->agp_mem != NULL) {
 		drm_unbind_agp(obj_priv->agp_mem);
 		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2581,9 +2582,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	bool retry_alloc = false;
 	int ret;
 
-	if (dev_priv->mm.suspended)
-		return -EBUSY;
-
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to bind a purgeable object\n");
 		return -EINVAL;
@@ -3198,7 +3196,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 static int
 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				 struct drm_file *file_priv,
-				 struct drm_i915_gem_exec_object *entry,
+				 struct drm_i915_gem_exec_object2 *entry,
 				 struct drm_i915_gem_relocation_entry *relocs)
 {
 	struct drm_device *dev = obj->dev;
@@ -3206,12 +3204,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int i, ret;
 	void __iomem *reloc_page;
+	bool need_fence;
+
+	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		     obj_priv->tiling_mode != I915_TILING_NONE;
+
+	/* Check fence reg constraints and rebind if necessary */
+	if (need_fence && !i915_obj_fenceable(dev, obj))
+		i915_gem_object_unbind(obj);
 
 	/* Choose the GTT offset for our buffer and put it there. */
 	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
 	if (ret)
 		return ret;
 
+	/*
+	 * Pre-965 chips need a fence register set up in order to
+	 * properly handle blits to/from tiled surfaces.
+	 */
+	if (need_fence) {
+		ret = i915_gem_object_get_fence_reg(obj);
+		if (ret != 0) {
+			if (ret != -EBUSY && ret != -ERESTARTSYS)
+				DRM_ERROR("Failure to install fence: %d\n",
+					  ret);
+			i915_gem_object_unpin(obj);
+			return ret;
+		}
+	}
+
 	entry->offset = obj_priv->gtt_offset;
 
 	/* Apply the relocations, using the GTT aperture to avoid cache
@@ -3373,7 +3394,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  */
 static int
 i915_dispatch_gem_execbuffer(struct drm_device *dev,
-			     struct drm_i915_gem_execbuffer *exec,
+			     struct drm_i915_gem_execbuffer2 *exec,
 			     struct drm_clip_rect *cliprects,
 			     uint64_t exec_offset)
 {
@@ -3463,7 +3484,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
 			      uint32_t buffer_count,
 			      struct drm_i915_gem_relocation_entry **relocs)
 {
@@ -3478,8 +3499,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 	}
 
 	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
-	if (*relocs == NULL)
+	if (*relocs == NULL) {
+		DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
 		return -ENOMEM;
+	}
 
 	for (i = 0; i < buffer_count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3503,7 +3526,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 }
 
 static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
 			    uint32_t buffer_count,
 			    struct drm_i915_gem_relocation_entry *relocs)
 {
@@ -3536,7 +3559,7 @@ err:
 }
 
 static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
 			   uint64_t exec_offset)
 {
 	uint32_t exec_start, exec_len;
@@ -3589,18 +3612,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
 }
 
 int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv,
+		       struct drm_i915_gem_execbuffer2 *args,
+		       struct drm_i915_gem_exec_object2 *exec_list)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_execbuffer *args = data;
-	struct drm_i915_gem_exec_object *exec_list = NULL;
 	struct drm_gem_object **object_list = NULL;
 	struct drm_gem_object *batch_obj;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
 	struct drm_i915_gem_relocation_entry *relocs;
-	int ret, ret2, i, pinned = 0;
+	int ret = 0, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
 	int pin_tries, flips;
@@ -3614,25 +3637,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
 	}
-	/* Copy in the exec list from userland */
-	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
 	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
-	if (exec_list == NULL || object_list == NULL) {
-		DRM_ERROR("Failed to allocate exec or object list "
-			  "for %d buffers\n",
+	if (object_list == NULL) {
+		DRM_ERROR("Failed to allocate object list for %d buffers\n",
 			  args->buffer_count);
 		ret = -ENOMEM;
 		goto pre_mutex_err;
 	}
-	ret = copy_from_user(exec_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
-			     sizeof(*exec_list) * args->buffer_count);
-	if (ret != 0) {
-		DRM_ERROR("copy %d exec entries failed %d\n",
-			  args->buffer_count, ret);
-		goto pre_mutex_err;
-	}
 
 	if (args->num_cliprects != 0) {
 		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
@@ -3884,20 +3895,6 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
-	if (!ret) {
-		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_ERROR("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
-		}
-	}
-
 	/* Copy the updated relocations out regardless of current error
 	 * state. Failure to update the relocs would mean that the next
 	 * time userland calls execbuf, it would do so with presumed offset
@@ -3914,12 +3911,158 @@ err:
 
 pre_mutex_err:
 	drm_free_large(object_list);
-	drm_free_large(exec_list);
 	kfree(cliprects);
 
 	return ret;
 }
 
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_i915_gem_execbuffer *args = data;
+	struct drm_i915_gem_execbuffer2 exec2;
+	struct drm_i915_gem_exec_object *exec_list = NULL;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret, i;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	/* Copy in the exec list from userland */
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec_list == NULL || exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < args->buffer_count; i++) {
+		exec2_list[i].handle = exec_list[i].handle;
+		exec2_list[i].relocation_count = exec_list[i].relocation_count;
+		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+		exec2_list[i].alignment = exec_list[i].alignment;
+		exec2_list[i].offset = exec_list[i].offset;
+		if (!IS_I965G(dev))
+			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+		else
+			exec2_list[i].flags = 0;
+	}
+
+	exec2.buffers_ptr = args->buffers_ptr;
+	exec2.buffer_count = args->buffer_count;
+	exec2.batch_start_offset = args->batch_start_offset;
+	exec2.batch_len = args->batch_len;
+	exec2.DR1 = args->DR1;
+	exec2.DR4 = args->DR4;
+	exec2.num_cliprects = args->num_cliprects;
+	exec2.cliprects_ptr = args->cliprects_ptr;
+	exec2.flags = 0;
+
+	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		for (i = 0; i < args->buffer_count; i++)
+			exec_list[i].offset = exec2_list[i].offset;
+		/* ... and back out to userspace */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	} else {
+		DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
+	}
+
+	drm_free_large(exec_list);
+	drm_free_large(exec2_list);
+	return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_i915_gem_execbuffer2 *args = data;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec2_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec2_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec2_list,
+				   sizeof(*exec2_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	}
+
+	drm_free_large(exec2_list);
+	return ret;
+}
+
 int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
@@ -3933,19 +4076,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 		if (ret)
 			return ret;
 	}
-	/*
-	 * Pre-965 chips need a fence register set up in order to
-	 * properly handle tiled surfaces.
-	 */
-	if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj);
-		if (ret != 0) {
-			if (ret != -EBUSY && ret != -ERESTARTSYS)
-				DRM_ERROR("Failure to install fence: %d\n",
-					  ret);
-			return ret;
-		}
-	}
+
 	obj_priv->pin_count++;
 
 	/* If the object is not active and not pending a flush,
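For completeness, a hedged sketch of what a minimal execbuffer2 submission looks like from userspace, assuming the structures and ioctl number this series adds to include/drm/i915_drm.h (that header's hunk is not quoted above); "fd", "handle" and "batch_len" are placeholders:

	struct drm_i915_gem_exec_object2 obj = {
		.handle = handle,
		.flags = EXEC_OBJECT_NEEDS_FENCE,	/* request a fence for a tiled buffer */
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr = (uintptr_t) &obj,
		.buffer_count = 1,
		.batch_len = batch_len,
	};

	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

The per-object flags field is the functional difference from the old interface: the legacy wrapper above has to guess (every object on pre-965 gets EXEC_OBJECT_NEEDS_FENCE), whereas execbuf2-aware userspace can request fences only where it actually needs them.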
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 30d6af6c09bb..df278b2685bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 
 
 /**
- * Returns the size of the fence for a tiled object of the given size.
+ * Returns whether an object is currently fenceable.  If not, it may need
+ * to be unbound and have its pitch adjusted.
  */
-static int
-i915_get_fence_size(struct drm_device *dev, int size)
+bool
+i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
 {
-	int i;
-	int start;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	if (IS_I965G(dev)) {
 		/* The 965 can have fences at any page boundary. */
-		return ALIGN(size, 4096);
+		if (obj->size & 4095)
+			return false;
+		return true;
+	} else if (IS_I9XX(dev)) {
+		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+			return false;
 	} else {
-		/* Align the size to a power of two greater than the smallest
-		 * fence size.
-		 */
-		if (IS_I9XX(dev))
-			start = 1024 * 1024;
-		else
-			start = 512 * 1024;
+		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+			return false;
+	}
 
-		for (i = start; i < size; i <<= 1)
-			;
+	/* Power of two sized... */
+	if (obj->size & (obj->size - 1))
+		return false;
 
-		return i;
-	}
+	/* Objects must be size aligned as well */
+	if (obj_priv->gtt_offset & (obj->size - 1))
+		return false;
+	return true;
 }
 
 /* Check pitch constriants for all chips & tiling formats */
-static bool
+bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 {
 	int tile_width;
@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 	if (stride & (stride - 1))
 		return false;
 
-	/* We don't handle the aperture area covered by the fence being bigger
-	 * than the object size.
-	 */
-	if (i915_get_fence_size(dev, size) != size)
-		return false;
-
 	return true;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 85f4c5de97e2..7cd8110051b6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -313,6 +313,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
313 | dev_priv->mm.irq_gem_seqno = seqno; | 313 | dev_priv->mm.irq_gem_seqno = seqno; |
314 | trace_i915_gem_request_complete(dev, seqno); | 314 | trace_i915_gem_request_complete(dev, seqno); |
315 | DRM_WAKEUP(&dev_priv->irq_queue); | 315 | DRM_WAKEUP(&dev_priv->irq_queue); |
316 | dev_priv->hangcheck_count = 0; | ||
317 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | ||
316 | } | 318 | } |
317 | 319 | ||
318 | if (de_iir & DE_GSE) | 320 | if (de_iir & DE_GSE) |
@@ -1084,6 +1086,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
1084 | (void) I915_READ(IER); | 1086 | (void) I915_READ(IER); |
1085 | } | 1087 | } |
1086 | 1088 | ||
1089 | /* | ||
1090 | * Must be called after intel_modeset_init or hotplug interrupts won't be | ||
1091 | * enabled correctly. | ||
1092 | */ | ||
1087 | int i915_driver_irq_postinstall(struct drm_device *dev) | 1093 | int i915_driver_irq_postinstall(struct drm_device *dev) |
1088 | { | 1094 | { |
1089 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1095 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -1106,19 +1112,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1106 | if (I915_HAS_HOTPLUG(dev)) { | 1112 | if (I915_HAS_HOTPLUG(dev)) { |
1107 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 1113 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
1108 | 1114 | ||
1109 | /* Leave other bits alone */ | 1115 | /* Note HDMI and DP share bits */ |
1110 | hotplug_en |= HOTPLUG_EN_MASK; | 1116 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) |
1117 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||
1118 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||
1119 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||
1120 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||
1121 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||
1122 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||
1123 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||
1124 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||
1125 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||
1126 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) | ||
1127 | hotplug_en |= CRT_HOTPLUG_INT_EN; | ||
1128 | /* Ignore TV since it's buggy */ | ||
1129 | |||
1111 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | 1130 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
1112 | 1131 | ||
1113 | dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS | | ||
1114 | TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS | | ||
1115 | SDVOB_HOTPLUG_INT_STATUS; | ||
1116 | if (IS_G4X(dev)) { | ||
1117 | dev_priv->hotplug_supported_mask |= | ||
1118 | HDMIB_HOTPLUG_INT_STATUS | | ||
1119 | HDMIC_HOTPLUG_INT_STATUS | | ||
1120 | HDMID_HOTPLUG_INT_STATUS; | ||
1121 | } | ||
1122 | /* Enable in IER... */ | 1132 | /* Enable in IER... */ |
1123 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 1133 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
1124 | /* and unmask in IMR */ | 1134 | /* and unmask in IMR */ |
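The rewritten block above only enables the *_HOTPLUG_INT_EN bits whose matching *_HOTPLUG_INT_STATUS bits were advertised by the output init functions (see the intel_crt/intel_hdmi/intel_dp/intel_sdvo/intel_tv hunks later in this diff), instead of turning everything on through the old HOTPLUG_EN_MASK. A table-driven equivalent of the if chain, shown purely to make the status-to-enable mapping explicit; the patch itself keeps the explicit branches:

/* illustration only: same mapping as the if chain above */
static const struct {
	u32 status;	/* bit reported in PORT_HOTPLUG_STAT */
	u32 enable;	/* bit to set in PORT_HOTPLUG_EN     */
} hotplug_map[] = {
	{ HDMIB_HOTPLUG_INT_STATUS, HDMIB_HOTPLUG_INT_EN },
	{ HDMIC_HOTPLUG_INT_STATUS, HDMIC_HOTPLUG_INT_EN },
	{ HDMID_HOTPLUG_INT_STATUS, HDMID_HOTPLUG_INT_EN },
	{ SDVOB_HOTPLUG_INT_STATUS, SDVOB_HOTPLUG_INT_EN },
	{ SDVOC_HOTPLUG_INT_STATUS, SDVOC_HOTPLUG_INT_EN },
	{ CRT_HOTPLUG_INT_STATUS,   CRT_HOTPLUG_INT_EN   },
	/* TV is left out on purpose: "Ignore TV since it's buggy" */
};

static u32 hotplug_en_bits(u32 supported_mask)
{
	u32 en = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(hotplug_map); i++)
		if (supported_mask & hotplug_map[i].status)
			en |= hotplug_map[i].enable;
	return en;
}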
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 974b3cf70618..149d360d64a3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -879,13 +879,6 @@ | |||
879 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | 879 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
880 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ | 880 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ |
881 | #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f | 881 | #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f |
882 | #define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \ | ||
883 | HDMIC_HOTPLUG_INT_EN | \ | ||
884 | HDMID_HOTPLUG_INT_EN | \ | ||
885 | SDVOB_HOTPLUG_INT_EN | \ | ||
886 | SDVOC_HOTPLUG_INT_EN | \ | ||
887 | CRT_HOTPLUG_INT_EN) | ||
888 | |||
889 | 882 | ||
890 | #define PORT_HOTPLUG_STAT 0x61114 | 883 | #define PORT_HOTPLUG_STAT 0x61114 |
891 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) | 884 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) |
@@ -982,6 +975,8 @@ | |||
982 | #define LVDS_PORT_EN (1 << 31) | 975 | #define LVDS_PORT_EN (1 << 31) |
983 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | 976 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ |
984 | #define LVDS_PIPEB_SELECT (1 << 30) | 977 | #define LVDS_PIPEB_SELECT (1 << 30) |
978 | /* LVDS dithering flag on 965/g4x platform */ | ||
979 | #define LVDS_ENABLE_DITHER (1 << 25) | ||
985 | /* Enable border for unscaled (or aspect-scaled) display */ | 980 | /* Enable border for unscaled (or aspect-scaled) display */ |
986 | #define LVDS_BORDER_ENABLE (1 << 15) | 981 | #define LVDS_BORDER_ENABLE (1 << 15) |
987 | /* | 982 | /* |
@@ -1751,6 +1746,8 @@ | |||
1751 | 1746 | ||
1752 | /* Display & cursor control */ | 1747 | /* Display & cursor control */ |
1753 | 1748 | ||
1749 | /* dithering flag on Ironlake */ | ||
1750 | #define PIPE_ENABLE_DITHER (1 << 4) | ||
1754 | /* Pipe A */ | 1751 | /* Pipe A */ |
1755 | #define PIPEADSL 0x70000 | 1752 | #define PIPEADSL 0x70000 |
1756 | #define PIPEACONF 0x70008 | 1753 | #define PIPEACONF 0x70008 |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index d5ebb00a9d49..a3b90c9561dc 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev) | |||
732 | 732 | ||
733 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | 733 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); |
734 | 734 | ||
735 | /* Render Standby */ | ||
736 | if (I915_HAS_RC6(dev)) { | ||
737 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | ||
738 | dev_priv->savePWRCTXA = I915_READ(PWRCTXA); | ||
739 | } | ||
740 | |||
741 | /* Hardware status page */ | 735 | /* Hardware status page */ |
742 | dev_priv->saveHWS = I915_READ(HWS_PGA); | 736 | dev_priv->saveHWS = I915_READ(HWS_PGA); |
743 | 737 | ||
@@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev) | |||
793 | 787 | ||
794 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | 788 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); |
795 | 789 | ||
796 | /* Render Standby */ | ||
797 | if (I915_HAS_RC6(dev)) { | ||
798 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); | ||
799 | I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA); | ||
800 | } | ||
801 | |||
802 | /* Hardware status page */ | 790 | /* Hardware status page */ |
803 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | 791 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); |
804 | 792 | ||
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9f3d3e563414..ddefc871edfe 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -548,4 +548,6 @@ void intel_crt_init(struct drm_device *dev) | |||
548 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | 548 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); |
549 | 549 | ||
550 | drm_sysfs_connector_add(connector); | 550 | drm_sysfs_connector_add(connector); |
551 | |||
552 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; | ||
551 | } | 553 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 52cd9b006da2..002612fae717 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -262,6 +262,14 @@ struct intel_limit { | |||
262 | #define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ | 262 | #define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ |
263 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ | 263 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ |
264 | 264 | ||
265 | #define IRONLAKE_P_DISPLAY_PORT_MIN 10 | ||
266 | #define IRONLAKE_P_DISPLAY_PORT_MAX 20 | ||
267 | #define IRONLAKE_P2_DISPLAY_PORT_FAST 10 | ||
268 | #define IRONLAKE_P2_DISPLAY_PORT_SLOW 10 | ||
269 | #define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0 | ||
270 | #define IRONLAKE_P1_DISPLAY_PORT_MIN 1 | ||
271 | #define IRONLAKE_P1_DISPLAY_PORT_MAX 2 | ||
272 | |||
265 | static bool | 273 | static bool |
266 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 274 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
267 | int target, int refclk, intel_clock_t *best_clock); | 275 | int target, int refclk, intel_clock_t *best_clock); |
@@ -271,9 +279,6 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
271 | static bool | 279 | static bool |
272 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 280 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
273 | int target, int refclk, intel_clock_t *best_clock); | 281 | int target, int refclk, intel_clock_t *best_clock); |
274 | static bool | ||
275 | intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
276 | int target, int refclk, intel_clock_t *best_clock); | ||
277 | 282 | ||
278 | static bool | 283 | static bool |
279 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, | 284 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, |
@@ -496,7 +501,7 @@ static const intel_limit_t intel_limits_ironlake_sdvo = { | |||
496 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 501 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
497 | .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, | 502 | .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, |
498 | .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, | 503 | .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, |
499 | .find_pll = intel_ironlake_find_best_PLL, | 504 | .find_pll = intel_g4x_find_best_PLL, |
500 | }; | 505 | }; |
501 | 506 | ||
502 | static const intel_limit_t intel_limits_ironlake_lvds = { | 507 | static const intel_limit_t intel_limits_ironlake_lvds = { |
@@ -511,7 +516,30 @@ static const intel_limit_t intel_limits_ironlake_lvds = { | |||
511 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 516 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
512 | .p2_slow = IRONLAKE_P2_LVDS_SLOW, | 517 | .p2_slow = IRONLAKE_P2_LVDS_SLOW, |
513 | .p2_fast = IRONLAKE_P2_LVDS_FAST }, | 518 | .p2_fast = IRONLAKE_P2_LVDS_FAST }, |
514 | .find_pll = intel_ironlake_find_best_PLL, | 519 | .find_pll = intel_g4x_find_best_PLL, |
520 | }; | ||
521 | |||
522 | static const intel_limit_t intel_limits_ironlake_display_port = { | ||
523 | .dot = { .min = IRONLAKE_DOT_MIN, | ||
524 | .max = IRONLAKE_DOT_MAX }, | ||
525 | .vco = { .min = IRONLAKE_VCO_MIN, | ||
526 | .max = IRONLAKE_VCO_MAX}, | ||
527 | .n = { .min = IRONLAKE_N_MIN, | ||
528 | .max = IRONLAKE_N_MAX }, | ||
529 | .m = { .min = IRONLAKE_M_MIN, | ||
530 | .max = IRONLAKE_M_MAX }, | ||
531 | .m1 = { .min = IRONLAKE_M1_MIN, | ||
532 | .max = IRONLAKE_M1_MAX }, | ||
533 | .m2 = { .min = IRONLAKE_M2_MIN, | ||
534 | .max = IRONLAKE_M2_MAX }, | ||
535 | .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN, | ||
536 | .max = IRONLAKE_P_DISPLAY_PORT_MAX }, | ||
537 | .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN, | ||
538 | .max = IRONLAKE_P1_DISPLAY_PORT_MAX}, | ||
539 | .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT, | ||
540 | .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW, | ||
541 | .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST }, | ||
542 | .find_pll = intel_find_pll_ironlake_dp, | ||
515 | }; | 543 | }; |
516 | 544 | ||
517 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | 545 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) |
@@ -519,6 +547,9 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | |||
519 | const intel_limit_t *limit; | 547 | const intel_limit_t *limit; |
520 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 548 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
521 | limit = &intel_limits_ironlake_lvds; | 549 | limit = &intel_limits_ironlake_lvds; |
550 | else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || | ||
551 | HAS_eDP) | ||
552 | limit = &intel_limits_ironlake_display_port; | ||
522 | else | 553 | else |
523 | limit = &intel_limits_ironlake_sdvo; | 554 | limit = &intel_limits_ironlake_sdvo; |
524 | 555 | ||
@@ -791,7 +822,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
791 | found = false; | 822 | found = false; |
792 | 823 | ||
793 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 824 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
794 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | 825 | int lvds_reg; |
826 | |||
827 | if (IS_IRONLAKE(dev)) | ||
828 | lvds_reg = PCH_LVDS; | ||
829 | else | ||
830 | lvds_reg = LVDS; | ||
831 | if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == | ||
795 | LVDS_CLKB_POWER_UP) | 832 | LVDS_CLKB_POWER_UP) |
796 | clock.p2 = limit->p2.p2_fast; | 833 | clock.p2 = limit->p2.p2_fast; |
797 | else | 834 | else |
@@ -839,6 +876,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
839 | { | 876 | { |
840 | struct drm_device *dev = crtc->dev; | 877 | struct drm_device *dev = crtc->dev; |
841 | intel_clock_t clock; | 878 | intel_clock_t clock; |
879 | |||
880 | /* return directly when it is eDP */ | ||
881 | if (HAS_eDP) | ||
882 | return true; | ||
883 | |||
842 | if (target < 200000) { | 884 | if (target < 200000) { |
843 | clock.n = 1; | 885 | clock.n = 1; |
844 | clock.p1 = 2; | 886 | clock.p1 = 2; |
@@ -857,68 +899,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
857 | return true; | 899 | return true; |
858 | } | 900 | } |
859 | 901 | ||
860 | static bool | ||
861 | intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
862 | int target, int refclk, intel_clock_t *best_clock) | ||
863 | { | ||
864 | struct drm_device *dev = crtc->dev; | ||
865 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
866 | intel_clock_t clock; | ||
867 | int err_most = 47; | ||
868 | int err_min = 10000; | ||
869 | |||
870 | /* eDP has only 2 clock choice, no n/m/p setting */ | ||
871 | if (HAS_eDP) | ||
872 | return true; | ||
873 | |||
874 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) | ||
875 | return intel_find_pll_ironlake_dp(limit, crtc, target, | ||
876 | refclk, best_clock); | ||
877 | |||
878 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
879 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | ||
880 | LVDS_CLKB_POWER_UP) | ||
881 | clock.p2 = limit->p2.p2_fast; | ||
882 | else | ||
883 | clock.p2 = limit->p2.p2_slow; | ||
884 | } else { | ||
885 | if (target < limit->p2.dot_limit) | ||
886 | clock.p2 = limit->p2.p2_slow; | ||
887 | else | ||
888 | clock.p2 = limit->p2.p2_fast; | ||
889 | } | ||
890 | |||
891 | memset(best_clock, 0, sizeof(*best_clock)); | ||
892 | for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { | ||
893 | /* based on hardware requriment prefer smaller n to precision */ | ||
894 | for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { | ||
895 | /* based on hardware requirment prefere larger m1,m2 */ | ||
896 | for (clock.m1 = limit->m1.max; | ||
897 | clock.m1 >= limit->m1.min; clock.m1--) { | ||
898 | for (clock.m2 = limit->m2.max; | ||
899 | clock.m2 >= limit->m2.min; clock.m2--) { | ||
900 | int this_err; | ||
901 | |||
902 | intel_clock(dev, refclk, &clock); | ||
903 | if (!intel_PLL_is_valid(crtc, &clock)) | ||
904 | continue; | ||
905 | this_err = abs((10000 - (target*10000/clock.dot))); | ||
906 | if (this_err < err_most) { | ||
907 | *best_clock = clock; | ||
908 | /* found on first matching */ | ||
909 | goto out; | ||
910 | } else if (this_err < err_min) { | ||
911 | *best_clock = clock; | ||
912 | err_min = this_err; | ||
913 | } | ||
914 | } | ||
915 | } | ||
916 | } | ||
917 | } | ||
918 | out: | ||
919 | return true; | ||
920 | } | ||
921 | |||
922 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ | 902 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ |
923 | static bool | 903 | static bool |
924 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | 904 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
@@ -1493,6 +1473,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1493 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; | 1473 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; |
1494 | u32 temp; | 1474 | u32 temp; |
1495 | int tries = 5, j, n; | 1475 | int tries = 5, j, n; |
1476 | u32 pipe_bpc; | ||
1477 | |||
1478 | temp = I915_READ(pipeconf_reg); | ||
1479 | pipe_bpc = temp & PIPE_BPC_MASK; | ||
1496 | 1480 | ||
1497 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 1481 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
1498 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 1482 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
@@ -1524,6 +1508,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1524 | 1508 | ||
1525 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 1509 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
1526 | temp = I915_READ(fdi_rx_reg); | 1510 | temp = I915_READ(fdi_rx_reg); |
1511 | /* | ||
1512 | * make the BPC in FDI Rx be consistent with that in | ||
1513 | * pipeconf reg. | ||
1514 | */ | ||
1515 | temp &= ~(0x7 << 16); | ||
1516 | temp |= (pipe_bpc << 11); | ||
1527 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | | 1517 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | |
1528 | FDI_SEL_PCDCLK | | 1518 | FDI_SEL_PCDCLK | |
1529 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ | 1519 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ |
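The shift in this hunk is easier to follow with the bit positions spelled out: PIPECONF keeps the pipe's BPC selection in the 3-bit field covered by PIPE_BPC_MASK (bits 7:5), while the FDI RX control register keeps the equivalent field at bits 18:16, so a value extracted with PIPE_BPC_MASK only needs an extra left shift of 11 to line up. A condensed view of the read-modify-write, using the same registers and macros as the patch:

u32 pipeconf = I915_READ(pipeconf_reg);
u32 pipe_bpc = pipeconf & PIPE_BPC_MASK;	/* 3-bit BPC field, still at bits 7:5 */
u32 fdi_rx = I915_READ(fdi_rx_reg);

fdi_rx &= ~(0x7 << 16);		/* clear the FDI RX BPC field (bits 18:16) */
fdi_rx |= pipe_bpc << 11;	/* bits 7:5 shifted up to bits 18:16       */
I915_WRITE(fdi_rx_reg, fdi_rx);

The later transcoder hunks apply the same idea without the shift, because the transcoder configuration register uses the same bit positions as PIPECONF (the patch masks with PIPE_BPC_MASK and ORs pipe_bpc back in unchanged).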
@@ -1666,6 +1656,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1666 | 1656 | ||
1667 | /* enable PCH transcoder */ | 1657 | /* enable PCH transcoder */ |
1668 | temp = I915_READ(transconf_reg); | 1658 | temp = I915_READ(transconf_reg); |
1659 | /* | ||
1660 | * make the BPC in transcoder be consistent with | ||
1661 | * that in pipeconf reg. | ||
1662 | */ | ||
1663 | temp &= ~PIPE_BPC_MASK; | ||
1664 | temp |= pipe_bpc; | ||
1669 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | 1665 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); |
1670 | I915_READ(transconf_reg); | 1666 | I915_READ(transconf_reg); |
1671 | 1667 | ||
@@ -1745,6 +1741,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1745 | I915_READ(fdi_tx_reg); | 1741 | I915_READ(fdi_tx_reg); |
1746 | 1742 | ||
1747 | temp = I915_READ(fdi_rx_reg); | 1743 | temp = I915_READ(fdi_rx_reg); |
1744 | /* BPC in FDI rx is consistent with that in pipeconf */ | ||
1745 | temp &= ~(0x07 << 16); | ||
1746 | temp |= (pipe_bpc << 11); | ||
1748 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); | 1747 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); |
1749 | I915_READ(fdi_rx_reg); | 1748 | I915_READ(fdi_rx_reg); |
1750 | 1749 | ||
@@ -1789,7 +1788,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1789 | } | 1788 | } |
1790 | } | 1789 | } |
1791 | } | 1790 | } |
1792 | 1791 | temp = I915_READ(transconf_reg); | |
1792 | /* BPC in transcoder is consistent with that in pipeconf */ | ||
1793 | temp &= ~PIPE_BPC_MASK; | ||
1794 | temp |= pipe_bpc; | ||
1795 | I915_WRITE(transconf_reg, temp); | ||
1796 | I915_READ(transconf_reg); | ||
1793 | udelay(100); | 1797 | udelay(100); |
1794 | 1798 | ||
1795 | /* disable PCH DPLL */ | 1799 | /* disable PCH DPLL */ |
@@ -2448,7 +2452,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, | |||
2448 | * A value of 5us seems to be a good balance; safe for very low end | 2452 | * A value of 5us seems to be a good balance; safe for very low end |
2449 | * platforms but not overly aggressive on lower latency configs. | 2453 | * platforms but not overly aggressive on lower latency configs. |
2450 | */ | 2454 | */ |
2451 | const static int latency_ns = 5000; | 2455 | static const int latency_ns = 5000; |
2452 | 2456 | ||
2453 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | 2457 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) |
2454 | { | 2458 | { |
@@ -2559,7 +2563,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2559 | /* Calc sr entries for one plane configs */ | 2563 | /* Calc sr entries for one plane configs */ |
2560 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 2564 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { |
2561 | /* self-refresh has much higher latency */ | 2565 | /* self-refresh has much higher latency */ |
2562 | const static int sr_latency_ns = 12000; | 2566 | static const int sr_latency_ns = 12000; |
2563 | 2567 | ||
2564 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2568 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2565 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2569 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
@@ -2598,7 +2602,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2598 | /* Calc sr entries for one plane configs */ | 2602 | /* Calc sr entries for one plane configs */ |
2599 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 2603 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { |
2600 | /* self-refresh has much higher latency */ | 2604 | /* self-refresh has much higher latency */ |
2601 | const static int sr_latency_ns = 12000; | 2605 | static const int sr_latency_ns = 12000; |
2602 | 2606 | ||
2603 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2607 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2604 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2608 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
@@ -2667,7 +2671,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2667 | if (HAS_FW_BLC(dev) && sr_hdisplay && | 2671 | if (HAS_FW_BLC(dev) && sr_hdisplay && |
2668 | (!planea_clock || !planeb_clock)) { | 2672 | (!planea_clock || !planeb_clock)) { |
2669 | /* self-refresh has much higher latency */ | 2673 | /* self-refresh has much higher latency */ |
2670 | const static int sr_latency_ns = 6000; | 2674 | static const int sr_latency_ns = 6000; |
2671 | 2675 | ||
2672 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2676 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2673 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2677 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
@@ -2969,6 +2973,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2969 | 2973 | ||
2970 | /* determine panel color depth */ | 2974 | /* determine panel color depth */ |
2971 | temp = I915_READ(pipeconf_reg); | 2975 | temp = I915_READ(pipeconf_reg); |
2976 | temp &= ~PIPE_BPC_MASK; | ||
2977 | if (is_lvds) { | ||
2978 | int lvds_reg = I915_READ(PCH_LVDS); | ||
2979 | /* the BPC will be 6 if it is 18-bit LVDS panel */ | ||
2980 | if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) | ||
2981 | temp |= PIPE_8BPC; | ||
2982 | else | ||
2983 | temp |= PIPE_6BPC; | ||
2984 | } else | ||
2985 | temp |= PIPE_8BPC; | ||
2986 | I915_WRITE(pipeconf_reg, temp); | ||
2987 | I915_READ(pipeconf_reg); | ||
2972 | 2988 | ||
2973 | switch (temp & PIPE_BPC_MASK) { | 2989 | switch (temp & PIPE_BPC_MASK) { |
2974 | case PIPE_8BPC: | 2990 | case PIPE_8BPC: |
@@ -3195,7 +3211,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3195 | * appropriately here, but we need to look more thoroughly into how | 3211 | * appropriately here, but we need to look more thoroughly into how |
3196 | * panels behave in the two modes. | 3212 | * panels behave in the two modes. |
3197 | */ | 3213 | */ |
3198 | 3214 | /* set the dithering flag */ | |
3215 | if (IS_I965G(dev)) { | ||
3216 | if (dev_priv->lvds_dither) { | ||
3217 | if (IS_IRONLAKE(dev)) | ||
3218 | pipeconf |= PIPE_ENABLE_DITHER; | ||
3219 | else | ||
3220 | lvds |= LVDS_ENABLE_DITHER; | ||
3221 | } else { | ||
3222 | if (IS_IRONLAKE(dev)) | ||
3223 | pipeconf &= ~PIPE_ENABLE_DITHER; | ||
3224 | else | ||
3225 | lvds &= ~LVDS_ENABLE_DITHER; | ||
3226 | } | ||
3227 | } | ||
3199 | I915_WRITE(lvds_reg, lvds); | 3228 | I915_WRITE(lvds_reg, lvds); |
3200 | I915_READ(lvds_reg); | 3229 | I915_READ(lvds_reg); |
3201 | } | 3230 | } |
@@ -3385,7 +3414,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3385 | 3414 | ||
3386 | /* we only need to pin inside GTT if cursor is non-phy */ | 3415 | /* we only need to pin inside GTT if cursor is non-phy */ |
3387 | mutex_lock(&dev->struct_mutex); | 3416 | mutex_lock(&dev->struct_mutex); |
3388 | if (!dev_priv->cursor_needs_physical) { | 3417 | if (!dev_priv->info->cursor_needs_physical) { |
3389 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | 3418 | ret = i915_gem_object_pin(bo, PAGE_SIZE); |
3390 | if (ret) { | 3419 | if (ret) { |
3391 | DRM_ERROR("failed to pin cursor bo\n"); | 3420 | DRM_ERROR("failed to pin cursor bo\n"); |
@@ -3420,7 +3449,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3420 | I915_WRITE(base, addr); | 3449 | I915_WRITE(base, addr); |
3421 | 3450 | ||
3422 | if (intel_crtc->cursor_bo) { | 3451 | if (intel_crtc->cursor_bo) { |
3423 | if (dev_priv->cursor_needs_physical) { | 3452 | if (dev_priv->info->cursor_needs_physical) { |
3424 | if (intel_crtc->cursor_bo != bo) | 3453 | if (intel_crtc->cursor_bo != bo) |
3425 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | 3454 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
3426 | } else | 3455 | } else |
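The switch from dev_priv->cursor_needs_physical to dev_priv->info->cursor_needs_physical reflects the move of per-chip quirks into a static device-info table selected by PCI ID (handled in the i915_drv.c/i915_drv.h portion of this series). A minimal sketch of that idea; apart from cursor_needs_physical, the field and entry names here are invented:

/* sketch: one static descriptor per supported chip generation */
struct example_device_info {
	unsigned int is_i965g:1;
	unsigned int is_g4x:1;
	unsigned int cursor_needs_physical:1;	/* cursor base wants a physical address */
};

static const struct example_device_info example_i915g_info = {
	.cursor_needs_physical = 1,
};

/* at probe time dev_priv->info is pointed at the entry matching the PCI ID,
 * so later code can simply test dev_priv->info->cursor_needs_physical
 */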
@@ -3779,125 +3808,6 @@ static void intel_gpu_idle_timer(unsigned long arg) | |||
3779 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 3808 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
3780 | } | 3809 | } |
3781 | 3810 | ||
3782 | void intel_increase_renderclock(struct drm_device *dev, bool schedule) | ||
3783 | { | ||
3784 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3785 | |||
3786 | if (IS_IRONLAKE(dev)) | ||
3787 | return; | ||
3788 | |||
3789 | if (!dev_priv->render_reclock_avail) { | ||
3790 | DRM_DEBUG_DRIVER("not reclocking render clock\n"); | ||
3791 | return; | ||
3792 | } | ||
3793 | |||
3794 | /* Restore render clock frequency to original value */ | ||
3795 | if (IS_G4X(dev) || IS_I9XX(dev)) | ||
3796 | pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock); | ||
3797 | else if (IS_I85X(dev)) | ||
3798 | pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock); | ||
3799 | DRM_DEBUG_DRIVER("increasing render clock frequency\n"); | ||
3800 | |||
3801 | /* Schedule downclock */ | ||
3802 | if (schedule) | ||
3803 | mod_timer(&dev_priv->idle_timer, jiffies + | ||
3804 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
3805 | } | ||
3806 | |||
3807 | void intel_decrease_renderclock(struct drm_device *dev) | ||
3808 | { | ||
3809 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3810 | |||
3811 | if (IS_IRONLAKE(dev)) | ||
3812 | return; | ||
3813 | |||
3814 | if (!dev_priv->render_reclock_avail) { | ||
3815 | DRM_DEBUG_DRIVER("not reclocking render clock\n"); | ||
3816 | return; | ||
3817 | } | ||
3818 | |||
3819 | if (IS_G4X(dev)) { | ||
3820 | u16 gcfgc; | ||
3821 | |||
3822 | /* Adjust render clock... */ | ||
3823 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3824 | |||
3825 | /* Down to minimum... */ | ||
3826 | gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK; | ||
3827 | gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ; | ||
3828 | |||
3829 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3830 | } else if (IS_I965G(dev)) { | ||
3831 | u16 gcfgc; | ||
3832 | |||
3833 | /* Adjust render clock... */ | ||
3834 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3835 | |||
3836 | /* Down to minimum... */ | ||
3837 | gcfgc &= ~I965_GC_RENDER_CLOCK_MASK; | ||
3838 | gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ; | ||
3839 | |||
3840 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3841 | } else if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
3842 | u16 gcfgc; | ||
3843 | |||
3844 | /* Adjust render clock... */ | ||
3845 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3846 | |||
3847 | /* Down to minimum... */ | ||
3848 | gcfgc &= ~I945_GC_RENDER_CLOCK_MASK; | ||
3849 | gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ; | ||
3850 | |||
3851 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3852 | } else if (IS_I915G(dev)) { | ||
3853 | u16 gcfgc; | ||
3854 | |||
3855 | /* Adjust render clock... */ | ||
3856 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3857 | |||
3858 | /* Down to minimum... */ | ||
3859 | gcfgc &= ~I915_GC_RENDER_CLOCK_MASK; | ||
3860 | gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ; | ||
3861 | |||
3862 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3863 | } else if (IS_I85X(dev)) { | ||
3864 | u16 hpllcc; | ||
3865 | |||
3866 | /* Adjust render clock... */ | ||
3867 | pci_read_config_word(dev->pdev, HPLLCC, &hpllcc); | ||
3868 | |||
3869 | /* Up to maximum... */ | ||
3870 | hpllcc &= ~GC_CLOCK_CONTROL_MASK; | ||
3871 | hpllcc |= GC_CLOCK_133_200; | ||
3872 | |||
3873 | pci_write_config_word(dev->pdev, HPLLCC, hpllcc); | ||
3874 | } | ||
3875 | DRM_DEBUG_DRIVER("decreasing render clock frequency\n"); | ||
3876 | } | ||
3877 | |||
3878 | /* Note that no increase function is needed for this - increase_renderclock() | ||
3879 | * will also rewrite these bits | ||
3880 | */ | ||
3881 | void intel_decrease_displayclock(struct drm_device *dev) | ||
3882 | { | ||
3883 | if (IS_IRONLAKE(dev)) | ||
3884 | return; | ||
3885 | |||
3886 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) || | ||
3887 | IS_I915GM(dev)) { | ||
3888 | u16 gcfgc; | ||
3889 | |||
3890 | /* Adjust render clock... */ | ||
3891 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3892 | |||
3893 | /* Down to minimum... */ | ||
3894 | gcfgc &= ~0xf0; | ||
3895 | gcfgc |= 0x80; | ||
3896 | |||
3897 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3898 | } | ||
3899 | } | ||
3900 | |||
3901 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ | 3811 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ |
3902 | 3812 | ||
3903 | static void intel_crtc_idle_timer(unsigned long arg) | 3813 | static void intel_crtc_idle_timer(unsigned long arg) |
@@ -4011,12 +3921,6 @@ static void intel_idle_update(struct work_struct *work) | |||
4011 | 3921 | ||
4012 | mutex_lock(&dev->struct_mutex); | 3922 | mutex_lock(&dev->struct_mutex); |
4013 | 3923 | ||
4014 | /* GPU isn't processing, downclock it. */ | ||
4015 | if (!dev_priv->busy) { | ||
4016 | intel_decrease_renderclock(dev); | ||
4017 | intel_decrease_displayclock(dev); | ||
4018 | } | ||
4019 | |||
4020 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 3924 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
4021 | /* Skip inactive CRTCs */ | 3925 | /* Skip inactive CRTCs */ |
4022 | if (!crtc->fb) | 3926 | if (!crtc->fb) |
@@ -4050,13 +3954,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
4050 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 3954 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
4051 | return; | 3955 | return; |
4052 | 3956 | ||
4053 | if (!dev_priv->busy) { | 3957 | if (!dev_priv->busy) |
4054 | dev_priv->busy = true; | 3958 | dev_priv->busy = true; |
4055 | intel_increase_renderclock(dev, true); | 3959 | else |
4056 | } else { | ||
4057 | mod_timer(&dev_priv->idle_timer, jiffies + | 3960 | mod_timer(&dev_priv->idle_timer, jiffies + |
4058 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | 3961 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); |
4059 | } | ||
4060 | 3962 | ||
4061 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 3963 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
4062 | if (!crtc->fb) | 3964 | if (!crtc->fb) |
@@ -4400,29 +4302,43 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4400 | bool found = false; | 4302 | bool found = false; |
4401 | 4303 | ||
4402 | if (I915_READ(SDVOB) & SDVO_DETECTED) { | 4304 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
4305 | DRM_DEBUG_KMS("probing SDVOB\n"); | ||
4403 | found = intel_sdvo_init(dev, SDVOB); | 4306 | found = intel_sdvo_init(dev, SDVOB); |
4404 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 4307 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { |
4308 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); | ||
4405 | intel_hdmi_init(dev, SDVOB); | 4309 | intel_hdmi_init(dev, SDVOB); |
4310 | } | ||
4406 | 4311 | ||
4407 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) | 4312 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) { |
4313 | DRM_DEBUG_KMS("probing DP_B\n"); | ||
4408 | intel_dp_init(dev, DP_B); | 4314 | intel_dp_init(dev, DP_B); |
4315 | } | ||
4409 | } | 4316 | } |
4410 | 4317 | ||
4411 | /* Before G4X SDVOC doesn't have its own detect register */ | 4318 | /* Before G4X SDVOC doesn't have its own detect register */ |
4412 | 4319 | ||
4413 | if (I915_READ(SDVOB) & SDVO_DETECTED) | 4320 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
4321 | DRM_DEBUG_KMS("probing SDVOC\n"); | ||
4414 | found = intel_sdvo_init(dev, SDVOC); | 4322 | found = intel_sdvo_init(dev, SDVOC); |
4323 | } | ||
4415 | 4324 | ||
4416 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { | 4325 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { |
4417 | 4326 | ||
4418 | if (SUPPORTS_INTEGRATED_HDMI(dev)) | 4327 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { |
4328 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); | ||
4419 | intel_hdmi_init(dev, SDVOC); | 4329 | intel_hdmi_init(dev, SDVOC); |
4420 | if (SUPPORTS_INTEGRATED_DP(dev)) | 4330 | } |
4331 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
4332 | DRM_DEBUG_KMS("probing DP_C\n"); | ||
4421 | intel_dp_init(dev, DP_C); | 4333 | intel_dp_init(dev, DP_C); |
4334 | } | ||
4422 | } | 4335 | } |
4423 | 4336 | ||
4424 | if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) | 4337 | if (SUPPORTS_INTEGRATED_DP(dev) && |
4338 | (I915_READ(DP_D) & DP_DETECTED)) { | ||
4339 | DRM_DEBUG_KMS("probing DP_D\n"); | ||
4425 | intel_dp_init(dev, DP_D); | 4340 | intel_dp_init(dev, DP_D); |
4341 | } | ||
4426 | } else if (IS_I8XX(dev)) | 4342 | } else if (IS_I8XX(dev)) |
4427 | intel_dvo_init(dev); | 4343 | intel_dvo_init(dev); |
4428 | 4344 | ||
@@ -4527,6 +4443,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
4527 | .fb_changed = intelfb_probe, | 4443 | .fb_changed = intelfb_probe, |
4528 | }; | 4444 | }; |
4529 | 4445 | ||
4446 | static struct drm_gem_object * | ||
4447 | intel_alloc_power_context(struct drm_device *dev) | ||
4448 | { | ||
4449 | struct drm_gem_object *pwrctx; | ||
4450 | int ret; | ||
4451 | |||
4452 | pwrctx = drm_gem_object_alloc(dev, 4096); | ||
4453 | if (!pwrctx) { | ||
4454 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); | ||
4455 | return NULL; | ||
4456 | } | ||
4457 | |||
4458 | mutex_lock(&dev->struct_mutex); | ||
4459 | ret = i915_gem_object_pin(pwrctx, 4096); | ||
4460 | if (ret) { | ||
4461 | DRM_ERROR("failed to pin power context: %d\n", ret); | ||
4462 | goto err_unref; | ||
4463 | } | ||
4464 | |||
4465 | ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1); | ||
4466 | if (ret) { | ||
4467 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); | ||
4468 | goto err_unpin; | ||
4469 | } | ||
4470 | mutex_unlock(&dev->struct_mutex); | ||
4471 | |||
4472 | return pwrctx; | ||
4473 | |||
4474 | err_unpin: | ||
4475 | i915_gem_object_unpin(pwrctx); | ||
4476 | err_unref: | ||
4477 | drm_gem_object_unreference(pwrctx); | ||
4478 | mutex_unlock(&dev->struct_mutex); | ||
4479 | return NULL; | ||
4480 | } | ||
4481 | |||
4530 | void intel_init_clock_gating(struct drm_device *dev) | 4482 | void intel_init_clock_gating(struct drm_device *dev) |
4531 | { | 4483 | { |
4532 | struct drm_i915_private *dev_priv = dev->dev_private; | 4484 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -4579,42 +4531,27 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4579 | * GPU can automatically power down the render unit if given a page | 4531 | * GPU can automatically power down the render unit if given a page |
4580 | * to save state. | 4532 | * to save state. |
4581 | */ | 4533 | */ |
4582 | if (I915_HAS_RC6(dev)) { | 4534 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { |
4583 | struct drm_gem_object *pwrctx; | 4535 | struct drm_i915_gem_object *obj_priv = NULL; |
4584 | struct drm_i915_gem_object *obj_priv; | ||
4585 | int ret; | ||
4586 | 4536 | ||
4587 | if (dev_priv->pwrctx) { | 4537 | if (dev_priv->pwrctx) { |
4588 | obj_priv = dev_priv->pwrctx->driver_private; | 4538 | obj_priv = dev_priv->pwrctx->driver_private; |
4589 | } else { | 4539 | } else { |
4590 | pwrctx = drm_gem_object_alloc(dev, 4096); | 4540 | struct drm_gem_object *pwrctx; |
4591 | if (!pwrctx) { | ||
4592 | DRM_DEBUG("failed to alloc power context, " | ||
4593 | "RC6 disabled\n"); | ||
4594 | goto out; | ||
4595 | } | ||
4596 | 4541 | ||
4597 | ret = i915_gem_object_pin(pwrctx, 4096); | 4542 | pwrctx = intel_alloc_power_context(dev); |
4598 | if (ret) { | 4543 | if (pwrctx) { |
4599 | DRM_ERROR("failed to pin power context: %d\n", | 4544 | dev_priv->pwrctx = pwrctx; |
4600 | ret); | 4545 | obj_priv = pwrctx->driver_private; |
4601 | drm_gem_object_unreference(pwrctx); | ||
4602 | goto out; | ||
4603 | } | 4546 | } |
4604 | |||
4605 | i915_gem_object_set_to_gtt_domain(pwrctx, 1); | ||
4606 | |||
4607 | dev_priv->pwrctx = pwrctx; | ||
4608 | obj_priv = pwrctx->driver_private; | ||
4609 | } | 4547 | } |
4610 | 4548 | ||
4611 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); | 4549 | if (obj_priv) { |
4612 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 4550 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); |
4613 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | 4551 | I915_WRITE(MCHBAR_RENDER_STANDBY, |
4552 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | ||
4553 | } | ||
4614 | } | 4554 | } |
4615 | |||
4616 | out: | ||
4617 | return; | ||
4618 | } | 4555 | } |
4619 | 4556 | ||
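With the allocation and pinning split out into intel_alloc_power_context(), the RC6 path in intel_init_clock_gating() reduces to: reuse dev_priv->pwrctx if it already exists, otherwise try to allocate one, and only touch the hardware if a pinned object is actually available. A condensed restatement of the hunk above, using the same registers and helpers as the patch:

if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
	struct drm_i915_gem_object *obj_priv = NULL;

	if (dev_priv->pwrctx) {
		obj_priv = dev_priv->pwrctx->driver_private;
	} else {
		struct drm_gem_object *pwrctx = intel_alloc_power_context(dev);

		if (pwrctx) {
			dev_priv->pwrctx = pwrctx;
			obj_priv = pwrctx->driver_private;
		}
	}

	if (obj_priv) {
		/* hand the GPU a page to save render state in, allow RC6 */
		I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
		I915_WRITE(MCHBAR_RENDER_STANDBY,
			   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
	}
}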
4620 | /* Set up chip specific display functions */ | 4557 | /* Set up chip specific display functions */ |
@@ -4770,7 +4707,6 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
4770 | del_timer_sync(&intel_crtc->idle_timer); | 4707 | del_timer_sync(&intel_crtc->idle_timer); |
4771 | } | 4708 | } |
4772 | 4709 | ||
4773 | intel_increase_renderclock(dev, false); | ||
4774 | del_timer_sync(&dev_priv->idle_timer); | 4710 | del_timer_sync(&dev_priv->idle_timer); |
4775 | 4711 | ||
4776 | if (dev_priv->display.disable_fbc) | 4712 | if (dev_priv->display.disable_fbc) |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 4e7aa8b7b938..1349d9fd01c4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1402,14 +1402,20 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1402 | break; | 1402 | break; |
1403 | case DP_B: | 1403 | case DP_B: |
1404 | case PCH_DP_B: | 1404 | case PCH_DP_B: |
1405 | dev_priv->hotplug_supported_mask |= | ||
1406 | HDMIB_HOTPLUG_INT_STATUS; | ||
1405 | name = "DPDDC-B"; | 1407 | name = "DPDDC-B"; |
1406 | break; | 1408 | break; |
1407 | case DP_C: | 1409 | case DP_C: |
1408 | case PCH_DP_C: | 1410 | case PCH_DP_C: |
1411 | dev_priv->hotplug_supported_mask |= | ||
1412 | HDMIC_HOTPLUG_INT_STATUS; | ||
1409 | name = "DPDDC-C"; | 1413 | name = "DPDDC-C"; |
1410 | break; | 1414 | break; |
1411 | case DP_D: | 1415 | case DP_D: |
1412 | case PCH_DP_D: | 1416 | case PCH_DP_D: |
1417 | dev_priv->hotplug_supported_mask |= | ||
1418 | HDMID_HOTPLUG_INT_STATUS; | ||
1413 | name = "DPDDC-D"; | 1419 | name = "DPDDC-D"; |
1414 | break; | 1420 | break; |
1415 | } | 1421 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f04dbbe7d400..06431941b233 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -303,21 +303,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
303 | if (sdvox_reg == SDVOB) { | 303 | if (sdvox_reg == SDVOB) { |
304 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | 304 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); |
305 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); | 305 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); |
306 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | ||
306 | } else if (sdvox_reg == SDVOC) { | 307 | } else if (sdvox_reg == SDVOC) { |
307 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | 308 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); |
308 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); | 309 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); |
310 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
309 | } else if (sdvox_reg == HDMIB) { | 311 | } else if (sdvox_reg == HDMIB) { |
310 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | 312 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); |
311 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, | 313 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, |
312 | "HDMIB"); | 314 | "HDMIB"); |
315 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | ||
313 | } else if (sdvox_reg == HDMIC) { | 316 | } else if (sdvox_reg == HDMIC) { |
314 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); | 317 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); |
315 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, | 318 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, |
316 | "HDMIC"); | 319 | "HDMIC"); |
320 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
317 | } else if (sdvox_reg == HDMID) { | 321 | } else if (sdvox_reg == HDMID) { |
318 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); | 322 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); |
319 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, | 323 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, |
320 | "HDMID"); | 324 | "HDMID"); |
325 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; | ||
321 | } | 326 | } |
322 | if (!intel_output->ddc_bus) | 327 | if (!intel_output->ddc_bus) |
323 | goto err_connector; | 328 | goto err_connector; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 3118ce274e67..f4b4aa242df1 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -608,6 +608,13 @@ static const struct dmi_system_id bad_lid_status[] = { | |||
608 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), | 608 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), |
609 | }, | 609 | }, |
610 | }, | 610 | }, |
611 | { | ||
612 | .ident = "PC-81005", | ||
613 | .matches = { | ||
614 | DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), | ||
615 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), | ||
616 | }, | ||
617 | }, | ||
611 | { } | 618 | { } |
612 | }; | 619 | }; |
613 | 620 | ||
@@ -679,7 +686,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
679 | struct drm_i915_private *dev_priv = | 686 | struct drm_i915_private *dev_priv = |
680 | container_of(nb, struct drm_i915_private, lid_notifier); | 687 | container_of(nb, struct drm_i915_private, lid_notifier); |
681 | struct drm_device *dev = dev_priv->dev; | 688 | struct drm_device *dev = dev_priv->dev; |
689 | struct drm_connector *connector = dev_priv->int_lvds_connector; | ||
682 | 690 | ||
691 | /* | ||
692 | * check and update the status of LVDS connector after receiving | ||
693 | * the LID notification event. | ||
694 | */ | ||
695 | if (connector) | ||
696 | connector->status = connector->funcs->detect(connector); | ||
683 | if (!acpi_lid_open()) { | 697 | if (!acpi_lid_open()) { |
684 | dev_priv->modeset_on_lid = 1; | 698 | dev_priv->modeset_on_lid = 1; |
685 | return NOTIFY_OK; | 699 | return NOTIFY_OK; |
@@ -854,65 +868,6 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
854 | { } /* terminating entry */ | 868 | { } /* terminating entry */ |
855 | }; | 869 | }; |
856 | 870 | ||
857 | #ifdef CONFIG_ACPI | ||
858 | /* | ||
859 | * check_lid_device -- check whether @handle is an ACPI LID device. | ||
860 | * @handle: ACPI device handle | ||
861 | * @level : depth in the ACPI namespace tree | ||
862 | * @context: the number of LID device when we find the device | ||
863 | * @rv: a return value to fill if desired (Not use) | ||
864 | */ | ||
865 | static acpi_status | ||
866 | check_lid_device(acpi_handle handle, u32 level, void *context, | ||
867 | void **return_value) | ||
868 | { | ||
869 | struct acpi_device *acpi_dev; | ||
870 | int *lid_present = context; | ||
871 | |||
872 | acpi_dev = NULL; | ||
873 | /* Get the acpi device for device handle */ | ||
874 | if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) { | ||
875 | /* If there is no ACPI device for handle, return */ | ||
876 | return AE_OK; | ||
877 | } | ||
878 | |||
879 | if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7)) | ||
880 | *lid_present = 1; | ||
881 | |||
882 | return AE_OK; | ||
883 | } | ||
884 | |||
885 | /** | ||
886 | * check whether there exists the ACPI LID device by enumerating the ACPI | ||
887 | * device tree. | ||
888 | */ | ||
889 | static int intel_lid_present(void) | ||
890 | { | ||
891 | int lid_present = 0; | ||
892 | |||
893 | if (acpi_disabled) { | ||
894 | /* If ACPI is disabled, there is no ACPI device tree to | ||
895 | * check, so assume the LID device would have been present. | ||
896 | */ | ||
897 | return 1; | ||
898 | } | ||
899 | |||
900 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
901 | ACPI_UINT32_MAX, | ||
902 | check_lid_device, NULL, &lid_present, NULL); | ||
903 | |||
904 | return lid_present; | ||
905 | } | ||
906 | #else | ||
907 | static int intel_lid_present(void) | ||
908 | { | ||
909 | /* In the absence of ACPI built in, assume that the LID device would | ||
910 | * have been present. | ||
911 | */ | ||
912 | return 1; | ||
913 | } | ||
914 | #endif | ||
915 | |||
916 | /** | 871 | /** |
917 | * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID | 872 | * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID |
918 | * @dev: drm device | 873 | * @dev: drm device |
@@ -1031,12 +986,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
1031 | if (dmi_check_system(intel_no_lvds)) | 986 | if (dmi_check_system(intel_no_lvds)) |
1032 | return; | 987 | return; |
1033 | 988 | ||
1034 | /* | 989 | if (!lvds_is_present_in_vbt(dev)) { |
1035 | * Assume LVDS is present if there's an ACPI lid device or if the | 990 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); |
1036 | * device is present in the VBT. | ||
1037 | */ | ||
1038 | if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) { | ||
1039 | DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n"); | ||
1040 | return; | 991 | return; |
1041 | } | 992 | } |
1042 | 993 | ||
@@ -1180,6 +1131,8 @@ out: | |||
1180 | DRM_DEBUG_KMS("lid notifier registration failed\n"); | 1131 | DRM_DEBUG_KMS("lid notifier registration failed\n"); |
1181 | dev_priv->lid_notifier.notifier_call = NULL; | 1132 | dev_priv->lid_notifier.notifier_call = NULL; |
1182 | } | 1133 | } |
1134 | /* keep the LVDS connector */ | ||
1135 | dev_priv->int_lvds_connector = connector; | ||
1183 | drm_sysfs_connector_add(connector); | 1136 | drm_sysfs_connector_add(connector); |
1184 | return; | 1137 | return; |
1185 | 1138 | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 24a3dc99716c..de5144c8c153 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -2662,6 +2662,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2662 | 2662 | ||
2663 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 2663 | bool intel_sdvo_init(struct drm_device *dev, int output_device) |
2664 | { | 2664 | { |
2665 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2665 | struct drm_connector *connector; | 2666 | struct drm_connector *connector; |
2666 | struct intel_output *intel_output; | 2667 | struct intel_output *intel_output; |
2667 | struct intel_sdvo_priv *sdvo_priv; | 2668 | struct intel_sdvo_priv *sdvo_priv; |
@@ -2708,10 +2709,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2708 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); | 2709 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); |
2709 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2710 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
2710 | "SDVOB/VGA DDC BUS"); | 2711 | "SDVOB/VGA DDC BUS"); |
2712 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | ||
2711 | } else { | 2713 | } else { |
2712 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); | 2714 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); |
2713 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2715 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
2714 | "SDVOC/VGA DDC BUS"); | 2716 | "SDVOC/VGA DDC BUS"); |
2717 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | ||
2715 | } | 2718 | } |
2716 | 2719 | ||
2717 | if (intel_output->ddc_bus == NULL) | 2720 | if (intel_output->ddc_bus == NULL) |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 552ec110b741..1d5b9b7b033f 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1840,6 +1840,8 @@ intel_tv_init(struct drm_device *dev) | |||
1840 | drm_connector_attach_property(connector, | 1840 | drm_connector_attach_property(connector, |
1841 | dev->mode_config.tv_bottom_margin_property, | 1841 | dev->mode_config.tv_bottom_margin_property, |
1842 | tv_priv->margin[TV_MARGIN_BOTTOM]); | 1842 | tv_priv->margin[TV_MARGIN_BOTTOM]); |
1843 | |||
1844 | dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS; | ||
1843 | out: | 1845 | out: |
1844 | drm_sysfs_connector_add(connector); | 1846 | drm_sysfs_connector_add(connector); |
1845 | } | 1847 | } |
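Taken together with the i915_driver_irq_postinstall() change near the top of this diff, these per-output hunks split hotplug setup into two halves: each connector init function ORs the status bit it can service into dev_priv->hotplug_supported_mask, and the IRQ postinstall later translates that mask into PORT_HOTPLUG_EN enable bits (which is also why postinstall must now run after intel_modeset_init). A schematic sketch of the two halves; the function names here are invented:

/* producer: each output init advertises the status bit it can service */
static void example_output_init(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* ... connector, encoder and DDC bus setup elided ... */
	dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
}

/* consumer: IRQ postinstall maps advertised status bits to enable bits */
static void example_enable_hotplug(drm_i915_private_t *dev_priv, u32 *hotplug_en)
{
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		*hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	/* ... likewise for HDMIC/HDMID, SDVOB/SDVOC and CRT ... */
}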
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index ec3f5e80a5df..b64a8d7cdf6d 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h | |||
@@ -188,6 +188,7 @@ typedef struct _drm_i915_sarea { | |||
188 | #define DRM_I915_GEM_MADVISE 0x26 | 188 | #define DRM_I915_GEM_MADVISE 0x26 |
189 | #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 | 189 | #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 |
190 | #define DRM_I915_OVERLAY_ATTRS 0x28 | 190 | #define DRM_I915_OVERLAY_ATTRS 0x28 |
191 | #define DRM_I915_GEM_EXECBUFFER2 0x29 | ||
191 | 192 | ||
192 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) | 193 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) |
193 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) | 194 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) |
@@ -207,6 +208,7 @@ typedef struct _drm_i915_sarea { | |||
207 | #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) | 208 | #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) |
208 | #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) | 209 | #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) |
209 | #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) | 210 | #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) |
211 | #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) | ||
210 | #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) | 212 | #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) |
211 | #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) | 213 | #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) |
212 | #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) | 214 | #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) |
@@ -272,6 +274,7 @@ typedef struct drm_i915_irq_wait { | |||
272 | #define I915_PARAM_NUM_FENCES_AVAIL 6 | 274 | #define I915_PARAM_NUM_FENCES_AVAIL 6 |
273 | #define I915_PARAM_HAS_OVERLAY 7 | 275 | #define I915_PARAM_HAS_OVERLAY 7 |
274 | #define I915_PARAM_HAS_PAGEFLIPPING 8 | 276 | #define I915_PARAM_HAS_PAGEFLIPPING 8 |
277 | #define I915_PARAM_HAS_EXECBUF2 9 | ||
275 | 278 | ||
276 | typedef struct drm_i915_getparam { | 279 | typedef struct drm_i915_getparam { |
277 | int param; | 280 | int param; |
@@ -567,6 +570,57 @@ struct drm_i915_gem_execbuffer { | |||
567 | __u64 cliprects_ptr; | 570 | __u64 cliprects_ptr; |
568 | }; | 571 | }; |
569 | 572 | ||
573 | struct drm_i915_gem_exec_object2 { | ||
574 | /** | ||
575 | * User's handle for a buffer to be bound into the GTT for this | ||
576 | * operation. | ||
577 | */ | ||
578 | __u32 handle; | ||
579 | |||
580 | /** Number of relocations to be performed on this buffer */ | ||
581 | __u32 relocation_count; | ||
582 | /** | ||
583 | * Pointer to array of struct drm_i915_gem_relocation_entry containing | ||
584 | * the relocations to be performed in this buffer. | ||
585 | */ | ||
586 | __u64 relocs_ptr; | ||
587 | |||
588 | /** Required alignment in graphics aperture */ | ||
589 | __u64 alignment; | ||
590 | |||
591 | /** | ||
592 | * Returned value of the updated offset of the object, for future | ||
593 | * presumed_offset writes. | ||
594 | */ | ||
595 | __u64 offset; | ||
596 | |||
597 | #define EXEC_OBJECT_NEEDS_FENCE (1<<0) | ||
598 | __u64 flags; | ||
599 | __u64 rsvd1; | ||
600 | __u64 rsvd2; | ||
601 | }; | ||
602 | |||
603 | struct drm_i915_gem_execbuffer2 { | ||
604 | /** | ||
605 | * List of gem_exec_object2 structs | ||
606 | */ | ||
607 | __u64 buffers_ptr; | ||
608 | __u32 buffer_count; | ||
609 | |||
610 | /** Offset in the batchbuffer to start execution from. */ | ||
611 | __u32 batch_start_offset; | ||
612 | /** Bytes used in batchbuffer from batch_start_offset */ | ||
613 | __u32 batch_len; | ||
614 | __u32 DR1; | ||
615 | __u32 DR4; | ||
616 | __u32 num_cliprects; | ||
617 | /** This is a struct drm_clip_rect *cliprects */ | ||
618 | __u64 cliprects_ptr; | ||
619 | __u64 flags; /* currently unused */ | ||
620 | __u64 rsvd1; | ||
621 | __u64 rsvd2; | ||
622 | }; | ||
623 | |||
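The two structures above, together with DRM_IOCTL_I915_GEM_EXECBUFFER2 and I915_PARAM_HAS_EXECBUF2, describe the new submission path from userspace's side. The sketch below shows roughly how a libdrm-based client might probe for the ioctl and submit a single batch buffer; error handling is minimal and the handle/length values are placeholders, so treat it as an illustration rather than a reference implementation:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmIoctl() from libdrm */
#include "i915_drm.h"

static int has_execbuf2(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HAS_EXECBUF2;
	gp.value = &value;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;
	return value;
}

static int submit_batch(int fd, __u32 batch_handle, __u32 batch_len)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&obj, 0, sizeof(obj));
	obj.handle = batch_handle;	/* GEM handle of the batch buffer */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (__u64)(uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}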
570 | struct drm_i915_gem_pin { | 624 | struct drm_i915_gem_pin { |
571 | /** Handle of the buffer to be pinned. */ | 625 | /** Handle of the buffer to be pinned. */ |
572 | __u32 handle; | 626 | __u32 handle; |