author		Dave Airlie <airlied@redhat.com>	2013-09-09 22:36:55 -0400
committer	Dave Airlie <airlied@redhat.com>	2013-09-09 22:36:55 -0400
commit		48016851c89fcb1e9ea4daa7ace142e95f7875fe
tree		11c2ce83e2d618ae4fc5915ed82d60da77cd9bfa
parent		86a7e1224a68511d3a1ae0b7e11581b9d37723ae
parent		6e1b4fdad5157bb9e88777d525704aba24389bee
Merge tag 'drm-intel-fixes-2013-09-06' of git://people.freedesktop.org/~danvet/drm-intel into drm-fixes
- Early stolen mem reservation from Jesse in x86 boot code. Acked by Ingo
  and hpa. This was ready much earlier but somehow I've thought it'd go in
  through x86 trees, hence why this is late. Avoids the pci resource code
  to plant mmiobars in the middle of stolen mem and other ugliness.
- vgaarb improvements from Alex Williamson plus the fix from Ville for the
  vgacon->fbcon smooth transition "feature".
- Render pageflips on ivb/hsw to avoid stalls due to the ring switching
  when only flipping on the blitter (Chris).
- Deadlock fixes around our flush_workqueue which crept back in - lockdep
  isn't clever enough :(
- Shrinker recursion fix from Chris - this is the thing that blew the vma
  patches from Ben I've taken out of 3.12.
- Fixup for the relocation refactoring. Also an igt testcase to make sure
  we don't break this again.
- Pile of smaller fixups all over, shortlog has full details.

* tag 'drm-intel-fixes-2013-09-06' of git://people.freedesktop.org/~danvet/drm-intel: (29 commits)
  drm/i915: Delay disabling of VGA memory until vgacon->fbcon handoff is done
  drm/i915: try not to lose backlight CBLV precision
  drm/i915: Confine page flips to BCS on Valleyview
  drm/i915: Skip stolen region initialisation if none is reserved
  drm/i915: fix gpu hang vs. flip stall deadlocks
  drm/i915: Hold an object reference whilst we shrink it
  drm/i915: fix i9xx_crtc_clock_get for multiplied pixels
  drm/i915: handle sdvo input pixel multiplier correctly again
  drm/i915: fix hpd work vs. flush_work in the pageflip code deadlock
  drm/i915: fix up the relocate_entry refactoring
  drm/i915: Fix pipe config warnings when dealing with LVDS fixed mode
  drm/i915: Don't call sg_free_table() if sg_alloc_table() fails
  i915: Update VGA arbiter support for newer devices
  vgaarb: Fix VGA decodes changes
  vgaarb: Don't disable resources that are not owned
  drm/i915: Pin pages whilst mapping the dma-buf
  drm/i915: enable trickle feed on Haswell
  x86: add early quirk for reserving Intel graphics stolen memory v5
  drm/i915: split PCI IDs out into i915_drm.h v4
  i915_gem: Convert kmem_cache_alloc(...GFP_ZERO) to kmem_cache_zalloc
  ...
 arch/x86/kernel/early-quirks.c             | 154
 drivers/gpu/drm/i915/i915_debugfs.c        |  11
 drivers/gpu/drm/i915/i915_dma.c            |  15
 drivers/gpu/drm/i915/i915_drv.c            | 164
 drivers/gpu/drm/i915/i915_drv.h            |   7
 drivers/gpu/drm/i915/i915_gem.c            |  48
 drivers/gpu/drm/i915/i915_gem_dmabuf.c     |  41
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   3
 drivers/gpu/drm/i915/i915_gem_stolen.c     |   3
 drivers/gpu/drm/i915/i915_gpu_error.c      |   2
 drivers/gpu/drm/i915/i915_irq.c            |  23
 drivers/gpu/drm/i915/i915_reg.h            |  34
 drivers/gpu/drm/i915/i915_sysfs.c          |  36
 drivers/gpu/drm/i915/intel_crt.c           |   2
 drivers/gpu/drm/i915/intel_display.c       |  83
 drivers/gpu/drm/i915/intel_drv.h           |   3
 drivers/gpu/drm/i915/intel_lvds.c          |   8
 drivers/gpu/drm/i915/intel_opregion.c      |   2
 drivers/gpu/drm/i915/intel_panel.c         |  14
 drivers/gpu/drm/i915/intel_pm.c            |  14
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  99
 drivers/gpu/drm/i915/intel_ringbuffer.h    |   6
 drivers/gpu/drm/i915/intel_sdvo.c          |  17
 drivers/gpu/drm/i915/intel_sprite.c        |   7
 drivers/gpu/drm/i915/intel_uncore.c        |   9
 drivers/gpu/vga/vgaarb.c                   |  51
 include/drm/i915_drm.h                     |  34
 include/drm/i915_pciids.h                  | 211
 include/linux/vgaarb.h                     |   7
 29 files changed, 774 insertions(+), 334 deletions(-)
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 63bdb29b2549..b3cd3ebae077 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -12,6 +12,7 @@
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <linux/pci_ids.h>
+#include <drm/i915_drm.h>
 #include <asm/pci-direct.h>
 #include <asm/dma.h>
 #include <asm/io_apic.h>
@@ -216,6 +217,157 @@ static void __init intel_remapping_check(int num, int slot, int func)
 
 }
 
+/*
+ * Systems with Intel graphics controllers set aside memory exclusively
+ * for gfx driver use. This memory is not marked in the E820 as reserved
+ * or as RAM, and so is subject to overlap from E820 manipulation later
+ * in the boot process. On some systems, MMIO space is allocated on top,
+ * despite the efforts of the "RAM buffer" approach, which simply rounds
+ * memory boundaries up to 64M to try to catch space that may decode
+ * as RAM and so is not suitable for MMIO.
+ *
+ * And yes, so far on current devices the base addr is always under 4G.
+ */
+static u32 __init intel_stolen_base(int num, int slot, int func)
+{
+	u32 base;
+
+	/*
+	 * For the PCI IDs in this quirk, the stolen base is always
+	 * in 0x5c, aka the BDSM register (yes that's really what
+	 * it's called).
+	 */
+	base = read_pci_config(num, slot, func, 0x5c);
+	base &= ~((1<<20) - 1);
+
+	return base;
+}
+
+#define KB(x)	((x) * 1024)
+#define MB(x)	(KB (KB (x)))
+#define GB(x)	(MB (KB (x)))
+
+static size_t __init gen3_stolen_size(int num, int slot, int func)
+{
+	size_t stolen_size;
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
+
+	switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+	case I855_GMCH_GMS_STOLEN_1M:
+		stolen_size = MB(1);
+		break;
+	case I855_GMCH_GMS_STOLEN_4M:
+		stolen_size = MB(4);
+		break;
+	case I855_GMCH_GMS_STOLEN_8M:
+		stolen_size = MB(8);
+		break;
+	case I855_GMCH_GMS_STOLEN_16M:
+		stolen_size = MB(16);
+		break;
+	case I855_GMCH_GMS_STOLEN_32M:
+		stolen_size = MB(32);
+		break;
+	case I915_GMCH_GMS_STOLEN_48M:
+		stolen_size = MB(48);
+		break;
+	case I915_GMCH_GMS_STOLEN_64M:
+		stolen_size = MB(64);
+		break;
+	case G33_GMCH_GMS_STOLEN_128M:
+		stolen_size = MB(128);
+		break;
+	case G33_GMCH_GMS_STOLEN_256M:
+		stolen_size = MB(256);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_96M:
+		stolen_size = MB(96);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_160M:
+		stolen_size = MB(160);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_224M:
+		stolen_size = MB(224);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_352M:
+		stolen_size = MB(352);
+		break;
+	default:
+		stolen_size = 0;
+		break;
+	}
+
+	return stolen_size;
+}
+
+static size_t __init gen6_stolen_size(int num, int slot, int func)
+{
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
+	gmch_ctrl &= SNB_GMCH_GMS_MASK;
+
+	return gmch_ctrl << 25; /* 32 MB units */
+}
+
+typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
+static struct pci_device_id intel_stolen_ids[] __initdata = {
+	INTEL_I915G_IDS(gen3_stolen_size),
+	INTEL_I915GM_IDS(gen3_stolen_size),
+	INTEL_I945G_IDS(gen3_stolen_size),
+	INTEL_I945GM_IDS(gen3_stolen_size),
+	INTEL_VLV_M_IDS(gen3_stolen_size),
+	INTEL_VLV_D_IDS(gen3_stolen_size),
+	INTEL_PINEVIEW_IDS(gen3_stolen_size),
+	INTEL_I965G_IDS(gen3_stolen_size),
+	INTEL_G33_IDS(gen3_stolen_size),
+	INTEL_I965GM_IDS(gen3_stolen_size),
+	INTEL_GM45_IDS(gen3_stolen_size),
+	INTEL_G45_IDS(gen3_stolen_size),
+	INTEL_IRONLAKE_D_IDS(gen3_stolen_size),
+	INTEL_IRONLAKE_M_IDS(gen3_stolen_size),
+	INTEL_SNB_D_IDS(gen6_stolen_size),
+	INTEL_SNB_M_IDS(gen6_stolen_size),
+	INTEL_IVB_M_IDS(gen6_stolen_size),
+	INTEL_IVB_D_IDS(gen6_stolen_size),
+	INTEL_HSW_D_IDS(gen6_stolen_size),
+	INTEL_HSW_M_IDS(gen6_stolen_size),
+};
+
+static void __init intel_graphics_stolen(int num, int slot, int func)
+{
+	size_t size;
+	int i;
+	u32 start;
+	u16 device, subvendor, subdevice;
+
+	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
+	subvendor = read_pci_config_16(num, slot, func,
+				       PCI_SUBSYSTEM_VENDOR_ID);
+	subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);
+
+	for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
+		if (intel_stolen_ids[i].device == device) {
+			stolen_size_fn stolen_size =
+				(stolen_size_fn)intel_stolen_ids[i].driver_data;
+			size = stolen_size(num, slot, func);
+			start = intel_stolen_base(num, slot, func);
+			if (size && start) {
+				/* Mark this space as reserved */
+				e820_add_region(start, size, E820_RESERVED);
+				sanitize_e820_map(e820.map,
+						  ARRAY_SIZE(e820.map),
+						  &e820.nr_map);
+			}
+			return;
+		}
+	}
+}
+
 #define QFLAG_APPLY_ONCE	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -251,6 +403,8 @@ static struct chipset early_qrk[] __initdata = {
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
 	{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
+	  QFLAG_APPLY_ONCE, intel_graphics_stolen },
 	{}
 };
 
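The two decode steps in the quirk above are plain bit arithmetic and can be sanity-checked from userspace. A minimal sketch with invented register values — nothing here reads real PCI config space, and the shift/mask constants simply mirror the hunk above:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t bdsm = 0x7b200001;               /* made-up BDSM (0x5c) contents */
		uint32_t base = bdsm & ~((1u << 20) - 1); /* stolen base is 1 MiB aligned */

		uint16_t gmch_ctrl = 0x0140;              /* made-up SNB_GMCH_CTRL contents */
		uint16_t gms = (gmch_ctrl >> 3) & 0x1f;   /* SNB_GMCH_GMS_SHIFT / _MASK */
		uint64_t size = (uint64_t)gms << 25;      /* field counts 32 MB units */

		printf("stolen base 0x%08x, size %llu MB\n",
		       base, (unsigned long long)(size >> 20));
		return 0;
	}

With these values the program prints a 1 MiB-aligned base and a 256 MB size, i.e. exactly what gen6_stolen_size() and intel_stolen_base() would hand to e820_add_region().
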
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 55ab9246e1b9..a6f4cb5af185 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -857,7 +857,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		u32 rpstat, cagf;
+		u32 rpstat, cagf, reqf;
 		u32 rpupei, rpcurup, rpprevup;
 		u32 rpdownei, rpcurdown, rpprevdown;
 		int max_freq;
@@ -869,6 +869,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 
 		gen6_gt_force_wake_get(dev_priv);
 
+		reqf = I915_READ(GEN6_RPNSWREQ);
+		reqf &= ~GEN6_TURBO_DISABLE;
+		if (IS_HASWELL(dev))
+			reqf >>= 24;
+		else
+			reqf >>= 25;
+		reqf *= GT_FREQUENCY_MULTIPLIER;
+
 		rpstat = I915_READ(GEN6_RPSTAT1);
 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
 		rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -893,6 +901,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
 			   rp_state_limits & 0xff);
+		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 		seq_printf(m, "CAGF: %dMHz\n", cagf);
 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
 			   GEN6_CURICONT_MASK);
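The new RPNSWREQ readout is one mask and one shift. A standalone sketch of the decode, assuming GEN6_TURBO_DISABLE is bit 31 and the usual 50 MHz step per ratio unit for GT_FREQUENCY_MULTIPLIER — treat both as assumptions here, and the register value is made up:

	#include <stdio.h>
	#include <stdint.h>

	#define GEN6_TURBO_DISABLE      (1u << 31) /* assumed bit position */
	#define GT_FREQUENCY_MULTIPLIER 50         /* assumed MHz per ratio step */

	int main(void)
	{
		uint32_t rpnswreq = 0x16000000;    /* made-up register contents */
		int is_haswell = 0;

		uint32_t reqf = rpnswreq & ~GEN6_TURBO_DISABLE;
		/* the ratio field starts at bit 24 on HSW, bit 25 otherwise
		 * (per the hunk above) */
		reqf >>= is_haswell ? 24 : 25;
		printf("RPNSWREQ: %uMHz\n", reqf * GT_FREQUENCY_MULTIPLIER);
		return 0;
	}
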
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index fdaa0915ce56..9b265a4c6a3d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,9 +1290,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 * then we do not take part in VGA arbitration and the
 	 * vga_client_register() fails with -ENODEV.
 	 */
-	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-	if (ret && ret != -ENODEV)
-		goto out;
+	if (!HAS_PCH_SPLIT(dev)) {
+		ret = vga_client_register(dev->pdev, dev, NULL,
+					  i915_vga_set_decode);
+		if (ret && ret != -ENODEV)
+			goto out;
+	}
 
 	intel_register_dsm_handler();
 
@@ -1348,6 +1351,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 */
 	intel_fbdev_initial_config(dev);
 
+	/*
+	 * Must do this after fbcon init so that
+	 * vgacon_save_screen() works during the handover.
+	 */
+	i915_disable_vga_mem(dev);
+
 	/* Only enable hotplug handling once the fbdev is fully set up. */
 	dev_priv->enable_hotplug_processing = true;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ccb28ead3501..69d8ed5416c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -157,25 +157,6 @@ MODULE_PARM_DESC(prefault_disable,
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
-#define INTEL_VGA_DEVICE(id, info) {		\
-	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
-	.class_mask = 0xff0000,			\
-	.vendor = 0x8086,			\
-	.device = id,				\
-	.subvendor = PCI_ANY_ID,		\
-	.subdevice = PCI_ANY_ID,		\
-	.driver_data = (unsigned long) info }
-
-#define INTEL_QUANTA_VGA_DEVICE(info) {		\
-	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
-	.class_mask = 0xff0000,			\
-	.vendor = 0x8086,			\
-	.device = 0x16a,			\
-	.subvendor = 0x152d,			\
-	.subdevice = 0x8990,			\
-	.driver_data = (unsigned long) info }
-
-
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
@@ -350,118 +331,41 @@ static const struct intel_device_info intel_haswell_m_info = {
 	.has_vebox_ring = 1,
 };
 
+/*
+ * Make sure any device matches here are from most specific to most
+ * general. For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+#define INTEL_PCI_IDS \
+	INTEL_I830_IDS(&intel_i830_info),	\
+	INTEL_I845G_IDS(&intel_845g_info),	\
+	INTEL_I85X_IDS(&intel_i85x_info),	\
+	INTEL_I865G_IDS(&intel_i865g_info),	\
+	INTEL_I915G_IDS(&intel_i915g_info),	\
+	INTEL_I915GM_IDS(&intel_i915gm_info),	\
+	INTEL_I945G_IDS(&intel_i945g_info),	\
+	INTEL_I945GM_IDS(&intel_i945gm_info),	\
+	INTEL_I965G_IDS(&intel_i965g_info),	\
+	INTEL_G33_IDS(&intel_g33_info),		\
+	INTEL_I965GM_IDS(&intel_i965gm_info),	\
+	INTEL_GM45_IDS(&intel_gm45_info),	\
+	INTEL_G45_IDS(&intel_g45_info),		\
+	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
+	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
+	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
+	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
+	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
+	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
+	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
+	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
+	INTEL_HSW_D_IDS(&intel_haswell_d_info),	\
+	INTEL_HSW_M_IDS(&intel_haswell_m_info),	\
+	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
+	INTEL_VLV_D_IDS(&intel_valleyview_d_info)
+
 static const struct pci_device_id pciidlist[] = {	/* aka */
-	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),	/* I830_M */
-	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),	/* 845_G */
-	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),	/* I855_GM */
-	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
-	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),	/* I865_G */
-	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),	/* I915_G */
-	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),	/* E7221_G */
-	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),	/* I915_GM */
-	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),	/* I945_G */
-	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),	/* I945_GM */
-	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),	/* I945_GME */
-	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),	/* I946_GZ */
-	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),	/* G35_G */
-	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),	/* I965_Q */
-	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),	/* I965_G */
-	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),	/* Q35_G */
-	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),	/* G33_G */
-	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),	/* Q33_G */
-	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),	/* I965_GM */
-	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),	/* I965_GME */
-	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),	/* GM45_G */
-	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),	/* IGD_E_G */
-	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),	/* Q45_G */
-	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),	/* G45_G */
-	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),	/* G41_G */
-	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),	/* B43_G */
-	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),	/* B43_G.1 */
-	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
-	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
-	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
-	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
-	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
-	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
-	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
-	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
-	INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
-	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
-	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
-	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
-	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
-	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
-	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
-	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
-	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
-	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
-	INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
-	INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
-	INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
-	INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
-	INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
-	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
-	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
-	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
-	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
-	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
-	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
-	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
-	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
-	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
-	INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
-	INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
-	INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
-	INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
-	INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
-	INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
-	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
-	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
-	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
-	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
-	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
-	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
-	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
-	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
-	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
-	INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
-	INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
-	INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
-	INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
-	INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
-	INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
-	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
-	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
-	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
-	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
-	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
-	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
-	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
-	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
-	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
-	INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
-	INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
-	INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
-	INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
-	INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
-	INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
-	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
+	INTEL_PCI_IDS,
 	{0, 0, 0}
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52a3785a3fdf..35874b3a86dc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1236,6 +1236,13 @@ typedef struct drm_i915_private {
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 
+	/**
+	 * wq - Driver workqueue for GEM.
+	 *
+	 * NOTE: Work items scheduled here are not allowed to grab any modeset
+	 * locks, for otherwise the flushing done in the pageflip code will
+	 * result in deadlocks.
+	 */
 	struct workqueue_struct *wq;
 
 	/* Display functions */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d1cb10d846f..d9e337feef14 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -212,7 +212,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -1695,6 +1695,7 @@ static long
 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		  bool purgeable_only)
 {
+	struct list_head still_bound_list;
 	struct drm_i915_gem_object *obj, *next;
 	long count = 0;
 
@@ -1709,23 +1710,55 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		}
 	}
 
-	list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
-				 global_list) {
+	/*
+	 * As we may completely rewrite the bound list whilst unbinding
+	 * (due to retiring requests) we have to strictly process only
+	 * one element of the list at the time, and recheck the list
+	 * on every iteration.
+	 */
+	INIT_LIST_HEAD(&still_bound_list);
+	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
 		struct i915_vma *vma, *v;
 
+		obj = list_first_entry(&dev_priv->mm.bound_list,
+				       typeof(*obj), global_list);
+		list_move_tail(&obj->global_list, &still_bound_list);
+
 		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
 			continue;
 
+		/*
+		 * Hold a reference whilst we unbind this object, as we may
+		 * end up waiting for and retiring requests. This might
+		 * release the final reference (held by the active list)
+		 * and result in the object being freed from under us.
+		 *
+		 * Note 1: Shrinking the bound list is special since only active
+		 * (and hence bound objects) can contain such limbo objects, so
+		 * we don't need special tricks for shrinking the unbound list.
+		 * The only other place where we have to be careful with active
+		 * objects suddenly disappearing due to retiring requests is the
+		 * eviction code.
+		 *
+		 * Note 2: Even though the bound list doesn't hold a reference
+		 * to the object we can safely grab one here: The final object
+		 * unreferencing and the bound_list are both protected by the
+		 * dev->struct_mutex and so we won't ever be able to observe an
+		 * object on the bound_list with a reference count equals 0.
+		 */
+		drm_gem_object_reference(&obj->base);
+
 		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
 			if (i915_vma_unbind(vma))
 				break;
 
-		if (!i915_gem_object_put_pages(obj)) {
-			count += obj->base.size >> PAGE_SHIFT;
-			if (count >= target)
-				return count;
-		}
+		if (i915_gem_object_put_pages(obj) == 0)
+			count += obj->base.size >> PAGE_SHIFT;
+
+		drm_gem_object_unreference(&obj->base);
 	}
+	list_splice(&still_bound_list, &dev_priv->mm.bound_list);
 
 	return count;
 }
@@ -1774,7 +1807,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
 	page_count = obj->base.size / PAGE_SIZE;
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
-		sg_free_table(st);
 		kfree(st);
 		return -ENOMEM;
 	}
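The shrinker rework above relies on a pattern worth seeing in isolation: because unbinding may rewrite the bound list, the loop detaches exactly one element per iteration onto a private list and splices the survivors back afterwards, never holding an iterator across the mutation. A generic userspace sketch of that pattern — a plain C singly linked list, not the kernel list API:

	#include <stdio.h>
	#include <stdlib.h>

	struct node { int id; struct node *next; };

	static struct node *pop_first(struct node **list)
	{
		struct node *n = *list;
		if (n)
			*list = n->next;
		return n;
	}

	static void push(struct node **list, struct node *n)
	{
		n->next = *list;
		*list = n;
	}

	int main(void)
	{
		struct node *bound = NULL, *still_bound = NULL, *n;
		for (int i = 3; i >= 1; i--) {        /* build list 1 -> 2 -> 3 */
			n = malloc(sizeof(*n));
			n->id = i;
			push(&bound, n);
		}

		/* Re-read the head on every pass: processing an element may
		 * rewrite the source list, as unbinding does in the kernel. */
		while ((n = pop_first(&bound)) != NULL) {
			printf("processing object %d\n", n->id);
			push(&still_bound, n);        /* kernel: list_move_tail() */
		}

		/* Splice survivors back, mirroring list_splice() above. */
		while ((n = pop_first(&still_bound)) != NULL)
			push(&bound, n);

		while ((n = pop_first(&bound)) != NULL) {
			printf("back on bound list: %d\n", n->id);
			free(n);
		}
		return 0;
	}
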
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e918b05fcbdd..7d5752fda5f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -42,27 +42,24 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 
 	ret = i915_mutex_lock_interruptible(obj->base.dev);
 	if (ret)
-		return ERR_PTR(ret);
+		goto err;
 
 	ret = i915_gem_object_get_pages(obj);
-	if (ret) {
-		st = ERR_PTR(ret);
-		goto out;
-	}
+	if (ret)
+		goto err_unlock;
+
+	i915_gem_object_pin_pages(obj);
 
 	/* Copy sg so that we make an independent mapping */
 	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (st == NULL) {
-		st = ERR_PTR(-ENOMEM);
-		goto out;
+		ret = -ENOMEM;
+		goto err_unpin;
 	}
 
 	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
-	if (ret) {
-		kfree(st);
-		st = ERR_PTR(ret);
-		goto out;
-	}
+	if (ret)
+		goto err_free;
 
 	src = obj->pages->sgl;
 	dst = st->sgl;
@@ -73,17 +70,23 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	}
 
 	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
-		sg_free_table(st);
-		kfree(st);
-		st = ERR_PTR(-ENOMEM);
-		goto out;
+		ret = -ENOMEM;
+		goto err_free_sg;
 	}
 
-	i915_gem_object_pin_pages(obj);
-
-out:
 	mutex_unlock(&obj->base.dev->struct_mutex);
 	return st;
+
+err_free_sg:
+	sg_free_table(st);
+err_free:
+	kfree(st);
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+	return ERR_PTR(ret);
 }
 
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
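The rewritten map_dma_buf path replaces the single `out:` label with a goto ladder where each label undoes exactly one earlier step, in reverse order of acquisition. A minimal userspace sketch of the idiom, with plain allocations standing in for the GEM calls:

	#include <stdio.h>
	#include <stdlib.h>
	#include <errno.h>

	static int build(int fail_map)
	{
		int ret;
		char *pages, *table;

		pages = malloc(64);             /* stands in for get_pages + pin_pages */
		if (!pages) {
			ret = -ENOMEM;
			goto err;
		}

		table = malloc(64);             /* stands in for kmalloc + sg_alloc_table */
		if (!table) {
			ret = -ENOMEM;
			goto err_unpin;
		}

		if (fail_map) {                 /* stands in for dma_map_sg() failing */
			ret = -ENOMEM;
			goto err_free;
		}

		free(table);
		free(pages);
		return 0;

	err_free:       /* undo the table allocation */
		free(table);
	err_unpin:      /* undo the pin (here: the page allocation) */
		free(pages);
	err:
		return ret;
	}

	int main(void)
	{
		printf("success path: %d\n", build(0));
		printf("failure path: %d\n", build(1));
		return 0;
	}

Each failure site jumps past the labels for steps it never took, so nothing is double-freed and nothing leaks — the same invariant the i915 ladder above maintains for the pinned pages and the sg_table.
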
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 792c52a235ee..bf345777ae9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -310,6 +310,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	else
 		ret = relocate_entry_gtt(obj, reloc);
 
+	if (ret)
+		return ret;
+
 	/* and update the user's relocation entry */
 	reloc->presumed_offset = target_offset;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9969d10b80f5..e15a1d90037d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -201,6 +201,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int bios_reserved = 0;
 
+	if (dev_priv->gtt.stolen_size == 0)
+		return 0;
+
 	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
 	if (dev_priv->mm.stolen_base == 0)
 		return 0;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 558e568d5b45..aba9d7498996 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -641,7 +641,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 	if (WARN_ON(ring->id != RCS))
 		return NULL;
 
-	obj = ring->private;
+	obj = ring->scratch.obj;
 	if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
 	    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
 		return i915_error_object_create(dev_priv, obj);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a03b445ceb5f..83cce0cdb769 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1027,8 +1027,13 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock(&dev_priv->irq_lock);
 
-	queue_work(dev_priv->wq,
-		   &dev_priv->hotplug_work);
+	/*
+	 * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
+	 * queue for otherwise the flush_work in the pageflip code will
+	 * deadlock.
+	 */
+	schedule_work(&dev_priv->hotplug_work);
 }
 
 static void gmbus_irq_handler(struct drm_device *dev)
@@ -1655,7 +1660,13 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
 			wake_up_all(&ring->irq_queue);
 	}
 
-	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
+	/*
+	 * Our reset work can grab modeset locks (since it needs to reset the
+	 * state of outstanding pageflips). Hence it must not be run on our own
+	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
+	 * code will deadlock.
+	 */
+	schedule_work(&dev_priv->gpu_error.work);
 }
 
 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -2027,9 +2038,9 @@ static void i915_hangcheck_elapsed(unsigned long data)
 
 	for_each_ring(ring, dev_priv, i) {
 		if (ring->hangcheck.score > FIRE) {
-			DRM_ERROR("%s on %s\n",
-				  stuck[i] ? "stuck" : "no progress",
-				  ring->name);
+			DRM_INFO("%s on %s\n",
+				 stuck[i] ? "stuck" : "no progress",
+				 ring->name);
 			rings_hung++;
 		}
 	}
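Both hunks above move work off dev_priv->wq for the same reason: the pageflip path flushes that queue while holding modeset locks, so any work item on it that also takes modeset locks closes a wait cycle. A contrived pthread sketch of the cycle — none of this is i915 code, and trylock is used so the program reports the cycle instead of actually hanging:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t modeset_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Stand-in for the hotplug/reset work: it needs the modeset lock. */
	static void *work_fn(void *arg)
	{
		(void)arg;
		if (pthread_mutex_trylock(&modeset_lock) != 0) {
			/* On dev_priv->wq this thread would block here forever:
			 * the flusher below holds the lock and waits for us. */
			printf("work item blocked on modeset lock -> deadlock cycle\n");
			return NULL;
		}
		pthread_mutex_unlock(&modeset_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t worker;

		/* Pageflip path: takes the modeset lock, then drains the queue. */
		pthread_mutex_lock(&modeset_lock);
		pthread_create(&worker, NULL, work_fn, NULL);
		pthread_join(&worker, NULL);    /* stand-in for flush_work() */
		pthread_mutex_unlock(&modeset_lock);
		return 0;
	}

Moving the work to the system workqueue (schedule_work) breaks the cycle because the pageflip code never flushes that queue.
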
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b6a58f720f9a..c159e1a6810f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -33,21 +33,6 @@
 #define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
 #define _MASKED_BIT_DISABLE(a)	((a) << 16)
 
-/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga arbiter.
- */
-#define INTEL_GMCH_CTRL		0x52
-#define INTEL_GMCH_VGA_DISABLE	(1 << 1)
-#define SNB_GMCH_CTRL		0x50
-#define   SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
-#define   SNB_GMCH_GGMS_MASK	0x3
-#define   SNB_GMCH_GMS_SHIFT	3 /* Graphics Mode Select */
-#define   SNB_GMCH_GMS_MASK	0x1f
-
-
 /* PCI config space */
 
 #define HPLLCC	0xc0 /* 855 only */
@@ -245,6 +230,7 @@
  * address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
  */
 #define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
+#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
 #define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
 #define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
 #define   MI_INVALIDATE_TLB		(1<<18)
@@ -693,6 +679,23 @@
 #define   FPGA_DBG_RM_NOCLAIM	(1<<31)
 
 #define DERRMR		0x44050
+#define   DERRMR_PIPEA_SCANLINE		(1<<0)
+#define   DERRMR_PIPEA_PRI_FLIP_DONE	(1<<1)
+#define   DERRMR_PIPEA_SPR_FLIP_DONE	(1<<2)
+#define   DERRMR_PIPEA_VBLANK		(1<<3)
+#define   DERRMR_PIPEA_HBLANK		(1<<5)
+#define   DERRMR_PIPEB_SCANLINE		(1<<8)
+#define   DERRMR_PIPEB_PRI_FLIP_DONE	(1<<9)
+#define   DERRMR_PIPEB_SPR_FLIP_DONE	(1<<10)
+#define   DERRMR_PIPEB_VBLANK		(1<<11)
+#define   DERRMR_PIPEB_HBLANK		(1<<13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define   DERRMR_PIPEC_SCANLINE		(1<<14)
+#define   DERRMR_PIPEC_PRI_FLIP_DONE	(1<<15)
+#define   DERRMR_PIPEC_SPR_FLIP_DONE	(1<<20)
+#define   DERRMR_PIPEC_VBLANK		(1<<21)
+#define   DERRMR_PIPEC_HBLANK		(1<<22)
+
 
 /* GM45+ chicken bits -- debug workaround bits that may be required
  * for various sorts of correct behavior. The top 16 bits of each are
@@ -3310,6 +3313,7 @@
 #define   MCURSOR_PIPE_A	0x00
 #define   MCURSOR_PIPE_B	(1 << 28)
 #define   MCURSOR_GAMMA_ENABLE	(1 << 26)
+#define   CURSOR_TRICKLE_FEED_DISABLE	(1 << 14)
 #define _CURABASE		(dev_priv->info->display_mmio_offset + 0x70084)
 #define _CURAPOS		(dev_priv->info->display_mmio_offset + 0x70088)
 #define   CURSOR_POS_MASK	0x007FF
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a777e7f3b0df..c8c4112de110 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -224,6 +224,18 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
 
+static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			vlv_gpu_freq(dev_priv->mem_freq,
+				     dev_priv->rps.rpe_delay));
+}
+
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
@@ -366,6 +378,7 @@ static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
 
+static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
 
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
 static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
@@ -409,6 +422,14 @@ static const struct attribute *gen6_attrs[] = {
 	NULL,
 };
 
+static const struct attribute *vlv_attrs[] = {
+	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_max_freq_mhz.attr,
+	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_vlv_rpe_freq_mhz.attr,
+	NULL,
+};
+
 static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *attr, char *buf,
 				loff_t off, size_t count)
@@ -492,11 +513,13 @@ void i915_setup_sysfs(struct drm_device *dev)
 			DRM_ERROR("l3 parity sysfs setup failed\n");
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6) {
-		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
-		if (ret)
-			DRM_ERROR("gen6 sysfs setup failed\n");
-	}
+	ret = 0;
+	if (IS_VALLEYVIEW(dev))
+		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+	else if (INTEL_INFO(dev)->gen >= 6)
+		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+	if (ret)
+		DRM_ERROR("RPS sysfs setup failed\n");
 
 	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
 				    &error_state_attr);
@@ -507,7 +530,10 @@ void i915_setup_sysfs(struct drm_device *dev)
 void i915_teardown_sysfs(struct drm_device *dev)
 {
 	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
-	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
+	if (IS_VALLEYVIEW(dev))
+		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+	else
+		sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
 	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
 #ifdef CONFIG_PM
 	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b5a3875f22c7..ea9022ef15d5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -688,7 +688,7 @@ static void intel_crt_reset(struct drm_connector *connector)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crt *crt = intel_attached_crt(connector);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (INTEL_INFO(dev)->gen >= 5) {
 		u32 adpa;
 
 		adpa = I915_READ(crt->adpa_reg);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 38452d82ac7d..2489d0b4c7d2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2077,8 +2077,10 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
 	else
 		dspcntr &= ~DISPPLANE_TILED;
 
-	/* must disable */
-	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+	if (IS_HASWELL(dev))
+		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
+	else
+		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
 	I915_WRITE(reg, dspcntr);
 
@@ -6762,8 +6764,10 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
 		cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 		cntl |= CURSOR_MODE_DISABLE;
 	}
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev)) {
 		cntl |= CURSOR_PIPE_CSC_ENABLE;
+		cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
+	}
 	I915_WRITE(CURCNTR_IVB(pipe), cntl);
 
 	intel_crtc->cursor_visible = visible;
@@ -7309,8 +7313,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 		}
 	}
 
-	pipe_config->adjusted_mode.clock = clock.dot *
-		pipe_config->pixel_multiplier;
+	pipe_config->adjusted_mode.clock = clock.dot;
 }
 
 static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
@@ -7828,12 +7831,6 @@ err:
 	return ret;
 }
 
-/*
- * On gen7 we currently use the blit ring because (in early silicon at least)
- * the render ring doesn't give us interrupts for page flip completion, which
- * means clients will hang after the first flip is queued. Fortunately the
- * blit ring generates interrupts properly, so use it instead.
- */
 static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
@@ -7842,9 +7839,13 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+	struct intel_ring_buffer *ring;
 	uint32_t plane_bit = 0;
-	int ret;
+	int len, ret;
+
+	ring = obj->ring;
+	if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
+		ring = &dev_priv->ring[BCS];
 
 	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
@@ -7866,10 +7867,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		goto err_unpin;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	len = 4;
+	if (ring->id == RCS)
+		len += 6;
+
+	ret = intel_ring_begin(ring, len);
 	if (ret)
 		goto err_unpin;
 
+	/* Unmask the flip-done completion message. Note that the bspec says that
+	 * we should do this for both the BCS and RCS, and that we must not unmask
+	 * more than one flip event at any time (or ensure that one flip message
+	 * can be sent by waiting for flip-done prior to queueing new flips).
+	 * Experimentation says that BCS works despite DERRMR masking all
+	 * flip-done completion events and that unmasking all planes at once
+	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
+	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
+	 */
+	if (ring->id == RCS) {
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, DERRMR);
+		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+					DERRMR_PIPEB_PRI_FLIP_DONE |
+					DERRMR_PIPEC_PRI_FLIP_DONE));
+		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+		intel_ring_emit(ring, DERRMR);
+		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+	}
+
 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
 	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
@@ -10022,6 +10047,33 @@ static void i915_disable_vga(struct drm_device *dev)
 	POSTING_READ(vga_reg);
 }
 
+static void i915_enable_vga_mem(struct drm_device *dev)
+{
+	/* Enable VGA memory on Intel HD */
+	if (HAS_PCH_SPLIT(dev)) {
+		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+		outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+		vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+						   VGA_RSRC_LEGACY_MEM |
+						   VGA_RSRC_NORMAL_IO |
+						   VGA_RSRC_NORMAL_MEM);
+		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	}
+}
+
+void i915_disable_vga_mem(struct drm_device *dev)
+{
+	/* Disable VGA memory on Intel HD */
+	if (HAS_PCH_SPLIT(dev)) {
+		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+		outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+		vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+						   VGA_RSRC_NORMAL_IO |
+						   VGA_RSRC_NORMAL_MEM);
+		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	}
+}
+
 void intel_modeset_init_hw(struct drm_device *dev)
 {
 	intel_init_power_well(dev);
@@ -10300,6 +10352,7 @@ void i915_redisable_vga(struct drm_device *dev)
 	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 		i915_disable_vga(dev);
+		i915_disable_vga_mem(dev);
 	}
 }
 
@@ -10513,6 +10566,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
 	intel_disable_fbc(dev);
 
+	i915_enable_vga_mem(dev);
+
 	intel_disable_gt_powersave(dev);
 
 	ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 176080822a74..a47799e832c6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -551,7 +551,7 @@ extern int intel_panel_init(struct intel_panel *panel,
 			    struct drm_display_mode *fixed_mode);
 extern void intel_panel_fini(struct intel_panel *panel);
 
-extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
 extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
 				    struct intel_crtc_config *pipe_config,
@@ -792,5 +792,6 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
 extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
 extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
 extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+extern void i915_disable_vga_mem(struct drm_device *dev);
 
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4d33278e31fb..831a5c021c4b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -128,8 +128,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	struct drm_display_mode *fixed_mode =
-		lvds_encoder->attached_connector->base.panel.fixed_mode;
+	const struct drm_display_mode *adjusted_mode =
+		&crtc->config.adjusted_mode;
 	int pipe = crtc->pipe;
 	u32 temp;
 
@@ -183,9 +183,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 		temp &= ~LVDS_ENABLE_DITHER;
 	}
 	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-	if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
 		temp |= LVDS_HSYNC_POLARITY;
-	if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
 		temp |= LVDS_VSYNC_POLARITY;
 
 	I915_WRITE(lvds_encoder->reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index cfb8fb68f09c..119771ff46ab 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -173,7 +173,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 		return ASLE_BACKLIGHT_FAILED;
 
 	intel_panel_set_backlight(dev, bclp, 255);
-	iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
+	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
 	return 0;
 }
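The one-liner above fixes a rounding bug: (bclp*0x64)/0xff truncates, so low backlight requests collapse to a CBLV of 0, whereas DIV_ROUND_UP keeps them non-zero. A quick worked check of the two formulas:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* Scale a 0..255 backlight value to 0..100 both ways. */
		for (unsigned bclp = 1; bclp <= 255; bclp += 126) {
			unsigned old = (bclp * 0x64) / 0xff;          /* truncates */
			unsigned new = DIV_ROUND_UP(bclp * 100, 255); /* rounds up */
			printf("bclp=%3u  old=%3u  new=%3u\n", bclp, old, new);
		}
		return 0;
	}

For bclp=1 the old formula yields 0 (backlight reported as off) while the new one yields 1, which is exactly the precision loss the commit title refers to.
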
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a43c33bc4a35..42114ecbae0e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -36,20 +36,12 @@
 #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
 
 void
-intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 		       struct drm_display_mode *adjusted_mode)
 {
-	adjusted_mode->hdisplay = fixed_mode->hdisplay;
-	adjusted_mode->hsync_start = fixed_mode->hsync_start;
-	adjusted_mode->hsync_end = fixed_mode->hsync_end;
-	adjusted_mode->htotal = fixed_mode->htotal;
+	drm_mode_copy(adjusted_mode, fixed_mode);
 
-	adjusted_mode->vdisplay = fixed_mode->vdisplay;
-	adjusted_mode->vsync_start = fixed_mode->vsync_start;
-	adjusted_mode->vsync_end = fixed_mode->vsync_end;
-	adjusted_mode->vtotal = fixed_mode->vtotal;
-
-	adjusted_mode->clock = fixed_mode->clock;
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
 }
 
 /* adjusted_mode has been preset to be the panel's fixed mode */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 46056820d1d2..0c115cc4899f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3447,14 +3447,24 @@ int intel_enable_rc6(const struct drm_device *dev)
 static void gen6_enable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 enabled_intrs;
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON(dev_priv->rps.pm_iir);
 	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
 	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
 	spin_unlock_irq(&dev_priv->irq_lock);
+
 	/* only unmask PM interrupts we need. Mask all others. */
-	I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+	enabled_intrs = GEN6_PM_RPS_EVENTS;
+
+	/* IVB and SNB hard hangs on looping batchbuffer
+	 * if GEN6_PM_UP_EI_EXPIRED is masked.
+	 */
+	if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+		enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+	I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
 }
 
 static void gen6_enable_rps(struct drm_device *dev)
@@ -4950,8 +4960,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-	g4x_disable_trickle_feed(dev);
-
 	/* WaVSRefCountFullforceMissDisable:hsw */
 	gen7_setup_fixed_func_scheduler(dev_priv);
 
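
gen6_enable_rps_interrupts() now accumulates the bits to unmask in a local so the SNB/IVB workaround bit can be ORed in conditionally; since GEN6_PMINTRMSK holds a mask of disabled interrupts, the final write inverts the enabled set. A compilable sketch of the bit logic, with illustrative (not real) register bit values:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative bit values only, not the real register layout. */
#define GEN6_PM_RPS_EVENTS       0x70u
#define GEN6_PM_RP_UP_EI_EXPIRED 0x04u

static unsigned int rps_intr_mask(int gen, bool is_haswell)
{
	unsigned int enabled_intrs = GEN6_PM_RPS_EVENTS;

	/* SNB/IVB hard-hang on a looping batch if UP_EI_EXPIRED is masked. */
	if (gen <= 7 && !is_haswell)
		enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;

	/* GEN6_PMINTRMSK is a disable mask, so invert the enabled set. */
	return ~enabled_intrs;
}

int main(void)
{
	printf("snb=%#x hsw=%#x\n", rps_intr_mask(6, false),
	       rps_intr_mask(7, true));
	return 0;
}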
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f05cceac5a52..460ee1026fca 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,16 +33,6 @@
33#include "i915_trace.h" 33#include "i915_trace.h"
34#include "intel_drv.h" 34#include "intel_drv.h"
35 35
36/*
37 * 965+ support PIPE_CONTROL commands, which provide finer grained control
38 * over cache flushing.
39 */
40struct pipe_control {
41 struct drm_i915_gem_object *obj;
42 volatile u32 *cpu_page;
43 u32 gtt_offset;
44};
45
46static inline int ring_space(struct intel_ring_buffer *ring) 36static inline int ring_space(struct intel_ring_buffer *ring)
47{ 37{
48 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); 38 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -175,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
175static int 165static int
176intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) 166intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
177{ 167{
178 struct pipe_control *pc = ring->private; 168 u32 scratch_addr = ring->scratch.gtt_offset + 128;
179 u32 scratch_addr = pc->gtt_offset + 128;
180 int ret; 169 int ret;
181 170
182 171
@@ -213,8 +202,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
213 u32 invalidate_domains, u32 flush_domains) 202 u32 invalidate_domains, u32 flush_domains)
214{ 203{
215 u32 flags = 0; 204 u32 flags = 0;
216 struct pipe_control *pc = ring->private; 205 u32 scratch_addr = ring->scratch.gtt_offset + 128;
217 u32 scratch_addr = pc->gtt_offset + 128;
218 int ret; 206 int ret;
219 207
220 /* Force SNB workarounds for PIPE_CONTROL flushes */ 208 /* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +294,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
306 u32 invalidate_domains, u32 flush_domains) 294 u32 invalidate_domains, u32 flush_domains)
307{ 295{
308 u32 flags = 0; 296 u32 flags = 0;
309 struct pipe_control *pc = ring->private; 297 u32 scratch_addr = ring->scratch.gtt_offset + 128;
310 u32 scratch_addr = pc->gtt_offset + 128;
311 int ret; 298 int ret;
312 299
313 /* 300 /*
@@ -481,68 +468,43 @@ out:
481static int 468static int
482init_pipe_control(struct intel_ring_buffer *ring) 469init_pipe_control(struct intel_ring_buffer *ring)
483{ 470{
484 struct pipe_control *pc;
485 struct drm_i915_gem_object *obj;
486 int ret; 471 int ret;
487 472
488 if (ring->private) 473 if (ring->scratch.obj)
489 return 0; 474 return 0;
490 475
491 pc = kmalloc(sizeof(*pc), GFP_KERNEL); 476 ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
492 if (!pc) 477 if (ring->scratch.obj == NULL) {
493 return -ENOMEM;
494
495 obj = i915_gem_alloc_object(ring->dev, 4096);
496 if (obj == NULL) {
497 DRM_ERROR("Failed to allocate seqno page\n"); 478 DRM_ERROR("Failed to allocate seqno page\n");
498 ret = -ENOMEM; 479 ret = -ENOMEM;
499 goto err; 480 goto err;
500 } 481 }
501 482
502 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 483 i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
503 484
504 ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); 485 ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
505 if (ret) 486 if (ret)
506 goto err_unref; 487 goto err_unref;
507 488
508 pc->gtt_offset = i915_gem_obj_ggtt_offset(obj); 489 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
509 pc->cpu_page = kmap(sg_page(obj->pages->sgl)); 490 ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
510 if (pc->cpu_page == NULL) { 491 if (ring->scratch.cpu_page == NULL) {
511 ret = -ENOMEM; 492 ret = -ENOMEM;
512 goto err_unpin; 493 goto err_unpin;
513 } 494 }
514 495
515 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 496 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
516 ring->name, pc->gtt_offset); 497 ring->name, ring->scratch.gtt_offset);
517
518 pc->obj = obj;
519 ring->private = pc;
520 return 0; 498 return 0;
521 499
522err_unpin: 500err_unpin:
523 i915_gem_object_unpin(obj); 501 i915_gem_object_unpin(ring->scratch.obj);
524err_unref: 502err_unref:
525 drm_gem_object_unreference(&obj->base); 503 drm_gem_object_unreference(&ring->scratch.obj->base);
526err: 504err:
527 kfree(pc);
528 return ret; 505 return ret;
529} 506}
530 507
531static void
532cleanup_pipe_control(struct intel_ring_buffer *ring)
533{
534 struct pipe_control *pc = ring->private;
535 struct drm_i915_gem_object *obj;
536
537 obj = pc->obj;
538
539 kunmap(sg_page(obj->pages->sgl));
540 i915_gem_object_unpin(obj);
541 drm_gem_object_unreference(&obj->base);
542
543 kfree(pc);
544}
545
546static int init_render_ring(struct intel_ring_buffer *ring) 508static int init_render_ring(struct intel_ring_buffer *ring)
547{ 509{
548 struct drm_device *dev = ring->dev; 510 struct drm_device *dev = ring->dev;
@@ -607,16 +569,16 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
607{ 569{
608 struct drm_device *dev = ring->dev; 570 struct drm_device *dev = ring->dev;
609 571
610 if (!ring->private) 572 if (ring->scratch.obj == NULL)
611 return; 573 return;
612 574
613 if (HAS_BROKEN_CS_TLB(dev)) 575 if (INTEL_INFO(dev)->gen >= 5) {
614 drm_gem_object_unreference(to_gem_object(ring->private)); 576 kunmap(sg_page(ring->scratch.obj->pages->sgl));
615 577 i915_gem_object_unpin(ring->scratch.obj);
616 if (INTEL_INFO(dev)->gen >= 5) 578 }
617 cleanup_pipe_control(ring);
618 579
619 ring->private = NULL; 580 drm_gem_object_unreference(&ring->scratch.obj->base);
581 ring->scratch.obj = NULL;
620} 582}
621 583
622static void 584static void
@@ -742,8 +704,7 @@ do { \
742static int 704static int
743pc_render_add_request(struct intel_ring_buffer *ring) 705pc_render_add_request(struct intel_ring_buffer *ring)
744{ 706{
745 struct pipe_control *pc = ring->private; 707 u32 scratch_addr = ring->scratch.gtt_offset + 128;
746 u32 scratch_addr = pc->gtt_offset + 128;
747 int ret; 708 int ret;
748 709
749 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently 710 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,7 +722,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
761 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 722 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
762 PIPE_CONTROL_WRITE_FLUSH | 723 PIPE_CONTROL_WRITE_FLUSH |
763 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 724 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
764 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 725 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
765 intel_ring_emit(ring, ring->outstanding_lazy_request); 726 intel_ring_emit(ring, ring->outstanding_lazy_request);
766 intel_ring_emit(ring, 0); 727 intel_ring_emit(ring, 0);
767 PIPE_CONTROL_FLUSH(ring, scratch_addr); 728 PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -780,7 +741,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
780 PIPE_CONTROL_WRITE_FLUSH | 741 PIPE_CONTROL_WRITE_FLUSH |
781 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 742 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
782 PIPE_CONTROL_NOTIFY); 743 PIPE_CONTROL_NOTIFY);
783 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 744 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
784 intel_ring_emit(ring, ring->outstanding_lazy_request); 745 intel_ring_emit(ring, ring->outstanding_lazy_request);
785 intel_ring_emit(ring, 0); 746 intel_ring_emit(ring, 0);
786 intel_ring_advance(ring); 747 intel_ring_advance(ring);
@@ -814,15 +775,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
814static u32 775static u32
815pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 776pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
816{ 777{
817 struct pipe_control *pc = ring->private; 778 return ring->scratch.cpu_page[0];
818 return pc->cpu_page[0];
819} 779}
820 780
821static void 781static void
822pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) 782pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
823{ 783{
824 struct pipe_control *pc = ring->private; 784 ring->scratch.cpu_page[0] = seqno;
825 pc->cpu_page[0] = seqno;
826} 785}
827 786
828static bool 787static bool
@@ -1141,8 +1100,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
1141 intel_ring_emit(ring, MI_NOOP); 1100 intel_ring_emit(ring, MI_NOOP);
1142 intel_ring_advance(ring); 1101 intel_ring_advance(ring);
1143 } else { 1102 } else {
1144 struct drm_i915_gem_object *obj = ring->private; 1103 u32 cs_offset = ring->scratch.gtt_offset;
1145 u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
1146 1104
1147 if (len > I830_BATCH_LIMIT) 1105 if (len > I830_BATCH_LIMIT)
1148 return -ENOSPC; 1106 return -ENOSPC;
@@ -1835,7 +1793,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1835 return ret; 1793 return ret;
1836 } 1794 }
1837 1795
1838 ring->private = obj; 1796 ring->scratch.obj = obj;
1797 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
1839 } 1798 }
1840 1799
1841 return intel_init_ring_buffer(dev, ring); 1800 return intel_init_ring_buffer(dev, ring);
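
The ringbuffer rework replaces the separately allocated struct pipe_control behind the opaque ring->private pointer with a scratch struct embedded in intel_ring_buffer itself, removing a kmalloc/kfree pair and the type-punning (i830 stashed a bare GEM object in the same void pointer). A minimal illustration of the embed-instead-of-point pattern, with simplified stand-in types:

#include <stdio.h>

struct gem_object { unsigned int id; };

/* After the rework: the scratch bookkeeping lives inside the ring,
 * so there is no void *private left to allocate, cast, or kfree(). */
struct ring {
	struct {
		struct gem_object *obj;
		unsigned int gtt_offset;
		volatile unsigned int *cpu_page;
	} scratch;
};

int main(void)
{
	static struct gem_object obj = { 42 };
	static volatile unsigned int page[1024];
	struct ring ring = { { &obj, 0x1000, page } };

	/* Callers index the fields directly instead of casting ring->private. */
	ring.scratch.cpu_page[0] = 7; /* e.g. what pc_render_set_seqno() does */
	printf("obj %u at %#x seqno %u\n", ring.scratch.obj->id,
	       ring.scratch.gtt_offset, ring.scratch.cpu_page[0]);
	return 0;
}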
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 432ad5311ba6..68b1ca974d59 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -155,7 +155,11 @@ struct intel_ring_buffer {
 
 	struct intel_ring_hangcheck hangcheck;
 
-	void *private;
+	struct {
+		struct drm_i915_gem_object *obj;
+		u32 gtt_offset;
+		volatile u32 *cpu_page;
+	} scratch;
 };
 
 static inline bool
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 317e058fb3cf..85037b9d4934 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1151,11 +1151,10 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = intel_encoder->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
 	struct drm_display_mode *adjusted_mode =
-		&intel_crtc->config.adjusted_mode;
-	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+		&crtc->config.adjusted_mode;
+	struct drm_display_mode *mode = &crtc->config.requested_mode;
 	struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
 	u32 sdvox;
 	struct intel_sdvo_in_out_map in_out;
@@ -1213,13 +1212,15 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
 	 * adjusted_mode.
 	 */
 	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+	input_dtd.part1.clock /= crtc->config.pixel_multiplier;
 
 	if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
 		input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
 	if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
 		DRM_INFO("Setting input timings on %s failed\n",
 			 SDVO_NAME(intel_sdvo));
 
-	switch (intel_crtc->config.pixel_multiplier) {
+	switch (crtc->config.pixel_multiplier) {
 	default:
 		WARN(1, "unknown pixel mutlipler specified\n");
 	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1252,9 +1253,9 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
 	}
 
 	if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
-		sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+		sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
 	else
-		sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
+		sdvox |= SDVO_PIPE_SEL(crtc->pipe);
 
 	if (intel_sdvo->has_hdmi_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
@@ -1264,7 +1265,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
 		/* done in crtc_mode_set as it lives inside the dpll register */
 	} else {
-		sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+		sdvox |= (crtc->config.pixel_multiplier - 1)
 			<< SDVO_PORT_MULTIPLY_SHIFT;
 	}
 
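
The functional SDVO fix is the added divide: adjusted_mode.clock already carries the pixel-multiplied rate the DPLL needs, so the input timing handed to the SDVO device must be scaled back down to the logical dot clock. The arithmetic in isolation (the sample numbers are illustrative):

#include <stdio.h>

int main(void)
{
	int logical_clock = 25175; /* kHz, e.g. a low TV/LVDS dot clock */
	int pixel_multiplier = 4;  /* chosen so the DPLL runs in range */

	/* crtc->config.adjusted_mode.clock carries the multiplied rate... */
	int adjusted_clock = logical_clock * pixel_multiplier;

	/* ...so the SDVO input DTD must divide it back out. */
	int input_dtd_clock = adjusted_clock / pixel_multiplier;

	printf("adjusted=%d kHz input=%d kHz\n", adjusted_clock, input_dtd_clock);
	return 0;
}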
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 78b621cdd108..ad6ec4b39005 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,8 +260,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SPRITE_TILED;
 
-	/* must disable */
-	sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+	if (IS_HASWELL(dev))
+		sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
+	else
+		sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
 	sprctl |= SPRITE_ENABLE;
 
 	if (IS_HASWELL(dev))
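
On Haswell the sprite path now clears SPRITE_TRICKLE_FEED_DISABLE, matching the trickle-feed enabling elsewhere in this series, while older gens keep it set. The set-versus-clear shape in miniature (the bit value here is a placeholder):

#include <stdio.h>
#include <stdbool.h>

#define SPRITE_TRICKLE_FEED_DISABLE (1u << 14) /* illustrative bit */

static unsigned int sprite_ctl(unsigned int sprctl, bool is_haswell)
{
	if (is_haswell)
		sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE; /* enable trickle feed */
	else
		sprctl |= SPRITE_TRICKLE_FEED_DISABLE;  /* must stay disabled */
	return sprctl;
}

int main(void)
{
	printf("hsw=%#x ivb=%#x\n", sprite_ctl(0, true), sprite_ctl(0, false));
	return 0;
}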
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8f5bc869c023..8649f1c36b00 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -261,7 +261,7 @@ void intel_uncore_init(struct drm_device *dev)
 	}
 }
 
-void intel_uncore_sanitize(struct drm_device *dev)
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -272,6 +272,11 @@ void intel_uncore_sanitize(struct drm_device *dev)
 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 			__gen6_gt_force_wake_mt_reset(dev_priv);
 	}
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+	intel_uncore_forcewake_reset(dev);
 
 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
 	intel_disable_gt_powersave(dev);
@@ -549,6 +554,8 @@ static int gen6_do_reset(struct drm_device *dev)
 	/* Spin waiting for the device to ack the reset request */
 	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
 
+	intel_uncore_forcewake_reset(dev);
+
 	/* If reset with a user forcewake, try to restore, otherwise turn it off */
 	if (dev_priv->uncore.forcewake_count)
 		dev_priv->uncore.funcs.force_wake_get(dev_priv);
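
The uncore change extracts the forcewake-reset body from intel_uncore_sanitize() into a static helper so gen6_do_reset() can also restore sane forcewake state right after a GPU reset clobbers it. A toy sketch of the extract-and-reuse shape, with print statements standing in for the hardware pokes:

#include <stdio.h>

/* Placeholder for the register pokes shared by both paths. */
static void forcewake_reset(void)
{
	printf("forcewake counters reset\n");
}

static void sanitize(void)
{
	forcewake_reset();
	printf("powersave disabled for hw init\n");
}

static void do_gpu_reset(void)
{
	printf("GDRST acked\n");
	/* The reset wipes forcewake, so drop back to a sane state here too. */
	forcewake_reset();
}

int main(void)
{
	sanitize();
	do_gpu_reset();
	return 0;
}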
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index e893f6e1937d..af0259708358 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -257,9 +257,9 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
 		if (!conflict->bridge_has_one_vga) {
 			vga_irq_set_state(conflict, false);
 			flags |= PCI_VGA_STATE_CHANGE_DECODES;
-			if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+			if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
 				pci_bits |= PCI_COMMAND_MEMORY;
-			if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+			if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
 				pci_bits |= PCI_COMMAND_IO;
 		}
 
@@ -267,11 +267,11 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
 			flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
 
 		pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
-		conflict->owns &= ~lwants;
+		conflict->owns &= ~match;
 		/* If he also owned non-legacy, that is no longer the case */
-		if (lwants & VGA_RSRC_LEGACY_MEM)
+		if (match & VGA_RSRC_LEGACY_MEM)
 			conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
-		if (lwants & VGA_RSRC_LEGACY_IO)
+		if (match & VGA_RSRC_LEGACY_IO)
 			conflict->owns &= ~VGA_RSRC_NORMAL_IO;
 	}
 
@@ -644,10 +644,12 @@ bail:
 static inline void vga_update_device_decodes(struct vga_device *vgadev,
 					     int new_decodes)
 {
-	int old_decodes;
-	struct vga_device *new_vgadev, *conflict;
+	int old_decodes, decodes_removed, decodes_unlocked;
 
 	old_decodes = vgadev->decodes;
+	decodes_removed = ~new_decodes & old_decodes;
+	decodes_unlocked = vgadev->locks & decodes_removed;
+	vgadev->owns &= ~decodes_removed;
 	vgadev->decodes = new_decodes;
 
 	pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
@@ -656,31 +658,22 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
 		vga_iostate_to_str(vgadev->decodes),
 		vga_iostate_to_str(vgadev->owns));
 
-
-	/* if we own the decodes we should move them along to
-	   another card */
-	if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
-		/* set us to own nothing */
-		vgadev->owns &= ~old_decodes;
-		list_for_each_entry(new_vgadev, &vga_list, list) {
-			if ((new_vgadev != vgadev) &&
-			    (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
-				pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
-				conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
-				if (!conflict)
-					__vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
-				break;
-			}
-		}
+	/* if we removed locked decodes, lock count goes to zero, and release */
+	if (decodes_unlocked) {
+		if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
+			vgadev->io_lock_cnt = 0;
+		if (decodes_unlocked & VGA_RSRC_LEGACY_MEM)
+			vgadev->mem_lock_cnt = 0;
+		__vga_put(vgadev, decodes_unlocked);
 	}
 
 	/* change decodes counter */
-	if (old_decodes != new_decodes) {
-		if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
-			vga_decode_count++;
-		else
-			vga_decode_count--;
-	}
+	if (old_decodes & VGA_RSRC_LEGACY_MASK &&
+	    !(new_decodes & VGA_RSRC_LEGACY_MASK))
+		vga_decode_count--;
+	if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
+	    new_decodes & VGA_RSRC_LEGACY_MASK)
+		vga_decode_count++;
 	pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
 }
 
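
The rewritten counter logic only moves vga_decode_count on a genuine legacy/non-legacy transition; the old code also bumped it when a device merely switched which legacy resource it decoded. A compilable sketch of the corrected transitions:

#include <stdio.h>

#define VGA_RSRC_LEGACY_IO   0x01
#define VGA_RSRC_LEGACY_MEM  0x02
#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)

static int vga_decode_count;

static void update_decode_count(int old_decodes, int new_decodes)
{
	/* Decrement only when a device stops decoding legacy entirely... */
	if (old_decodes & VGA_RSRC_LEGACY_MASK &&
	    !(new_decodes & VGA_RSRC_LEGACY_MASK))
		vga_decode_count--;
	/* ...and increment only when it starts. */
	if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
	    new_decodes & VGA_RSRC_LEGACY_MASK)
		vga_decode_count++;
}

int main(void)
{
	vga_decode_count = 1;
	/* IO-only -> MEM-only: still legacy, the count must not move. */
	update_decode_count(VGA_RSRC_LEGACY_IO, VGA_RSRC_LEGACY_MEM);
	printf("count=%d\n", vga_decode_count); /* stays 1 */
	/* legacy -> none: now it drops. */
	update_decode_count(VGA_RSRC_LEGACY_MEM, 0);
	printf("count=%d\n", vga_decode_count); /* 0 */
	return 0;
}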
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 63d609d8a3f6..3abfa6ea226e 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -26,6 +26,7 @@
 #ifndef _I915_DRM_H_
 #define _I915_DRM_H_
 
+#include <drm/i915_pciids.h>
 #include <uapi/drm/i915_drm.h>
 
 /* For use by IPS driver */
@@ -34,4 +35,37 @@ extern bool i915_gpu_raise(void);
 extern bool i915_gpu_lower(void);
 extern bool i915_gpu_busy(void);
 extern bool i915_gpu_turbo_disable(void);
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
+ */
+#define INTEL_GMCH_CTRL 0x52
+#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+
+#define I830_GMCH_CTRL 0x52
+
+#define I855_GMCH_GMS_MASK 0xF0
+#define I855_GMCH_GMS_STOLEN_0M 0x0
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+
 #endif /* _I915_DRM_H_ */
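
The SNB_GMCH_* fields above let both i915 and the new x86 early quirk decode stolen-memory sizing from the bridge's GMCH control word. A standalone sketch of the shift-and-mask decode; the sample register value is made up, and the 32 MB-per-unit reading of GMS follows the SNB early quirk (which computes gms << 25 bytes):

#include <stdio.h>

/* Mirrors the SNB_GMCH_* macros added above. */
#define SNB_GMCH_GGMS_SHIFT 8
#define SNB_GMCH_GGMS_MASK 0x3
#define SNB_GMCH_GMS_SHIFT 3
#define SNB_GMCH_GMS_MASK 0x1f

int main(void)
{
	unsigned int gmch_ctrl = 0x0229; /* illustrative register value */
	unsigned int ggms = (gmch_ctrl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK;
	unsigned int gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;

	/* GGMS selects the GTT size; GMS counts stolen memory in 32 MB
	 * units on SNB (stated here as an assumption from the quirk). */
	printf("ggms=%u gms=%u stolen=%u MB\n", ggms, gms, gms * 32);
	return 0;
}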
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
new file mode 100644
index 000000000000..8a10f5c354e6
--- /dev/null
+++ b/include/drm/i915_pciids.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2013 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _I915_PCIIDS_H
+#define _I915_PCIIDS_H
+
+/*
+ * A pci_device_id struct {
+ *	__u32 vendor, device;
+ *	__u32 subvendor, subdevice;
+ *	__u32 class, class_mask;
+ *	kernel_ulong_t driver_data;
+ * };
+ * Don't use C99 here because "class" is reserved and we want to
+ * give userspace flexibility.
+ */
+#define INTEL_VGA_DEVICE(id, info) { \
+	0x8086, id, \
+	~0, ~0, \
+	0x030000, 0xff0000, \
+	(unsigned long) info }
+
+#define INTEL_QUANTA_VGA_DEVICE(info) { \
+	0x8086, 0x16a, \
+	0x152d, 0x8990, \
+	0x030000, 0xff0000, \
+	(unsigned long) info }
+
+#define INTEL_I830_IDS(info) \
+	INTEL_VGA_DEVICE(0x3577, info)
+
+#define INTEL_I845G_IDS(info) \
+	INTEL_VGA_DEVICE(0x2562, info)
+
+#define INTEL_I85X_IDS(info) \
+	INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
+	INTEL_VGA_DEVICE(0x358e, info)
+
+#define INTEL_I865G_IDS(info) \
+	INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
+
+#define INTEL_I915G_IDS(info) \
+	INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
+	INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */
+
+#define INTEL_I915GM_IDS(info) \
+	INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
+
+#define INTEL_I945G_IDS(info) \
+	INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
+
+#define INTEL_I945GM_IDS(info) \
+	INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
+	INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */
+
+#define INTEL_I965G_IDS(info) \
+	INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \
+	INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \
+	INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \
+	INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */
+
+#define INTEL_G33_IDS(info) \
+	INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
+	INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \
+	INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */
+
+#define INTEL_I965GM_IDS(info) \
+	INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \
+	INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */
+
+#define INTEL_GM45_IDS(info) \
+	INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
+
+#define INTEL_G45_IDS(info) \
+	INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
+	INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
+	INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
+	INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
+	INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
+	INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */
+
+#define INTEL_PINEVIEW_IDS(info) \
+	INTEL_VGA_DEVICE(0xa001, info), \
+	INTEL_VGA_DEVICE(0xa011, info)
+
+#define INTEL_IRONLAKE_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0042, info)
+
+#define INTEL_IRONLAKE_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0046, info)
+
+#define INTEL_SNB_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0102, info), \
+	INTEL_VGA_DEVICE(0x0112, info), \
+	INTEL_VGA_DEVICE(0x0122, info), \
+	INTEL_VGA_DEVICE(0x010A, info)
+
+#define INTEL_SNB_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0106, info), \
+	INTEL_VGA_DEVICE(0x0116, info), \
+	INTEL_VGA_DEVICE(0x0126, info)
+
+#define INTEL_IVB_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
+
+#define INTEL_IVB_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \
+	INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
+
+#define INTEL_IVB_Q_IDS(info) \
+	INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
+
+#define INTEL_HSW_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
+	INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
+	INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
+	INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
+	INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
+	INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
+	INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
+	INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
+	INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
+	INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
+	INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
+	INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
+	INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */ \
+
+#define INTEL_HSW_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
+	INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
+	INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
+
+#define INTEL_VLV_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0f30, info), \
+	INTEL_VGA_DEVICE(0x0f31, info), \
+	INTEL_VGA_DEVICE(0x0f32, info), \
+	INTEL_VGA_DEVICE(0x0f33, info), \
+	INTEL_VGA_DEVICE(0x0157, info)
+
+#define INTEL_VLV_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0155, info)
+
+#endif /* _I915_PCIIDS_H */
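
Consumers of the new header just drop the per-platform macros into a struct pci_device_id table. A userspace approximation with a cut-down pci_device_id and the same INTEL_VGA_DEVICE initializer shape (the info string stands in for the kernel's driver_data pointer):

#include <stdio.h>

/* Simplified stand-in for the kernel's struct pci_device_id. */
struct pci_device_id {
	unsigned int vendor, device;
	unsigned int subvendor, subdevice;
	unsigned int class, class_mask;
	unsigned long driver_data;
};

/* Same initializer shape as the macro in i915_pciids.h. */
#define INTEL_VGA_DEVICE(id, info) { \
	0x8086, id, \
	~0u, ~0u, \
	0x030000, 0xff0000, \
	(unsigned long) info }

static const char snb_m_info[] = "sandybridge-mobile";

/* A consumer (i915.ko, or the x86 early-quirks table) builds its
 * ID list from the per-platform macros. */
static const struct pci_device_id ids[] = {
	INTEL_VGA_DEVICE(0x0106, snb_m_info),
	INTEL_VGA_DEVICE(0x0116, snb_m_info),
	{ 0 },
};

int main(void)
{
	for (const struct pci_device_id *id = ids; id->vendor; id++)
		printf("%04x:%04x -> %s\n", id->vendor, id->device,
		       (const char *)id->driver_data);
	return 0;
}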
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 2c02f3a8d2ba..80cf8173a65b 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -65,8 +65,15 @@ struct pci_dev;
  * out of the arbitration process (and can be safe to take
  * interrupts at any time.
  */
+#if defined(CONFIG_VGA_ARB)
 extern void vga_set_legacy_decoding(struct pci_dev *pdev,
 				    unsigned int decodes);
+#else
+static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
+					   unsigned int decodes)
+{
+}
+#endif
 
 /**
  * vga_get - acquire & locks VGA resources
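
The vgaarb.h hunk gives vga_set_legacy_decoding() a no-op inline fallback so callers need no #ifdef of their own when CONFIG_VGA_ARB is disabled. The same compiled-out-stub pattern in miniature:

#include <stdio.h>

/* Flip this to 0 to mimic a kernel built without CONFIG_VGA_ARB. */
#define CONFIG_VGA_ARB 1

#if CONFIG_VGA_ARB
static void set_legacy_decoding(int decodes)
{
	printf("arbiter updated: decodes=%#x\n", decodes);
}
#else
/* Compiled-out configs get an empty inline; the call site is unchanged. */
static inline void set_legacy_decoding(int decodes) { (void)decodes; }
#endif

int main(void)
{
	set_legacy_decoding(0x3);
	return 0;
}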