author	David S. Miller <davem@davemloft.net>	2013-10-23 16:28:39 -0400
committer	David S. Miller <davem@davemloft.net>	2013-10-23 16:49:34 -0400
commit	c3fa32b9764dc45dcf8a2231b1c110abc4a63e0b (patch)
tree	6cf2896a77b65bec64284681e1c3851eb3263e09 /drivers/gpu/drm/i915
parent	34d92d5315b64a3e5292b7e9511c1bb617227fb6 (diff)
parent	320437af954cbe66478f1f5e8b34cb5a8d072191 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/usb/qmi_wwan.c
	include/net/dst.h
Trivial merge conflicts, both were overlapping changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c       15
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c        5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h        8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  109
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h       12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  38
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c        2
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h       1
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c       13
9 files changed, 123 insertions, 80 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c27a21034a5e..d5c784d48671 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,12 +1290,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 * then we do not take part in VGA arbitration and the
 	 * vga_client_register() fails with -ENODEV.
 	 */
-	if (!HAS_PCH_SPLIT(dev)) {
-		ret = vga_client_register(dev->pdev, dev, NULL,
-					  i915_vga_set_decode);
-		if (ret && ret != -ENODEV)
-			goto out;
-	}
+	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+	if (ret && ret != -ENODEV)
+		goto out;
 
 	intel_register_dsm_handler();
 
@@ -1351,12 +1348,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 */
 	intel_fbdev_initial_config(dev);
 
-	/*
-	 * Must do this after fbcon init so that
-	 * vgacon_save_screen() works during the handover.
-	 */
-	i915_disable_vga_mem(dev);
-
 	/* Only enable hotplug handling once the fbdev is fully set up. */
 	dev_priv->enable_hotplug_processing = true;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 69d8ed5416c3..2ad27880cd04 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -505,6 +505,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 		intel_modeset_suspend_hw(dev);
 	}
 
+	i915_gem_suspend_gtt_mappings(dev);
+
 	i915_save_state(dev);
 
 	intel_opregion_fini(dev);
@@ -648,7 +650,8 @@ static int i915_drm_thaw(struct drm_device *dev)
 		mutex_lock(&dev->struct_mutex);
 		i915_gem_restore_gtt_mappings(dev);
 		mutex_unlock(&dev->struct_mutex);
-	}
+	} else if (drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_check_and_clear_faults(dev);
 
 	__i915_drm_thaw(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 35874b3a86dc..ab0f2c0a440c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -497,10 +497,12 @@ struct i915_address_space {
 
 	/* FIXME: Need a more generic return type */
 	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
-				     enum i915_cache_level level);
+				     enum i915_cache_level level,
+				     bool valid); /* Create a valid PTE */
 	void (*clear_range)(struct i915_address_space *vm,
 			    unsigned int first_entry,
-			    unsigned int num_entries);
+			    unsigned int num_entries,
+			    bool use_scratch);
 	void (*insert_entries)(struct i915_address_space *vm,
 			       struct sg_table *st,
 			       unsigned int first_entry,
@@ -2065,6 +2067,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 			      struct drm_i915_gem_object *obj);
 
+void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 212f6d8c35ec..1f7b4caefb6e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -58,9 +58,10 @@
 #define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
 
 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
-				     enum i915_cache_level level)
+				     enum i915_cache_level level,
+				     bool valid)
 {
-	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
 	switch (level) {
@@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
-				     enum i915_cache_level level)
+				     enum i915_cache_level level,
+				     bool valid)
 {
-	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
 	switch (level) {
@@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
 
 static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
-				     enum i915_cache_level level)
+				     enum i915_cache_level level,
+				     bool valid)
 {
-	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
 	/* Mark the page as writeable.  Other platforms don't have a
@@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
-				     enum i915_cache_level level)
+				     enum i915_cache_level level,
+				     bool valid)
 {
-	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
 
 	if (level != I915_CACHE_NONE)
@@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
-				      enum i915_cache_level level)
+				      enum i915_cache_level level,
+				      bool valid)
 {
-	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
 
 	switch (level) {
@@ -236,7 +241,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
 /* PPGTT support for Sandybdrige/Gen6 and later */
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 				   unsigned first_entry,
-				   unsigned num_entries)
+				   unsigned num_entries,
+				   bool use_scratch)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
@@ -245,7 +251,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned last_pte, i;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
 
 	while (num_entries) {
 		last_pte = first_pte + num_entries;
@@ -282,7 +288,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 		dma_addr_t page_addr;
 
 		page_addr = sg_page_iter_dma_address(&sg_iter);
-		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
+		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
 		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
 			kunmap_atomic(pt_vaddr);
 			act_pt++;
@@ -367,7 +373,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	}
 
 	ppgtt->base.clear_range(&ppgtt->base, 0,
-				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
+				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
 
 	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
 
@@ -444,7 +450,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 {
 	ppgtt->base.clear_range(&ppgtt->base,
 				i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-				obj->base.size >> PAGE_SHIFT);
+				obj->base.size >> PAGE_SHIFT,
+				true);
 }
 
 extern int intel_iommu_gfx_mapped;
@@ -485,15 +492,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 	dev_priv->mm.interruptible = interruptible;
 }
 
+void i915_check_and_clear_faults(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return;
+
+	for_each_ring(ring, dev_priv, i) {
+		u32 fault_reg;
+		fault_reg = I915_READ(RING_FAULT_REG(ring));
+		if (fault_reg & RING_FAULT_VALID) {
+			DRM_DEBUG_DRIVER("Unexpected fault\n"
+					 "\tAddr: 0x%08lx\\n"
+					 "\tAddress space: %s\n"
+					 "\tSource ID: %d\n"
+					 "\tType: %d\n",
+					 fault_reg & PAGE_MASK,
+					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+					 RING_FAULT_SRCID(fault_reg),
+					 RING_FAULT_FAULT_TYPE(fault_reg));
+			I915_WRITE(RING_FAULT_REG(ring),
+				   fault_reg & ~RING_FAULT_VALID);
+		}
+	}
+	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Don't bother messing with faults pre GEN6 as we have little
+	 * documentation supporting that it's a good idea.
+	 */
+	if (INTEL_INFO(dev)->gen < 6)
+		return;
+
+	i915_check_and_clear_faults(dev);
+
+	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+				       dev_priv->gtt.base.start / PAGE_SIZE,
+				       dev_priv->gtt.base.total / PAGE_SIZE,
+				       false);
+}
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
+	i915_check_and_clear_faults(dev);
+
 	/* First fill our portion of the GTT with scratch pages */
 	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
 				       dev_priv->gtt.base.start / PAGE_SIZE,
-				       dev_priv->gtt.base.total / PAGE_SIZE);
+				       dev_priv->gtt.base.total / PAGE_SIZE,
+				       true);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		i915_gem_clflush_object(obj, obj->pin_display);
@@ -536,7 +593,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_page_iter_dma_address(&sg_iter);
-		iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
+		iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
 		i++;
 	}
 
@@ -548,7 +605,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	 */
 	if (i != 0)
 		WARN_ON(readl(&gtt_entries[i-1]) !=
-			vm->pte_encode(addr, level));
+			vm->pte_encode(addr, level, true));
 
 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
@@ -560,7 +617,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  unsigned int first_entry,
-				  unsigned int num_entries)
+				  unsigned int num_entries,
+				  bool use_scratch)
 {
 	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
@@ -573,7 +631,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 		     first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
 	readl(gtt_base);
@@ -594,7 +653,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
 				  unsigned int first_entry,
-				  unsigned int num_entries)
+				  unsigned int num_entries,
+				  bool unused)
 {
 	intel_gtt_clear_range(first_entry, num_entries);
 }
@@ -622,7 +682,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 
 	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
 				       entry,
-				       obj->base.size >> PAGE_SHIFT);
+				       obj->base.size >> PAGE_SHIFT,
+				       true);
 
 	obj->has_global_gtt_mapping = 0;
 }
@@ -709,11 +770,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 		const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
 			      hole_start, hole_end);
-		ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
+		ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
 	}
 
 	/* And finally clear the reserved guard page */
-	ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
+	ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
 }
 
 static bool
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c159e1a6810f..ef9b35479f01 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -604,6 +604,10 @@
 #define   ARB_MODE_SWIZZLE_IVB	(1<<5)
 #define RENDER_HWS_PGA_GEN7	(0x04080)
 #define RING_FAULT_REG(ring)	(0x4094 + 0x100*(ring)->id)
+#define   RING_FAULT_GTTSEL_MASK (1<<11)
+#define   RING_FAULT_SRCID(x)	((x >> 3) & 0xff)
+#define   RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
+#define   RING_FAULT_VALID	(1<<0)
 #define DONE_REG		0x40b0
 #define BSD_HWS_PGA_GEN7	(0x04180)
 #define BLT_HWS_PGA_GEN7	(0x04280)
@@ -3881,6 +3885,9 @@
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG		0x9030
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB	(1<<11)
 
+#define HSW_SCRATCH1				0xb038
+#define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE	(1<<27)
+
 #define HSW_FUSE_STRAP		0x42014
 #define  HSW_CDCLK_LIMIT	(1 << 24)
 
@@ -4276,7 +4283,9 @@
 #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
 
 #define SOUTH_DSPCLK_GATE_D	0xc2020
+#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
 #define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define  PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
 #define  PCH_LP_PARTITION_LEVEL_DISABLE  (1<<12)
 
 /* CPU: FDI_TX */
@@ -4728,6 +4737,9 @@
 #define GEN7_ROW_CHICKEN2_GT2		0xf4f4
 #define   DOP_CLOCK_GATING_DISABLE	(1<<0)
 
+#define HSW_ROW_CHICKEN3		0xe49c
+#define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
+
 #define G4X_AUD_VID_DID	(dev_priv->info->display_mmio_offset + 0x62020)
 #define INTEL_AUDIO_DEVCL		0x808629FB
 #define INTEL_AUDIO_DEVBLC		0x80862801
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e5822e79f912..581fb4b2f766 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3941,8 +3941,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
  * consider. */
 void intel_connector_dpms(struct drm_connector *connector, int mode)
 {
-	struct intel_encoder *encoder = intel_attached_encoder(connector);
-
 	/* All the simple cases only support two dpms states. */
 	if (mode != DRM_MODE_DPMS_ON)
 		mode = DRM_MODE_DPMS_OFF;
@@ -3953,10 +3951,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
 	connector->dpms = mode;
 
 	/* Only need to change hw state when actually enabled */
-	if (encoder->base.crtc)
-		intel_encoder_dpms(encoder, mode);
-	else
-		WARN_ON(encoder->connectors_active != false);
+	if (connector->encoder)
+		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
 
 	intel_modeset_check_state(connector->dev);
 }
@@ -10049,33 +10045,6 @@ static void i915_disable_vga(struct drm_device *dev)
 	POSTING_READ(vga_reg);
 }
 
-static void i915_enable_vga_mem(struct drm_device *dev)
-{
-	/* Enable VGA memory on Intel HD */
-	if (HAS_PCH_SPLIT(dev)) {
-		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
-		outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
-		vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
-						   VGA_RSRC_LEGACY_MEM |
-						   VGA_RSRC_NORMAL_IO |
-						   VGA_RSRC_NORMAL_MEM);
-		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-	}
-}
-
-void i915_disable_vga_mem(struct drm_device *dev)
-{
-	/* Disable VGA memory on Intel HD */
-	if (HAS_PCH_SPLIT(dev)) {
-		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
-		outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
-		vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
-						   VGA_RSRC_NORMAL_IO |
-						   VGA_RSRC_NORMAL_MEM);
-		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-	}
-}
-
 void intel_modeset_init_hw(struct drm_device *dev)
 {
 	intel_init_power_well(dev);
@@ -10354,7 +10323,6 @@ void i915_redisable_vga(struct drm_device *dev)
 	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 		i915_disable_vga(dev);
-		i915_disable_vga_mem(dev);
 	}
 }
 
@@ -10568,8 +10536,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
 	intel_disable_fbc(dev);
 
-	i915_enable_vga_mem(dev);
-
 	intel_disable_gt_powersave(dev);
 
 	ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 79c14e298ba6..2c555f91bfae 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1467,7 +1467,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
 
 	/* Avoid continuous PSR exit by masking memup and hpd */
 	I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
-		   EDP_PSR_DEBUG_MASK_HPD);
+		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
 	intel_dp->psr_setup_done = true;
 }
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 28cae80495e2..9b7b68fd5d47 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -793,6 +793,5 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
 extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
 extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
 extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
-extern void i915_disable_vga_mem(struct drm_device *dev);
 
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index dd176b7296c1..26c2ea3e985c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3864,8 +3864,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
 				      dev_priv->rps.rpe_delay),
 			 dev_priv->rps.rpe_delay);
 
-	INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
-
 	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
 	gen6_enable_rps_interrupts(dev);
@@ -4761,7 +4759,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
 	 * gating for the panel power sequencer or it will fail to
 	 * start up when no ports are active.
 	 */
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
 	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
 		   DPLS_EDP_PPS_FIX_DIS);
 	/* The below fixes the weird display corruption, a few pixels shifted
@@ -4955,6 +4955,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
 			GEN7_WA_L3_CHICKEN_MODE);
 
+	/* L3 caching of data atomics doesn't work -- disable it. */
+	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+	I915_WRITE(HSW_ROW_CHICKEN3,
+		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
+
 	/* This is required by WaCatErrorRejectionIssue:hsw */
 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -5681,5 +5686,7 @@ void intel_pm_init(struct drm_device *dev)
 
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
+
+	INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
 }
 